Dataset columns (string-length ranges):

    hip_filename    string, 5 to 84 characters
    hip_content     string, 79 to 9.69M characters
    cuda_filename   string, 4 to 83 characters
    cuda_content    string, 19 to 9.69M characters
2032e854599fe4c3e6166ae1ebf33b1a6015aabc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

#define N 20
#define M 3

__global__ void add(int *a, int *b, int *c, int n) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] + b[index];
}

void random_ints(int *x, int n) {
    int i;
    for (i = 0; i < n; i++) {
        x[i] = rand() % 99;
    }
}

void print(int *a, int n) {
    int i;
    for (i = 0; i < n; i++) {
        printf(" %d ", a[i]);
    }
    printf("\n");
}

int main(void) {
    int *a, *b, *c;        // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    int size = N * sizeof(int);

    // Alloc space for device copies of a, b, c
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    // Alloc space for host copies of a, b, c and setup input values
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    print(a, N);
    print(b, N);
    print(c, N);

    // Copy inputs to device
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    // Launch add() kernel on GPU with N blocks
    //add<<<N,1>>>(d_a, d_b, d_c);
    // Launch add() kernel on GPU with N threads
    //add<<<1,N>>>(d_a, d_b, d_c,N);
    hipLaunchKernelGGL(( add), dim3((N + M-1) / M), dim3(M), 0, 0, d_a, d_b, d_c, N);

    // Copy result back to host
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    printf("-------------------------------\n");
    print(c, N);

    // Cleanup
    free(a); free(b); free(c);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);

    return 0;
}
2032e854599fe4c3e6166ae1ebf33b1a6015aabc.cu
#include <stdio.h>
#include <stdlib.h>

#define N 20
#define M 3

__global__ void add(int *a, int *b, int *c, int n) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] + b[index];
}

void random_ints(int *x, int n) {
    int i;
    for (i = 0; i < n; i++) {
        x[i] = rand() % 99;
    }
}

void print(int *a, int n) {
    int i;
    for (i = 0; i < n; i++) {
        printf(" %d ", a[i]);
    }
    printf("\n");
}

int main(void) {
    int *a, *b, *c;        // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    int size = N * sizeof(int);

    // Alloc space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Alloc space for host copies of a, b, c and setup input values
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    print(a, N);
    print(b, N);
    print(c, N);

    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() kernel on GPU with N blocks
    //add<<<N,1>>>(d_a, d_b, d_c);
    // Launch add() kernel on GPU with N threads
    //add<<<1,N>>>(d_a, d_b, d_c,N);
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);

    // Copy result back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("-------------------------------\n");
    print(c, N);

    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);

    return 0;
}
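This first pair shows the two mechanical rewrites hipify performs on the vector-add example: runtime calls are renamed one-for-one (cudaMalloc becomes hipMalloc, cudaMemcpy becomes hipMemcpy, cudaFree becomes hipFree) and the <<<grid, block>>> chevron launch becomes an explicit hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), sharedMemBytes, stream, args...) call. The sketch below is not part of the dataset; it relaunches the same add() kernel on the HIP side with a hypothetical HIP_CHECK error-checking macro added, assuming a ROCm/hipcc toolchain.

// Illustrative sketch only (not a dataset row): error-checked HIP launch of add().
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(call)                                                      \
    do {                                                                     \
        hipError_t err_ = (call);                                            \
        if (err_ != hipSuccess) {                                            \
            fprintf(stderr, "HIP error %s at %s:%d\n",                       \
                    hipGetErrorString(err_), __FILE__, __LINE__);            \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

__global__ void add(const int *a, const int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main(void) {
    const int n = 20, threads = 3, blocks = (n + threads - 1) / threads;
    int *d_a, *d_b, *d_c;
    HIP_CHECK(hipMalloc((void **)&d_a, n * sizeof(int)));
    HIP_CHECK(hipMalloc((void **)&d_b, n * sizeof(int)));
    HIP_CHECK(hipMalloc((void **)&d_c, n * sizeof(int)));
    // Host-side initialization and copies are omitted for brevity; the inputs
    // stay uninitialized, since only the launch/error-check pattern matters here.
    // This call is what hipify emits for add<<<blocks, threads>>>(d_a, d_b, d_c, n):
    hipLaunchKernelGGL(add, dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c, n);
    HIP_CHECK(hipGetLastError());        // catch launch-configuration errors
    HIP_CHECK(hipDeviceSynchronize());   // catch asynchronous execution errors
    HIP_CHECK(hipFree(d_a));
    HIP_CHECK(hipFree(d_b));
    HIP_CHECK(hipFree(d_c));
    return 0;
}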
ff809bf2341a055cbca59194aa03c09f39225656.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright Ramtin Shams (hereafter referred to as 'the author'). All rights reserved. **Citation required in derived works or publications** NOTICE TO USER: Users and possessors of this source code are hereby granted a nonexclusive, royalty-free license to use this source code for non-commercial purposes only, as long as the author is appropriately acknowledged by inclusion of this notice in derived works and citation of appropriate publication(s) listed at the end of this notice in any derived works or publications that use or have benefited from this source code in its entirety or in part. THE AUTHOR MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. Relevant publication(s): @inproceedings{Shams_ICSPCS_2007, author = "R. Shams and R. A. Kennedy", title = "Efficient Histogram Algorithms for {NVIDIA} {CUDA} Compatible Devices", booktitle = "Proc. Int. Conf. on Signal Processing and Communications Systems ({ICSPCS})", address = "Gold Coast, Australia", month = dec, year = "2007", pages = "418-422", } @inproceedings{Shams_DICTA_2007a, author = "R. Shams and N. Barnes", title = "Speeding up Mutual Information Computation Using {NVIDIA} {CUDA} Hardware", booktitle = "Proc. Digital Image Computing: Techniques and Applications ({DICTA})", address = "Adelaide, Australia", month = dec, year = "2007", pages = "555-560", doi = "10.1109/DICTA.2007.4426846", }; */ // includes, system #include <stdlib.h> #include <tchar.h> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> #include "cuda_basics.h" // includes, kernels #include "gpu_basics.cu" #ifdef MATLAB extern "C" int mexfPrintf(FILE * _File, const char * fmt, ...) { va_list arg; va_start(arg, fmt); char s[4096]; //I am hoping that the output of vsprintf is not going to exceed this limit vsprintf(s, fmt, arg); va_end(arg); return mexPrintf("%s", s); } #endif //Round a / b to the nearest higher integer value extern "C" int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a to the nearest multiple of b extern "C" int iRoundUp(int a, int b) { return iDivUp(a, b) * b; } extern "C" void cudaMallocWrapper(void** devPtr, size_t count) { CUDA_SAFE_CALL(hipMalloc(devPtr, count)); } extern "C" void cudaFreeWrapper(void *devPtr) { CUDA_SAFE_CALL(hipFree(devPtr)); } extern "C" void cudaMemcpyHostToDeviceWrapper(void *dst, const void *src, size_t size) { CUDA_SAFE_CALL(hipMemcpy(dst, src, size, hipMemcpyHostToDevice)); } extern "C" int cudaGetDeviceCountWrapper(void) { int count; CUDA_SAFE_CALL(hipGetDeviceCount(&count)); return count; } extern "C" void cudaGetDevicePropertiesWrapper(hipDeviceProp_t *prop, int dev) { CUDA_SAFE_CALL(hipGetDeviceProperties(prop, dev)); } /* 'width' is limted to 2^16 and 'height' to 2^15. 
Exceeding these limits throws a cuda error with the following message: 'invalid parameter' */ extern "C" void cudaMallocArrayWrapper(void** devPtr, size_t width, size_t height) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); CUDA_SAFE_CALL(hipMallocArray((hipArray **) devPtr, &channelDesc, width, height)); } extern "C" void cudaMemcpyHostToArrayWrapper(void *dst, const void *src, size_t size) { CUDA_SAFE_CALL(hipMemcpyToArray((hipArray *) dst, 0, 0, src, size, hipMemcpyHostToDevice)); } extern "C" void cudaFreeArrayWrapper(void *devPtr) { CUDA_SAFE_CALL(hipFreeArray((hipArray *) devPtr)); } /* d_mem must point to device memory */ extern "C" void cudaZeroMem(float *d_mem, int length) { dim3 grid, block; TIMER_CREATE; const int max_threads = MAX_THREADS; int good_len = iRoundUp(length, WARP_SIZE); block.x = max_threads; block.y = 1; block.z = 1; int blocks = iDivUp(good_len, max_threads); if (blocks > MAX_BLOCKS_PER_DIM) { grid.x = ceil(sqrtf(blocks)); grid.y = grid.x; grid.z = 1; } else { grid.x = blocks; grid.y = 1; grid.z = 1; } TIMER_START; hipLaunchKernelGGL(( gpuZeroMem), dim3(grid), dim3(block), 0, 0, d_mem, length); CUT_CHECK_ERROR("gpuZeroMem() execution failed\n"); TIMER_PRINT("gpuZeroMem", length) TIMER_DELETE; } /* src: An MxN source matrix. dst: An Mx1 destination matrix; must be allocated by the caller. xdim: N ydim: M device: If set, the function assumes src and dst are given in device memory */ extern "C" void cudaSumAlongRows(float *src, float *dst, int xdim, int ydim, bool device /*= false*/) { float *d_src, *d_dst; //src and dst in device memory float *d_dst_tmp; dim3 grid, block; TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src, sizeof(float) * xdim * ydim)); CUDA_SAFE_CALL(hipMalloc((void**) &d_dst, sizeof(float) * ydim)); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src, src, sizeof(float) * xdim * ydim, hipMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0); } else { d_src = src; d_dst = dst; } const int cutoff = min(64, MAX_THREADS); //For EmuDebug int dst_ofs = 0, src_ofs = 0; int remaining = xdim ; int data_per_block = 2 * MAX_THREADS ; int dst_width = 0; while (remaining > 0) { dst_width += remaining / data_per_block; remaining = remaining - remaining / data_per_block * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } CUDA_SAFE_CALL(hipMalloc((void**) &d_dst_tmp, sizeof(float) * ydim * dst_width)); TIMER_START; remaining = xdim; data_per_block = 2 * MAX_THREADS; do { grid.x = remaining / data_per_block ; grid.y = ydim; grid.z = 1; block.x = MAX_THREADS ; block.y = 1; block.z = 1; if (grid.x > 0) { hipLaunchKernelGGL(( gpuSumAlongRows), dim3(grid), dim3(block), 0, 0, d_src + src_ofs, d_dst_tmp + dst_ofs, data_per_block, xdim, dst_width); CUT_CHECK_ERROR("gpuSumAlongRows() execution failed\n"); } src_ofs += data_per_block * grid.x; dst_ofs += grid.x; remaining = remaining - grid.x * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } while (remaining > 0); TIMER_PRINT("gpuSumAlongRows", xdim * ydim); if (dst_ofs > 1) //recursive call cudaSumAlongRows(d_dst_tmp, d_dst, dst_ofs, ydim, true); else CUDA_SAFE_CALL(hipMemcpy2D(d_dst, 1 * sizeof(float), d_dst_tmp, dst_width * sizeof(float), dst_width * sizeof(float), ydim, hipMemcpyDeviceToDevice)); CUDA_SAFE_CALL(hipFree(d_dst_tmp)); if (!device) { TIMER_START; //Copy dst data from device memory 
CUDA_SAFE_CALL(hipMemcpy(dst, d_dst, sizeof(float) * ydim, hipMemcpyDeviceToHost)); //Free memory CUDA_SAFE_CALL(hipFree(d_src)); CUDA_SAFE_CALL(hipFree(d_dst)); TIMER_PRINT("Storing data", 0); } TIMER_DELETE; } /* src: An MxN source matrix. dst: A 1xN destination matrix; must be allocated by the caller. xdim: N ydim: M device: If set, the function assumes src and dst are given in device memory */ extern "C" void cudaSumAlongCols(float *src, float *dst, int xdim, int ydim, bool device /*= false*/) { float *d_src, *d_dst; //src and dst in device memory float *d_dst_tmp; dim3 grid, block; TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src, sizeof(float) * xdim * ydim)); CUDA_SAFE_CALL(hipMalloc((void**) &d_dst, sizeof(float) * xdim)); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src, src, sizeof(float) * xdim * ydim, hipMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0); } else { d_src = src; d_dst = dst; } const int cutoff = min(64, MAX_THREADS); //For EmuDebug int dst_ofs = 0, src_ofs = 0; int remaining = ydim ; int data_per_block = 2 * MAX_THREADS ; int dst_height = 0; while (remaining > 0) { dst_height += remaining / data_per_block; remaining = remaining - remaining / data_per_block * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } CUDA_SAFE_CALL(hipMalloc((void**) &d_dst_tmp, sizeof(float) * xdim * dst_height)); TIMER_START; remaining = ydim; data_per_block = 2 * MAX_THREADS; int num_rows = 0; do { grid.x = xdim; grid.y = remaining / data_per_block; grid.z = 1; block.x = 1; block.y = MAX_THREADS; block.z = 1; if (grid.y > 0) { hipLaunchKernelGGL(( gpuSumAlongCols), dim3(grid), dim3(block), 0, 0, d_src + src_ofs, d_dst_tmp + dst_ofs, data_per_block, xdim, xdim); CUT_CHECK_ERROR("gpuSumAlongCols() execution failed\n"); } src_ofs += data_per_block * grid.y * xdim; dst_ofs += grid.y * xdim; num_rows += grid.y; remaining = remaining - grid.y * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } while (remaining > 0); TIMER_PRINT("gpuSumAlongCols", xdim * ydim); if (num_rows > 1) //recursive call cudaSumAlongCols(d_dst_tmp, d_dst, xdim, num_rows, true); else CUDA_SAFE_CALL(hipMemcpy(d_dst, d_dst_tmp, xdim * sizeof(float), hipMemcpyDeviceToDevice)); CUDA_SAFE_CALL(hipFree(d_dst_tmp)); if (!device) { TIMER_START; //Copy dst data from device memory CUDA_SAFE_CALL(hipMemcpy(dst, d_dst, sizeof(float) * xdim, hipMemcpyDeviceToHost)); //Free memory CUDA_SAFE_CALL(hipFree(d_src)); CUDA_SAFE_CALL(hipFree(d_dst)); TIMER_PRINT("Storing data", 0); } TIMER_DELETE; } //xyzReduction methods can be used for debugging and testing extern "C" float cudaReduction(float *src, int length, bool device /*= false*/) { dim3 grid, block; int size = length * sizeof(float); //Device memory pointers float *d_src, *d_dst1, *d_dst2, *d_dst; float res; TIMER_CREATE; const int max_threads = MAX_THREADS; int good_len = iRoundUp(length, WARP_SIZE); block.x = max_threads; block.y = 1; block.z = 1; //We can process up to 2 * max_threads in each round grid.x = ceil(sqrtf(iDivUp(good_len, 2 * max_threads))); grid.y = grid.x; grid.z = 1; TIMER_START; if (!device) { //Allocate data on the device CUDA_SAFE_CALL(hipMalloc((void**) &d_src, size)); //Copy src data to device memory CUDA_SAFE_CALL(hipMemcpy(d_src, src, size, hipMemcpyHostToDevice)); } else d_src = src; CUDA_SAFE_CALL(hipMalloc((void**) &d_dst1, grid.x * grid.y * sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void**) 
&d_dst2, grid.x * grid.y * sizeof(float))); TIMER_PRINT("Loading data", 0); TIMER_START; float *d_tmp = d_src; int count = 0; int len = length; do { d_dst = count % 2 ? d_dst = d_dst2 : d_dst = d_dst1; hipLaunchKernelGGL(( gpuReduction), dim3(grid), dim3(block), 0, 0, d_tmp, d_dst, len); CUT_CHECK_ERROR("gpuSum() execution failed\n"); d_tmp = d_dst; count++; len = grid.x * grid.y; good_len = iRoundUp(len, WARP_SIZE); grid.x = ceil(sqrtf(iDivUp(good_len, 2 * max_threads))); grid.y = grid.x; grid.z = 1; }while (len != 1); TIMER_PRINT("gpuSum", length); TIMER_START; if (!device) CUDA_SAFE_CALL(hipFree(d_src)); CUDA_SAFE_CALL(hipMemcpy(&res, d_dst, sizeof(float), hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_dst1)); CUDA_SAFE_CALL(hipFree(d_dst2)); TIMER_PRINT("Storing data", 0); TIMER_DELETE; return res; } CUDA_REDUCTION(Sum); CUDA_REDUCTION(Mul); CUDA_REDUCTION(Max); CUDA_REDUCTION(Min); CUDA_BINARY(SSDBinary) CUDA_BINARY(SADBinary)
ff809bf2341a055cbca59194aa03c09f39225656.cu
/* Copyright Ramtin Shams (hereafter referred to as 'the author'). All rights reserved. **Citation required in derived works or publications** NOTICE TO USER: Users and possessors of this source code are hereby granted a nonexclusive, royalty-free license to use this source code for non-commercial purposes only, as long as the author is appropriately acknowledged by inclusion of this notice in derived works and citation of appropriate publication(s) listed at the end of this notice in any derived works or publications that use or have benefited from this source code in its entirety or in part. THE AUTHOR MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOURCE CODE. Relevant publication(s): @inproceedings{Shams_ICSPCS_2007, author = "R. Shams and R. A. Kennedy", title = "Efficient Histogram Algorithms for {NVIDIA} {CUDA} Compatible Devices", booktitle = "Proc. Int. Conf. on Signal Processing and Communications Systems ({ICSPCS})", address = "Gold Coast, Australia", month = dec, year = "2007", pages = "418-422", } @inproceedings{Shams_DICTA_2007a, author = "R. Shams and N. Barnes", title = "Speeding up Mutual Information Computation Using {NVIDIA} {CUDA} Hardware", booktitle = "Proc. Digital Image Computing: Techniques and Applications ({DICTA})", address = "Adelaide, Australia", month = dec, year = "2007", pages = "555-560", doi = "10.1109/DICTA.2007.4426846", }; */ // includes, system #include <stdlib.h> #include <tchar.h> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> #include "cuda_basics.h" // includes, kernels #include "gpu_basics.cu" #ifdef MATLAB extern "C" int mexfPrintf(FILE * _File, const char * fmt, ...) { va_list arg; va_start(arg, fmt); char s[4096]; //I am hoping that the output of vsprintf is not going to exceed this limit vsprintf(s, fmt, arg); va_end(arg); return mexPrintf("%s", s); } #endif //Round a / b to the nearest higher integer value extern "C" int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a to the nearest multiple of b extern "C" int iRoundUp(int a, int b) { return iDivUp(a, b) * b; } extern "C" void cudaMallocWrapper(void** devPtr, size_t count) { CUDA_SAFE_CALL(cudaMalloc(devPtr, count)); } extern "C" void cudaFreeWrapper(void *devPtr) { CUDA_SAFE_CALL(cudaFree(devPtr)); } extern "C" void cudaMemcpyHostToDeviceWrapper(void *dst, const void *src, size_t size) { CUDA_SAFE_CALL(cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice)); } extern "C" int cudaGetDeviceCountWrapper(void) { int count; CUDA_SAFE_CALL(cudaGetDeviceCount(&count)); return count; } extern "C" void cudaGetDevicePropertiesWrapper(cudaDeviceProp *prop, int dev) { CUDA_SAFE_CALL(cudaGetDeviceProperties(prop, dev)); } /* 'width' is limted to 2^16 and 'height' to 2^15. 
Exceeding these limits throws a cuda error with the following message: 'invalid parameter' */ extern "C" void cudaMallocArrayWrapper(void** devPtr, size_t width, size_t height) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); CUDA_SAFE_CALL(cudaMallocArray((cudaArray **) devPtr, &channelDesc, width, height)); } extern "C" void cudaMemcpyHostToArrayWrapper(void *dst, const void *src, size_t size) { CUDA_SAFE_CALL(cudaMemcpyToArray((cudaArray *) dst, 0, 0, src, size, cudaMemcpyHostToDevice)); } extern "C" void cudaFreeArrayWrapper(void *devPtr) { CUDA_SAFE_CALL(cudaFreeArray((cudaArray *) devPtr)); } /* d_mem must point to device memory */ extern "C" void cudaZeroMem(float *d_mem, int length) { dim3 grid, block; TIMER_CREATE; const int max_threads = MAX_THREADS; int good_len = iRoundUp(length, WARP_SIZE); block.x = max_threads; block.y = 1; block.z = 1; int blocks = iDivUp(good_len, max_threads); if (blocks > MAX_BLOCKS_PER_DIM) { grid.x = ceil(sqrtf(blocks)); grid.y = grid.x; grid.z = 1; } else { grid.x = blocks; grid.y = 1; grid.z = 1; } TIMER_START; gpuZeroMem<<<grid, block>>>(d_mem, length); CUT_CHECK_ERROR("gpuZeroMem() execution failed\n"); TIMER_PRINT("gpuZeroMem", length) TIMER_DELETE; } /* src: An MxN source matrix. dst: An Mx1 destination matrix; must be allocated by the caller. xdim: N ydim: M device: If set, the function assumes src and dst are given in device memory */ extern "C" void cudaSumAlongRows(float *src, float *dst, int xdim, int ydim, bool device /*= false*/) { float *d_src, *d_dst; //src and dst in device memory float *d_dst_tmp; dim3 grid, block; TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, sizeof(float) * xdim * ydim)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_dst, sizeof(float) * ydim)); //Copy src data to device memory CUDA_SAFE_CALL(cudaMemcpy(d_src, src, sizeof(float) * xdim * ydim, cudaMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0); } else { d_src = src; d_dst = dst; } const int cutoff = min(64, MAX_THREADS); //For EmuDebug int dst_ofs = 0, src_ofs = 0; int remaining = xdim ; int data_per_block = 2 * MAX_THREADS ; int dst_width = 0; while (remaining > 0) { dst_width += remaining / data_per_block; remaining = remaining - remaining / data_per_block * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } CUDA_SAFE_CALL(cudaMalloc((void**) &d_dst_tmp, sizeof(float) * ydim * dst_width)); TIMER_START; remaining = xdim; data_per_block = 2 * MAX_THREADS; do { grid.x = remaining / data_per_block ; grid.y = ydim; grid.z = 1; block.x = MAX_THREADS ; block.y = 1; block.z = 1; if (grid.x > 0) { gpuSumAlongRows<<<grid, block>>>(d_src + src_ofs, d_dst_tmp + dst_ofs, data_per_block, xdim, dst_width); CUT_CHECK_ERROR("gpuSumAlongRows() execution failed\n"); } src_ofs += data_per_block * grid.x; dst_ofs += grid.x; remaining = remaining - grid.x * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } while (remaining > 0); TIMER_PRINT("gpuSumAlongRows", xdim * ydim); if (dst_ofs > 1) //recursive call cudaSumAlongRows(d_dst_tmp, d_dst, dst_ofs, ydim, true); else CUDA_SAFE_CALL(cudaMemcpy2D(d_dst, 1 * sizeof(float), d_dst_tmp, dst_width * sizeof(float), dst_width * sizeof(float), ydim, cudaMemcpyDeviceToDevice)); CUDA_SAFE_CALL(cudaFree(d_dst_tmp)); if (!device) { TIMER_START; //Copy dst data from device memory CUDA_SAFE_CALL(cudaMemcpy(dst, d_dst, sizeof(float) * ydim, 
cudaMemcpyDeviceToHost)); //Free memory CUDA_SAFE_CALL(cudaFree(d_src)); CUDA_SAFE_CALL(cudaFree(d_dst)); TIMER_PRINT("Storing data", 0); } TIMER_DELETE; } /* src: An MxN source matrix. dst: A 1xN destination matrix; must be allocated by the caller. xdim: N ydim: M device: If set, the function assumes src and dst are given in device memory */ extern "C" void cudaSumAlongCols(float *src, float *dst, int xdim, int ydim, bool device /*= false*/) { float *d_src, *d_dst; //src and dst in device memory float *d_dst_tmp; dim3 grid, block; TIMER_CREATE; if (!device) { TIMER_START; //Allocate data on the device CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, sizeof(float) * xdim * ydim)); CUDA_SAFE_CALL(cudaMalloc((void**) &d_dst, sizeof(float) * xdim)); //Copy src data to device memory CUDA_SAFE_CALL(cudaMemcpy(d_src, src, sizeof(float) * xdim * ydim, cudaMemcpyHostToDevice)); TIMER_PRINT("Loading data", 0); } else { d_src = src; d_dst = dst; } const int cutoff = min(64, MAX_THREADS); //For EmuDebug int dst_ofs = 0, src_ofs = 0; int remaining = ydim ; int data_per_block = 2 * MAX_THREADS ; int dst_height = 0; while (remaining > 0) { dst_height += remaining / data_per_block; remaining = remaining - remaining / data_per_block * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } CUDA_SAFE_CALL(cudaMalloc((void**) &d_dst_tmp, sizeof(float) * xdim * dst_height)); TIMER_START; remaining = ydim; data_per_block = 2 * MAX_THREADS; int num_rows = 0; do { grid.x = xdim; grid.y = remaining / data_per_block; grid.z = 1; block.x = 1; block.y = MAX_THREADS; block.z = 1; if (grid.y > 0) { gpuSumAlongCols<<<grid, block>>>(d_src + src_ofs, d_dst_tmp + dst_ofs, data_per_block, xdim, xdim); CUT_CHECK_ERROR("gpuSumAlongCols() execution failed\n"); } src_ofs += data_per_block * grid.y * xdim; dst_ofs += grid.y * xdim; num_rows += grid.y; remaining = remaining - grid.y * data_per_block; data_per_block >>= 1; if (remaining < cutoff) data_per_block = remaining; } while (remaining > 0); TIMER_PRINT("gpuSumAlongCols", xdim * ydim); if (num_rows > 1) //recursive call cudaSumAlongCols(d_dst_tmp, d_dst, xdim, num_rows, true); else CUDA_SAFE_CALL(cudaMemcpy(d_dst, d_dst_tmp, xdim * sizeof(float), cudaMemcpyDeviceToDevice)); CUDA_SAFE_CALL(cudaFree(d_dst_tmp)); if (!device) { TIMER_START; //Copy dst data from device memory CUDA_SAFE_CALL(cudaMemcpy(dst, d_dst, sizeof(float) * xdim, cudaMemcpyDeviceToHost)); //Free memory CUDA_SAFE_CALL(cudaFree(d_src)); CUDA_SAFE_CALL(cudaFree(d_dst)); TIMER_PRINT("Storing data", 0); } TIMER_DELETE; } //xyzReduction methods can be used for debugging and testing extern "C" float cudaReduction(float *src, int length, bool device /*= false*/) { dim3 grid, block; int size = length * sizeof(float); //Device memory pointers float *d_src, *d_dst1, *d_dst2, *d_dst; float res; TIMER_CREATE; const int max_threads = MAX_THREADS; int good_len = iRoundUp(length, WARP_SIZE); block.x = max_threads; block.y = 1; block.z = 1; //We can process up to 2 * max_threads in each round grid.x = ceil(sqrtf(iDivUp(good_len, 2 * max_threads))); grid.y = grid.x; grid.z = 1; TIMER_START; if (!device) { //Allocate data on the device CUDA_SAFE_CALL(cudaMalloc((void**) &d_src, size)); //Copy src data to device memory CUDA_SAFE_CALL(cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice)); } else d_src = src; CUDA_SAFE_CALL(cudaMalloc((void**) &d_dst1, grid.x * grid.y * sizeof(float))); CUDA_SAFE_CALL(cudaMalloc((void**) &d_dst2, grid.x * grid.y * sizeof(float))); TIMER_PRINT("Loading data", 0); 
TIMER_START; float *d_tmp = d_src; int count = 0; int len = length; do { d_dst = count % 2 ? d_dst = d_dst2 : d_dst = d_dst1; gpuReduction<<<grid, block>>>(d_tmp, d_dst, len); CUT_CHECK_ERROR("gpuSum() execution failed\n"); d_tmp = d_dst; count++; len = grid.x * grid.y; good_len = iRoundUp(len, WARP_SIZE); grid.x = ceil(sqrtf(iDivUp(good_len, 2 * max_threads))); grid.y = grid.x; grid.z = 1; }while (len != 1); TIMER_PRINT("gpuSum", length); TIMER_START; if (!device) CUDA_SAFE_CALL(cudaFree(d_src)); CUDA_SAFE_CALL(cudaMemcpy(&res, d_dst, sizeof(float), cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(d_dst1)); CUDA_SAFE_CALL(cudaFree(d_dst2)); TIMER_PRINT("Storing data", 0); TIMER_DELETE; return res; } CUDA_REDUCTION(Sum); CUDA_REDUCTION(Mul); CUDA_REDUCTION(Max); CUDA_REDUCTION(Min); CUDA_BINARY(SSDBinary) CUDA_BINARY(SADBinary)
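Both versions of this file size their launches the same way: iDivUp/iRoundUp pad the element count to a warp multiple, and when the resulting one-dimensional block count exceeds MAX_BLOCKS_PER_DIM the grid is folded into a square (grid.x = grid.y = ceil(sqrt(blocks))), as in cudaZeroMem and cudaReduction above. A standalone sketch of that sizing logic follows; MAX_THREADS, WARP_SIZE and MAX_BLOCKS_PER_DIM come from cuda_basics.h in the original, so the defaults used here are stand-in values only.

// Sketch of the grid-sizing pattern used above; the default constants are
// placeholders for the MAX_THREADS / WARP_SIZE / MAX_BLOCKS_PER_DIM macros
// defined in the original project's cuda_basics.h.
#include <hip/hip_runtime.h>
#include <cmath>

static int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
static int iRoundUp(int a, int b) { return iDivUp(a, b) * b; }

static dim3 makeGrid(int length, int maxThreads = 256,
                     int warpSize = 64, int maxBlocksPerDim = 65535) {
    int goodLen = iRoundUp(length, warpSize);   // pad to a warp multiple
    int blocks  = iDivUp(goodLen, maxThreads);  // 1D block count
    if (blocks > maxBlocksPerDim) {
        // Fold into a square 2D grid covering at least `blocks` blocks.
        unsigned int side = (unsigned int)std::ceil(std::sqrt((float)blocks));
        return dim3(side, side, 1);
    }
    return dim3(blocks, 1, 1);
}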
af5f8f677563e3e420ed5cea447cd9fa4b294599.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "stdio.h"

using namespace std;

__global__ void mykernel() {
    printf("Hello World!");
}

int main() {
    hipLaunchKernelGGL(( mykernel), dim3(1), dim3(1), 0, 0, );
    return 0;
}
af5f8f677563e3e420ed5cea447cd9fa4b294599.cu
#include "cuda_runtime.h" #include "cuda.h" #include "device_launch_parameters.h" #include "stdio.h" using namespace std; __global__ void mykernel() { printf("Hello World!"); } int main(){ mykernel <<< 1,1>>> (); return 0; }
e8afb8fbc5e63e020c3f611005afb568d0901ee0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrixMulCUDA.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int *B = NULL; hipMalloc(&B, XSIZE*YSIZE); int *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrixMulCUDA), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrixMulCUDA), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrixMulCUDA), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
e8afb8fbc5e63e020c3f611005afb568d0901ee0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrixMulCUDA.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); int *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrixMulCUDA<<<gridBlock,threadBlock>>>(A,B,C); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrixMulCUDA<<<gridBlock,threadBlock>>>(A,B,C); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrixMulCUDA<<<gridBlock,threadBlock>>>(A,B,C); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
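This benchmark pair times 1000 back-to-back launches with std::chrono but never synchronizes inside the timed region, so the host clock largely measures launch submission rather than kernel execution. The sketch below shows event-based timing as an alternative; matrixMulCUDA is reduced to a stub standing in for the kernel the original includes from matrixMulCUDA.cu, and the helper name is illustrative.

// Sketch only: timing kernel launches with HIP events instead of std::chrono.
#include <hip/hip_runtime.h>

// Hypothetical stand-in for the kernel included from matrixMulCUDA.cu.
__global__ void matrixMulCUDA(int *A, int *B, int *C) {
    (void)A; (void)B; (void)C;  // intentionally empty stub
}

// Returns the average time per launch in milliseconds.
float timeKernel(dim3 grid, dim3 block, int *A, int *B, int *C, int iters) {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    for (int i = 0; i < iters; ++i) {
        hipLaunchKernelGGL(matrixMulCUDA, grid, block, 0, 0, A, B, C);
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);   // wait until all timed work has finished
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return ms / iters;
}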
41fb6057f7415221f4ef5f8c997638e3d0690321.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

__global__ void axpy(float a, float* x, float* y) {
  y[threadIdx.x] = a * x[threadIdx.x];
}

int main(int argc, char* argv[]) {
  const int kDataLen = 4;

  float a = 2.0f;
  float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
  float host_y[kDataLen];

  // Copy input data to device.
  float* device_x;
  float* device_y;
  hipMalloc(&device_x, kDataLen * sizeof(float));
  hipMalloc(&device_y, kDataLen * sizeof(float));
  hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);

  // Launch the kernel.
  hipLaunchKernelGGL(( axpy), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);

  // Copy output data to host.
  hipDeviceSynchronize();
  hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);

  // Print the results.
  for (int i = 0; i < kDataLen; ++i) {
    std::cout << "y[" << i << "] = " << host_y[i] << "\n";
  }

  hipDeviceReset();
  return 0;
}
41fb6057f7415221f4ef5f8c997638e3d0690321.cu
#include <iostream>

__global__ void axpy(float a, float* x, float* y) {
  y[threadIdx.x] = a * x[threadIdx.x];
}

int main(int argc, char* argv[]) {
  const int kDataLen = 4;

  float a = 2.0f;
  float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
  float host_y[kDataLen];

  // Copy input data to device.
  float* device_x;
  float* device_y;
  cudaMalloc(&device_x, kDataLen * sizeof(float));
  cudaMalloc(&device_y, kDataLen * sizeof(float));
  cudaMemcpy(device_x, host_x, kDataLen * sizeof(float), cudaMemcpyHostToDevice);

  // Launch the kernel.
  axpy<<<1, kDataLen>>>(a, device_x, device_y);

  // Copy output data to host.
  cudaDeviceSynchronize();
  cudaMemcpy(host_y, device_y, kDataLen * sizeof(float), cudaMemcpyDeviceToHost);

  // Print the results.
  for (int i = 0; i < kDataLen; ++i) {
    std::cout << "y[" << i << "] = " << host_y[i] << "\n";
  }

  cudaDeviceReset();
  return 0;
}
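The axpy kernel in this pair only works when the whole array fits in a single block of kDataLen threads. A common generalization, shown purely as an illustrative sketch rather than anything from the dataset, is a grid-stride loop so that any grid/block shape covers all n elements:

// Sketch: grid-stride variant of the axpy kernel above.
#include <hip/hip_runtime.h>

__global__ void axpy_strided(float a, const float *x, float *y, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        y[i] = a * x[i];
    }
}

// Example launch covering n elements with 256-thread blocks:
//   int blocks = (n + 255) / 256;
//   hipLaunchKernelGGL(axpy_strided, dim3(blocks), dim3(256), 0, 0,
//                      a, device_x, device_y, n);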
pomdp_pbvi_gpu.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * The MIT License (MIT) * * Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <nova/pomdp/algorithms/pomdp_pbvi_gpu.h> #include <nova/pomdp/utilities/pomdp_model_gpu.h> #include <stdio.h> #include <nova/error_codes.h> #include <nova/constants.h> namespace nova { __global__ void pomdp_pbvi_initialize_alphaBA_gpu(unsigned int n, unsigned int m, unsigned int r, const float *R, float *alphaBA) { unsigned int beliefIndex = blockIdx.x; unsigned int action = blockIdx.y; if (beliefIndex >= r || action >= m) { return; } // Compute Gamma_{a,*} and set it to the first value of alphaBA. Stride here. for (unsigned int s = threadIdx.x; s < n; s += blockDim.x) { alphaBA[beliefIndex * m * n + action * n + s] = R[s * m + action]; } } __global__ void pomdp_pbvi_compute_alphaBA_gpu(unsigned int n, unsigned int ns, unsigned int m, unsigned int z, unsigned int r, unsigned int rz, float gamma, const int *S, const float *T, const float *O, const float *R, const int *Z, const float *B, const float *Gamma, float *alphaBA) { // Since float and unsigned int are 4 bytes each, and we need each array to be the size of // the number of threads, we will need to call this with: // sizeof(float) * numThreads + sizeof(unsigned int) * numThreads. // Note: blockDim.x == numThreads extern __shared__ float sdata[]; float *maxAlphaDotBeta = (float *)sdata; unsigned int *maxAlphaIndex = (unsigned int *)&maxAlphaDotBeta[blockDim.x]; maxAlphaDotBeta[threadIdx.x] = FLT_MIN; maxAlphaIndex[threadIdx.x] = 0; __syncthreads(); unsigned int beliefIndex = blockIdx.x; unsigned int action = blockIdx.y; unsigned int observation = blockIdx.z; if (beliefIndex >= r || action >= m || observation >= z) { return; } // Compute the max alpha vector from Gamma, given the fixed action and observation. // Note: this is the max w.r.t. just the strided elements. The reduction will // be computed afterwards for the max over all alpha-vectors. for (unsigned int alphaIndex = threadIdx.x; alphaIndex < r; alphaIndex += blockDim.x) { float alphaDotBeta = 0.0f; for (unsigned int i = 0; i < rz; i++) { int s = Z[beliefIndex * rz + i]; if (s < 0) { break; } // We compute the value of this state in the alpha-vector, then multiply it by the // belief, and add it to the current dot product value for this alpha-vector. 
float value = 0.0f; for (unsigned int j = 0; j < ns; j++) { int sp = S[s * m * ns + action * ns + j]; if (sp < 0) { break; } value += T[s * m * ns + action * ns + j] * O[action * n * z + sp * z + observation] * Gamma[alphaIndex * n + sp]; } __syncthreads(); value *= gamma; alphaDotBeta += value * B[beliefIndex * rz + i]; } __syncthreads(); // Store the maximal value and index. if (alphaIndex == threadIdx.x || alphaDotBeta > maxAlphaDotBeta[threadIdx.x]) { maxAlphaDotBeta[threadIdx.x] = alphaDotBeta; maxAlphaIndex[threadIdx.x] = alphaIndex; } } // Note: The above code essentially does the first add during load. It takes care of *all* // the other elements *outside* the number of threads we have. In other words, starting here, // we already have computed part of the maxAlphaDotBeta and maxAlphaIndex; we just need to // finish the rest quickly, using a reduction. __syncthreads(); // Use reduction to compute the max overall alpha-vector. for (unsigned int alphaIndex = blockDim.x / 2; alphaIndex > 0; alphaIndex >>= 1) { if (threadIdx.x < alphaIndex && threadIdx.x < r && threadIdx.x + alphaIndex < r) { if (maxAlphaDotBeta[threadIdx.x] < maxAlphaDotBeta[threadIdx.x + alphaIndex]) { maxAlphaDotBeta[threadIdx.x] = maxAlphaDotBeta[threadIdx.x + alphaIndex]; maxAlphaIndex[threadIdx.x] = maxAlphaIndex[threadIdx.x + alphaIndex]; } } __syncthreads(); } // Now we can compute the alpha-vector component for this observation, since we have the max. // We will need to compute the dot product anyway, so let's just distribute the belief over the // sum over observations, and add it all up here. // Note: This re-uses the thread to stride over states now. for (unsigned int s = threadIdx.x; s < n; s += blockDim.x) { // We compute the value of this state in the alpha-vector, then multiply it by the belief, // and add it to the current dot product value for this alpha-vector. float value = 0.0f; for (unsigned int i = 0; i < ns; i++) { int sp = S[s * m * ns + action * ns + i]; if (sp < 0) { break; } // Note: maxAlphaIndex[0] holds the maximal index value computed from the reduction. value += T[s * m * ns + action * ns + i] * O[action * n * z + sp * z + observation] * Gamma[maxAlphaIndex[0] * n + sp]; } __syncthreads(); alphaBA[beliefIndex * m * n + action * n + s] += gamma * value; } } __global__ void pomdp_pbvi_update_step_gpu(unsigned int n, unsigned int ns, unsigned int m, unsigned int z, unsigned int r, unsigned int rz, float gamma, const int *S, const float *T, const float *O, const float *R, const int *Z, const float *B, const float *Gamma, float *alphaBA, float *GammaPrime, unsigned int *piPrime) { // Each block will run a different belief. Our overall goal: Compute the value // of GammaPrime[beliefIndex * n + ???] and piPrime[beliefIndex]. unsigned int beliefIndex = blockIdx.x * blockDim.x + threadIdx.x; if (beliefIndex >= r) { return; } // We want to find the action that maximizes the value, store it in piPrime, as well as // its alpha-vector GammaPrime. float maxActionValue = FLT_MIN; for (unsigned int action = 0; action < m; action++) { // Only execute if the action is available. //if (available[beliefIndex * m + action]) { // The potential alpha-vector has been computed, so compute the value with respect // to the belief state. float actionValue = 0.0f; for (unsigned int i = 0; i < rz; i++) { int s = Z[beliefIndex * rz + i]; if (s < 0) { break; } actionValue += alphaBA[beliefIndex * m * n + action * n + s] * B[beliefIndex * rz + i]; } // If this was larger, then overwrite piPrime and GammaPrime's values. 
if (actionValue > maxActionValue) { maxActionValue = actionValue; piPrime[beliefIndex] = action; } //} //__syncthreads(); } memcpy(&GammaPrime[beliefIndex * n], &alphaBA[beliefIndex * m * n + piPrime[beliefIndex] * n], n * sizeof(float)); //for (unsigned int s = 0; s < n; s++) { // GammaPrime[beliefIndex * n + s] = alphaBA[beliefIndex * m * n + piPrime[beliefIndex] * n + s]; //} } int pomdp_pbvi_initialize_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi) { // Reset the current horizon. pbvi->currentHorizon = 0; // Create the device-side Gamma. if (hipMalloc(&pbvi->d_Gamma, pomdp->r * pomdp->n * sizeof(float)) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for Gamma."); return NOVA_ERROR_DEVICE_MALLOC; } if (hipMemcpy(pbvi->d_Gamma, pbvi->GammaInitial, pomdp->r * pomdp->n * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to copy memory from host to device for Gamma."); return NOVA_ERROR_MEMCPY_TO_DEVICE; } if (hipMalloc(&pbvi->d_GammaPrime, pomdp->r * pomdp->n * sizeof(float)) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for Gamma (prime)."); return NOVA_ERROR_DEVICE_MALLOC; } if (hipMemcpy(pbvi->d_GammaPrime, pbvi->GammaInitial, pomdp->r * pomdp->n * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to copy memory from host to device for Gamma (prime)."); return NOVA_ERROR_MEMCPY_TO_DEVICE; } // Create the device-side pi. if (hipMalloc(&pbvi->d_pi, pomdp->r * sizeof(unsigned int)) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for pi."); return NOVA_ERROR_DEVICE_MALLOC; } // Create the device-side memory for the intermediate variable alphaBA. if (hipMalloc(&pbvi->d_alphaBA, pomdp->r * pomdp->m * pomdp->n * sizeof(float)) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for alphaBA."); return NOVA_ERROR_DEVICE_MALLOC; } return NOVA_SUCCESS; } int pomdp_pbvi_execute_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi, POMDPAlphaVectors *policy) { // The result from calling other functions. int result; // Ensure the data is valid. if (pomdp == nullptr || pomdp->n == 0 || pomdp->ns == 0 || pomdp->m == 0 || pomdp->z == 0 || pomdp->r == 0 || pomdp->rz == 0 || pomdp->d_S == nullptr || pomdp->d_T == nullptr || pomdp->d_O == nullptr || pomdp->d_R == nullptr || pomdp->d_Z == nullptr || pomdp->d_B == nullptr || pomdp->gamma < 0.0f || pomdp->gamma > 1.0f || pomdp->horizon < 1 || pbvi == nullptr || pbvi->GammaInitial == nullptr || policy == nullptr) { fprintf(stderr, "Error[pomdp_pbvi_execute_gpu]: %s\n", "Invalid arguments."); return NOVA_ERROR_INVALID_DATA; } // Ensure threads are correct. if (pbvi->numThreads % 32 != 0) { fprintf(stderr, "Error[pomdp_pbvi_execute_gpu]: %s\n", "Invalid number of threads."); return NOVA_ERROR_INVALID_CUDA_PARAM; } result = pomdp_pbvi_initialize_gpu(pomdp, pbvi); if (result != NOVA_SUCCESS) { return result; } // For each of the updates, run PBVI. Note that the currentHorizon is initialized to zero // above, and is updated in the update function below. 
while (pbvi->currentHorizon < pomdp->horizon) { //printf("PBVI (GPU Version) -- Iteration %i of %i\n", pomdp->currentHorizon, pomdp->horizon); result = pomdp_pbvi_update_gpu(pomdp, pbvi); if (result != NOVA_SUCCESS) { return result; } } result = pomdp_pbvi_get_policy_gpu(pomdp, pbvi, policy); if (result != NOVA_SUCCESS) { return result; } result = pomdp_pbvi_uninitialize_gpu(pomdp, pbvi); if (result != NOVA_SUCCESS) { return result; } return NOVA_SUCCESS; } int pomdp_pbvi_uninitialize_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi) { int result; result = NOVA_SUCCESS; // Reset the current horizon. pbvi->currentHorizon = 0; if (pbvi->d_Gamma != nullptr) { if (hipFree(pbvi->d_Gamma) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for the Gamma (the alpha-vectors)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_Gamma = nullptr; if (pbvi->d_GammaPrime != nullptr) { if (hipFree(pbvi->d_GammaPrime) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for the GammaPrime (the alpha-vectors' copy)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_GammaPrime = nullptr; if (pbvi->d_pi != nullptr) { if (hipFree(pbvi->d_pi) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for the pi (the policy)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_pi = nullptr; if (pbvi->d_alphaBA != nullptr) { if (hipFree(pbvi->d_alphaBA) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for alphaBA (alpha-vector collection)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_alphaBA = nullptr; return result; } int pomdp_pbvi_update_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi) { // The number of blocks in the main CUDA kernel call. int numBlocks; hipLaunchKernelGGL(( pomdp_pbvi_initialize_alphaBA_gpu), dim3(dim3(pomdp->r, pomdp->m, 1)), dim3(pbvi->numThreads) , 0, 0, pomdp->n, pomdp->m, pomdp->r, pomdp->d_R, pbvi->d_alphaBA); // Check if there was an error executing the kernel. if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to execute the 'initialization of alphaBA' kernel."); return NOVA_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to synchronize the device after 'initialization of alphaBA' kernel."); return NOVA_ERROR_DEVICE_SYNCHRONIZE; } hipLaunchKernelGGL(( pomdp_pbvi_compute_alphaBA_gpu), dim3(dim3(pomdp->r, pomdp->m, pomdp->z)), dim3(pbvi->numThreads), pbvi->numThreads * sizeof(float) + pbvi->numThreads * sizeof(unsigned int) , 0, pomdp->n, pomdp->ns, pomdp->m, pomdp->z, pomdp->r, pomdp->rz, pomdp->gamma, pomdp->d_S, pomdp->d_T, pomdp->d_O, pomdp->d_R, pomdp->d_Z, pomdp->d_B, pbvi->d_Gamma, pbvi->d_alphaBA); // Check if there was an error executing the kernel. if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to execute the 'compute_alphaBA' kernel."); return NOVA_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to synchronize the device after 'compute_alphaBA' kernel."); return NOVA_ERROR_DEVICE_SYNCHRONIZE; } // Compute the number of blocks. 
numBlocks = (unsigned int)((float)pomdp->r / (float)pbvi->numThreads) + 1; // Execute a kernel for the first three stages of for-loops: B, A, Z, as a 3d-block, // and the 4th stage for-loop over Gamma as the threads. if (pbvi->currentHorizon % 2 == 0) { hipLaunchKernelGGL(( pomdp_pbvi_update_step_gpu), dim3(numBlocks), dim3(pbvi->numThreads) , 0, 0, pomdp->n, pomdp->ns, pomdp->m, pomdp->z, pomdp->r, pomdp->rz, pomdp->gamma, pomdp->d_S, pomdp->d_T, pomdp->d_O, pomdp->d_R, pomdp->d_Z, pomdp->d_B, pbvi->d_Gamma, pbvi->d_alphaBA, pbvi->d_GammaPrime, pbvi->d_pi); } else { hipLaunchKernelGGL(( pomdp_pbvi_update_step_gpu), dim3(numBlocks), dim3(pbvi->numThreads) , 0, 0, pomdp->n, pomdp->ns, pomdp->m, pomdp->z, pomdp->r, pomdp->rz, pomdp->gamma, pomdp->d_S, pomdp->d_T, pomdp->d_O, pomdp->d_R, pomdp->d_Z, pomdp->d_B, pbvi->d_GammaPrime, pbvi->d_alphaBA, pbvi->d_Gamma, pbvi->d_pi); } // Check if there was an error executing the kernel. if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to execute the 'pomdp_pbvi_update_step_gpu' kernel."); return NOVA_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (hipDeviceSynchronize() != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to synchronize the device after 'pomdp_pbvi_update_step_gpu' kernel."); return NOVA_ERROR_DEVICE_SYNCHRONIZE; } pbvi->currentHorizon++; return NOVA_SUCCESS; } int pomdp_pbvi_get_policy_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi, POMDPAlphaVectors *policy) { if (pomdp == nullptr || pbvi == nullptr || policy == nullptr) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Invalid arguments."); return NOVA_ERROR_INVALID_DATA; } // Initialize the policy, which allocates memory. int result = pomdp_alpha_vectors_initialize(policy, pomdp->n, pomdp->m, pomdp->r); if (result != NOVA_SUCCESS) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Could not create the policy."); return NOVA_ERROR_POLICY_CREATION; } // Copy the final result of Gamma and pi to the variables provided, from device to host. // This assumes that the memory has been allocated for the variables provided. if (pbvi->currentHorizon % 2 == 0) { if (hipMemcpy(policy->Gamma, pbvi->d_Gamma, pomdp->r * pomdp->n * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Failed to copy memory from device to host for Gamma."); return NOVA_ERROR_MEMCPY_TO_HOST; } } else { if (hipMemcpy(policy->Gamma, pbvi->d_GammaPrime, pomdp->r * pomdp->n * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Failed to copy memory from device to host for Gamma (prime)."); return NOVA_ERROR_MEMCPY_TO_HOST; } } if (hipMemcpy(policy->pi, pbvi->d_pi, pomdp->r * sizeof(unsigned int), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Failed to copy memory from device to host for pi."); return NOVA_ERROR_MEMCPY_TO_HOST; } return NOVA_SUCCESS; } }; // namespace nova
pomdp_pbvi_gpu.cu
/** * The MIT License (MIT) * * Copyright (c) 2015 Kyle Hollins Wray, University of Massachusetts * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include <nova/pomdp/algorithms/pomdp_pbvi_gpu.h> #include <nova/pomdp/utilities/pomdp_model_gpu.h> #include <stdio.h> #include <nova/error_codes.h> #include <nova/constants.h> namespace nova { __global__ void pomdp_pbvi_initialize_alphaBA_gpu(unsigned int n, unsigned int m, unsigned int r, const float *R, float *alphaBA) { unsigned int beliefIndex = blockIdx.x; unsigned int action = blockIdx.y; if (beliefIndex >= r || action >= m) { return; } // Compute Gamma_{a,*} and set it to the first value of alphaBA. Stride here. for (unsigned int s = threadIdx.x; s < n; s += blockDim.x) { alphaBA[beliefIndex * m * n + action * n + s] = R[s * m + action]; } } __global__ void pomdp_pbvi_compute_alphaBA_gpu(unsigned int n, unsigned int ns, unsigned int m, unsigned int z, unsigned int r, unsigned int rz, float gamma, const int *S, const float *T, const float *O, const float *R, const int *Z, const float *B, const float *Gamma, float *alphaBA) { // Since float and unsigned int are 4 bytes each, and we need each array to be the size of // the number of threads, we will need to call this with: // sizeof(float) * numThreads + sizeof(unsigned int) * numThreads. // Note: blockDim.x == numThreads extern __shared__ float sdata[]; float *maxAlphaDotBeta = (float *)sdata; unsigned int *maxAlphaIndex = (unsigned int *)&maxAlphaDotBeta[blockDim.x]; maxAlphaDotBeta[threadIdx.x] = FLT_MIN; maxAlphaIndex[threadIdx.x] = 0; __syncthreads(); unsigned int beliefIndex = blockIdx.x; unsigned int action = blockIdx.y; unsigned int observation = blockIdx.z; if (beliefIndex >= r || action >= m || observation >= z) { return; } // Compute the max alpha vector from Gamma, given the fixed action and observation. // Note: this is the max w.r.t. just the strided elements. The reduction will // be computed afterwards for the max over all alpha-vectors. for (unsigned int alphaIndex = threadIdx.x; alphaIndex < r; alphaIndex += blockDim.x) { float alphaDotBeta = 0.0f; for (unsigned int i = 0; i < rz; i++) { int s = Z[beliefIndex * rz + i]; if (s < 0) { break; } // We compute the value of this state in the alpha-vector, then multiply it by the // belief, and add it to the current dot product value for this alpha-vector. 
float value = 0.0f; for (unsigned int j = 0; j < ns; j++) { int sp = S[s * m * ns + action * ns + j]; if (sp < 0) { break; } value += T[s * m * ns + action * ns + j] * O[action * n * z + sp * z + observation] * Gamma[alphaIndex * n + sp]; } __syncthreads(); value *= gamma; alphaDotBeta += value * B[beliefIndex * rz + i]; } __syncthreads(); // Store the maximal value and index. if (alphaIndex == threadIdx.x || alphaDotBeta > maxAlphaDotBeta[threadIdx.x]) { maxAlphaDotBeta[threadIdx.x] = alphaDotBeta; maxAlphaIndex[threadIdx.x] = alphaIndex; } } // Note: The above code essentially does the first add during load. It takes care of *all* // the other elements *outside* the number of threads we have. In other words, starting here, // we already have computed part of the maxAlphaDotBeta and maxAlphaIndex; we just need to // finish the rest quickly, using a reduction. __syncthreads(); // Use reduction to compute the max overall alpha-vector. for (unsigned int alphaIndex = blockDim.x / 2; alphaIndex > 0; alphaIndex >>= 1) { if (threadIdx.x < alphaIndex && threadIdx.x < r && threadIdx.x + alphaIndex < r) { if (maxAlphaDotBeta[threadIdx.x] < maxAlphaDotBeta[threadIdx.x + alphaIndex]) { maxAlphaDotBeta[threadIdx.x] = maxAlphaDotBeta[threadIdx.x + alphaIndex]; maxAlphaIndex[threadIdx.x] = maxAlphaIndex[threadIdx.x + alphaIndex]; } } __syncthreads(); } // Now we can compute the alpha-vector component for this observation, since we have the max. // We will need to compute the dot product anyway, so let's just distribute the belief over the // sum over observations, and add it all up here. // Note: This re-uses the thread to stride over states now. for (unsigned int s = threadIdx.x; s < n; s += blockDim.x) { // We compute the value of this state in the alpha-vector, then multiply it by the belief, // and add it to the current dot product value for this alpha-vector. float value = 0.0f; for (unsigned int i = 0; i < ns; i++) { int sp = S[s * m * ns + action * ns + i]; if (sp < 0) { break; } // Note: maxAlphaIndex[0] holds the maximal index value computed from the reduction. value += T[s * m * ns + action * ns + i] * O[action * n * z + sp * z + observation] * Gamma[maxAlphaIndex[0] * n + sp]; } __syncthreads(); alphaBA[beliefIndex * m * n + action * n + s] += gamma * value; } } __global__ void pomdp_pbvi_update_step_gpu(unsigned int n, unsigned int ns, unsigned int m, unsigned int z, unsigned int r, unsigned int rz, float gamma, const int *S, const float *T, const float *O, const float *R, const int *Z, const float *B, const float *Gamma, float *alphaBA, float *GammaPrime, unsigned int *piPrime) { // Each block will run a different belief. Our overall goal: Compute the value // of GammaPrime[beliefIndex * n + ???] and piPrime[beliefIndex]. unsigned int beliefIndex = blockIdx.x * blockDim.x + threadIdx.x; if (beliefIndex >= r) { return; } // We want to find the action that maximizes the value, store it in piPrime, as well as // its alpha-vector GammaPrime. float maxActionValue = FLT_MIN; for (unsigned int action = 0; action < m; action++) { // Only execute if the action is available. //if (available[beliefIndex * m + action]) { // The potential alpha-vector has been computed, so compute the value with respect // to the belief state. float actionValue = 0.0f; for (unsigned int i = 0; i < rz; i++) { int s = Z[beliefIndex * rz + i]; if (s < 0) { break; } actionValue += alphaBA[beliefIndex * m * n + action * n + s] * B[beliefIndex * rz + i]; } // If this was larger, then overwrite piPrime and GammaPrime's values. 
if (actionValue > maxActionValue) { maxActionValue = actionValue; piPrime[beliefIndex] = action; } //} //__syncthreads(); } memcpy(&GammaPrime[beliefIndex * n], &alphaBA[beliefIndex * m * n + piPrime[beliefIndex] * n], n * sizeof(float)); //for (unsigned int s = 0; s < n; s++) { // GammaPrime[beliefIndex * n + s] = alphaBA[beliefIndex * m * n + piPrime[beliefIndex] * n + s]; //} } int pomdp_pbvi_initialize_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi) { // Reset the current horizon. pbvi->currentHorizon = 0; // Create the device-side Gamma. if (cudaMalloc(&pbvi->d_Gamma, pomdp->r * pomdp->n * sizeof(float)) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for Gamma."); return NOVA_ERROR_DEVICE_MALLOC; } if (cudaMemcpy(pbvi->d_Gamma, pbvi->GammaInitial, pomdp->r * pomdp->n * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to copy memory from host to device for Gamma."); return NOVA_ERROR_MEMCPY_TO_DEVICE; } if (cudaMalloc(&pbvi->d_GammaPrime, pomdp->r * pomdp->n * sizeof(float)) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for Gamma (prime)."); return NOVA_ERROR_DEVICE_MALLOC; } if (cudaMemcpy(pbvi->d_GammaPrime, pbvi->GammaInitial, pomdp->r * pomdp->n * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to copy memory from host to device for Gamma (prime)."); return NOVA_ERROR_MEMCPY_TO_DEVICE; } // Create the device-side pi. if (cudaMalloc(&pbvi->d_pi, pomdp->r * sizeof(unsigned int)) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for pi."); return NOVA_ERROR_DEVICE_MALLOC; } // Create the device-side memory for the intermediate variable alphaBA. if (cudaMalloc(&pbvi->d_alphaBA, pomdp->r * pomdp->m * pomdp->n * sizeof(float)) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_initialize_gpu]: %s\n", "Failed to allocate device-side memory for alphaBA."); return NOVA_ERROR_DEVICE_MALLOC; } return NOVA_SUCCESS; } int pomdp_pbvi_execute_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi, POMDPAlphaVectors *policy) { // The result from calling other functions. int result; // Ensure the data is valid. if (pomdp == nullptr || pomdp->n == 0 || pomdp->ns == 0 || pomdp->m == 0 || pomdp->z == 0 || pomdp->r == 0 || pomdp->rz == 0 || pomdp->d_S == nullptr || pomdp->d_T == nullptr || pomdp->d_O == nullptr || pomdp->d_R == nullptr || pomdp->d_Z == nullptr || pomdp->d_B == nullptr || pomdp->gamma < 0.0f || pomdp->gamma > 1.0f || pomdp->horizon < 1 || pbvi == nullptr || pbvi->GammaInitial == nullptr || policy == nullptr) { fprintf(stderr, "Error[pomdp_pbvi_execute_gpu]: %s\n", "Invalid arguments."); return NOVA_ERROR_INVALID_DATA; } // Ensure threads are correct. if (pbvi->numThreads % 32 != 0) { fprintf(stderr, "Error[pomdp_pbvi_execute_gpu]: %s\n", "Invalid number of threads."); return NOVA_ERROR_INVALID_CUDA_PARAM; } result = pomdp_pbvi_initialize_gpu(pomdp, pbvi); if (result != NOVA_SUCCESS) { return result; } // For each of the updates, run PBVI. Note that the currentHorizon is initialized to zero // above, and is updated in the update function below. 
while (pbvi->currentHorizon < pomdp->horizon) { //printf("PBVI (GPU Version) -- Iteration %i of %i\n", pomdp->currentHorizon, pomdp->horizon); result = pomdp_pbvi_update_gpu(pomdp, pbvi); if (result != NOVA_SUCCESS) { return result; } } result = pomdp_pbvi_get_policy_gpu(pomdp, pbvi, policy); if (result != NOVA_SUCCESS) { return result; } result = pomdp_pbvi_uninitialize_gpu(pomdp, pbvi); if (result != NOVA_SUCCESS) { return result; } return NOVA_SUCCESS; } int pomdp_pbvi_uninitialize_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi) { int result; result = NOVA_SUCCESS; // Reset the current horizon. pbvi->currentHorizon = 0; if (pbvi->d_Gamma != nullptr) { if (cudaFree(pbvi->d_Gamma) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for the Gamma (the alpha-vectors)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_Gamma = nullptr; if (pbvi->d_GammaPrime != nullptr) { if (cudaFree(pbvi->d_GammaPrime) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for the GammaPrime (the alpha-vectors' copy)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_GammaPrime = nullptr; if (pbvi->d_pi != nullptr) { if (cudaFree(pbvi->d_pi) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for the pi (the policy)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_pi = nullptr; if (pbvi->d_alphaBA != nullptr) { if (cudaFree(pbvi->d_alphaBA) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_uninitialize_gpu]: %s\n", "Failed to allocate device-side memory for alphaBA (alpha-vector collection)."); result = NOVA_ERROR_DEVICE_FREE; } } pbvi->d_alphaBA = nullptr; return result; } int pomdp_pbvi_update_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi) { // The number of blocks in the main CUDA kernel call. int numBlocks; pomdp_pbvi_initialize_alphaBA_gpu<<< dim3(pomdp->r, pomdp->m, 1), pbvi->numThreads >>>( pomdp->n, pomdp->m, pomdp->r, pomdp->d_R, pbvi->d_alphaBA); // Check if there was an error executing the kernel. if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to execute the 'initialization of alphaBA' kernel."); return NOVA_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (cudaDeviceSynchronize() != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to synchronize the device after 'initialization of alphaBA' kernel."); return NOVA_ERROR_DEVICE_SYNCHRONIZE; } pomdp_pbvi_compute_alphaBA_gpu<<< dim3(pomdp->r, pomdp->m, pomdp->z), pbvi->numThreads, pbvi->numThreads * sizeof(float) + pbvi->numThreads * sizeof(unsigned int) >>>( pomdp->n, pomdp->ns, pomdp->m, pomdp->z, pomdp->r, pomdp->rz, pomdp->gamma, pomdp->d_S, pomdp->d_T, pomdp->d_O, pomdp->d_R, pomdp->d_Z, pomdp->d_B, pbvi->d_Gamma, pbvi->d_alphaBA); // Check if there was an error executing the kernel. if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to execute the 'compute_alphaBA' kernel."); return NOVA_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (cudaDeviceSynchronize() != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to synchronize the device after 'compute_alphaBA' kernel."); return NOVA_ERROR_DEVICE_SYNCHRONIZE; } // Compute the number of blocks. 
numBlocks = (unsigned int)((float)pomdp->r / (float)pbvi->numThreads) + 1; // Execute a kernel for the first three stages of for-loops: B, A, Z, as a 3d-block, // and the 4th stage for-loop over Gamma as the threads. if (pbvi->currentHorizon % 2 == 0) { pomdp_pbvi_update_step_gpu<<< numBlocks, pbvi->numThreads >>>( pomdp->n, pomdp->ns, pomdp->m, pomdp->z, pomdp->r, pomdp->rz, pomdp->gamma, pomdp->d_S, pomdp->d_T, pomdp->d_O, pomdp->d_R, pomdp->d_Z, pomdp->d_B, pbvi->d_Gamma, pbvi->d_alphaBA, pbvi->d_GammaPrime, pbvi->d_pi); } else { pomdp_pbvi_update_step_gpu<<< numBlocks, pbvi->numThreads >>>( pomdp->n, pomdp->ns, pomdp->m, pomdp->z, pomdp->r, pomdp->rz, pomdp->gamma, pomdp->d_S, pomdp->d_T, pomdp->d_O, pomdp->d_R, pomdp->d_Z, pomdp->d_B, pbvi->d_GammaPrime, pbvi->d_alphaBA, pbvi->d_Gamma, pbvi->d_pi); } // Check if there was an error executing the kernel. if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to execute the 'pomdp_pbvi_update_step_gpu' kernel."); return NOVA_ERROR_KERNEL_EXECUTION; } // Wait for the kernel to finish before looping more. if (cudaDeviceSynchronize() != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_update_gpu]: %s\n", "Failed to synchronize the device after 'pomdp_pbvi_update_step_gpu' kernel."); return NOVA_ERROR_DEVICE_SYNCHRONIZE; } pbvi->currentHorizon++; return NOVA_SUCCESS; } int pomdp_pbvi_get_policy_gpu(const POMDP *pomdp, POMDPPBVIGPU *pbvi, POMDPAlphaVectors *policy) { if (pomdp == nullptr || pbvi == nullptr || policy == nullptr) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Invalid arguments."); return NOVA_ERROR_INVALID_DATA; } // Initialize the policy, which allocates memory. int result = pomdp_alpha_vectors_initialize(policy, pomdp->n, pomdp->m, pomdp->r); if (result != NOVA_SUCCESS) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Could not create the policy."); return NOVA_ERROR_POLICY_CREATION; } // Copy the final result of Gamma and pi to the variables provided, from device to host. // This assumes that the memory has been allocated for the variables provided. if (pbvi->currentHorizon % 2 == 0) { if (cudaMemcpy(policy->Gamma, pbvi->d_Gamma, pomdp->r * pomdp->n * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Failed to copy memory from device to host for Gamma."); return NOVA_ERROR_MEMCPY_TO_HOST; } } else { if (cudaMemcpy(policy->Gamma, pbvi->d_GammaPrime, pomdp->r * pomdp->n * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Failed to copy memory from device to host for Gamma (prime)."); return NOVA_ERROR_MEMCPY_TO_HOST; } } if (cudaMemcpy(policy->pi, pbvi->d_pi, pomdp->r * sizeof(unsigned int), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Error[pomdp_pbvi_get_policy_gpu]: %s\n", "Failed to copy memory from device to host for pi."); return NOVA_ERROR_MEMCPY_TO_HOST; } return NOVA_SUCCESS; } }; // namespace nova
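// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the nova sources above): the
// pomdp_pbvi_compute_alphaBA_gpu kernel keeps a per-thread best value/index
// pair (maxAlphaDotBeta / maxAlphaIndex) in shared memory and then runs a tree
// reduction to find the maximizing alpha-vector. The standalone program below
// shows that argmax-reduction pattern in isolation, stripped of the POMDP
// indexing; every name here (argmax_reduce, ...) is illustrative only.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cfloat>
#include <cuda_runtime.h>

__global__ void argmax_reduce(const float *values, unsigned int n,
                              float *bestValue, unsigned int *bestIndex)
{
    extern __shared__ float shared[];
    float *maxVal = shared;                                      // blockDim.x floats
    unsigned int *maxIdx = (unsigned int *)&shared[blockDim.x];  // blockDim.x indices

    // Strided load: each thread keeps the best element it has seen so far,
    // which is the "first add during load" idea noted in the kernel above.
    maxVal[threadIdx.x] = -FLT_MAX;
    maxIdx[threadIdx.x] = 0;
    for (unsigned int i = threadIdx.x; i < n; i += blockDim.x) {
        if (values[i] > maxVal[threadIdx.x]) {
            maxVal[threadIdx.x] = values[i];
            maxIdx[threadIdx.x] = i;
        }
    }
    __syncthreads();

    // Tree reduction over the shared arrays; the index travels with its value.
    // Assumes blockDim.x is a power of two.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s && maxVal[threadIdx.x] < maxVal[threadIdx.x + s]) {
            maxVal[threadIdx.x] = maxVal[threadIdx.x + s];
            maxIdx[threadIdx.x] = maxIdx[threadIdx.x + s];
        }
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        *bestValue = maxVal[0];
        *bestIndex = maxIdx[0];
    }
}

int main()
{
    const unsigned int n = 1000;
    float h_values[n];
    for (unsigned int i = 0; i < n; i++) {
        h_values[i] = (float)((i * 37) % 997);   // arbitrary test data
    }

    float *d_values, *d_bestValue;
    unsigned int *d_bestIndex;
    cudaMalloc(&d_values, n * sizeof(float));
    cudaMalloc(&d_bestValue, sizeof(float));
    cudaMalloc(&d_bestIndex, sizeof(unsigned int));
    cudaMemcpy(d_values, h_values, n * sizeof(float), cudaMemcpyHostToDevice);

    const unsigned int threads = 256;
    const size_t sharedBytes = threads * (sizeof(float) + sizeof(unsigned int));
    argmax_reduce<<<1, threads, sharedBytes>>>(d_values, n, d_bestValue, d_bestIndex);

    float bestValue;
    unsigned int bestIndex;
    cudaMemcpy(&bestValue, d_bestValue, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&bestIndex, d_bestIndex, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("max value %f at index %u\n", bestValue, bestIndex);

    cudaFree(d_values);
    cudaFree(d_bestValue);
    cudaFree(d_bestIndex);
    return 0;
}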
32bf91b9aca233f5d35ba9cac2e497286f71a7b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> static hipStream_t *streams; // CUDA kernel to pause for at least num_cycle cycles __global__ void sleep(int64_t num_cycles) { int64_t cycles = 0; int64_t start = clock64(); while(cycles < num_cycles) { cycles = clock64() - start; } } // Returns number of cycles required for requested seconds extern "C" int64_t get_cycles(float seconds) { // Get device frequency in KHz int64_t Hz; hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); Hz = int64_t(prop.clockRate) * 1000; // Calculate number of cycles to wait int64_t num_cycles; num_cycles = (int64_t)(seconds * Hz); return num_cycles; } // Create streams extern "C" void create_streams(int num_streams) { // Allocate streams streams = (hipStream_t *) malloc((num_streams+1)*sizeof(hipStream_t)); // Default stream streams[0] = NULL; // Primer kernel launch hipLaunchKernelGGL(( sleep), dim3(1), dim3(1) , 0, 0, 1); // Create streams for(int i = 1; i <= num_streams; i++) hipStreamCreate(&streams[i]); } // Launches a kernel that sleeps for num_cycles extern "C" void sleep_kernel(int64_t num_cycles, int stream_id) { // Launch a single GPU thread to sleep int blockSize, gridSize; blockSize = 1; gridSize = 1; // Execute the kernel hipLaunchKernelGGL(( sleep), dim3(gridSize), dim3(blockSize), 0, streams[stream_id] , num_cycles); } // Wait for stream to complete extern "C" void wait_for_stream(int stream_id) { hipStreamSynchronize(streams[stream_id]); } // Wait for streams to complete extern "C" void wait_for_streams(int num_streams) { for(int i = 1; i <= num_streams; i++) hipStreamSynchronize(streams[i]); } // Destroy stream objects extern "C" void destroy_streams(int num_streams) { // Clean up stream for(int i = 1; i <= num_streams; i++) hipStreamDestroy(streams[i]); free(streams); }
32bf91b9aca233f5d35ba9cac2e497286f71a7b4.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> static cudaStream_t *streams; // CUDA kernel to pause for at least num_cycle cycles __global__ void sleep(int64_t num_cycles) { int64_t cycles = 0; int64_t start = clock64(); while(cycles < num_cycles) { cycles = clock64() - start; } } // Returns number of cycles required for requested seconds extern "C" int64_t get_cycles(float seconds) { // Get device frequency in KHz int64_t Hz; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); Hz = int64_t(prop.clockRate) * 1000; // Calculate number of cycles to wait int64_t num_cycles; num_cycles = (int64_t)(seconds * Hz); return num_cycles; } // Create streams extern "C" void create_streams(int num_streams) { // Allocate streams streams = (cudaStream_t *) malloc((num_streams+1)*sizeof(cudaStream_t)); // Default stream streams[0] = NULL; // Primer kernel launch sleep<<< 1, 1 >>>(1); // Create streams for(int i = 1; i <= num_streams; i++) cudaStreamCreate(&streams[i]); } // Launches a kernel that sleeps for num_cycles extern "C" void sleep_kernel(int64_t num_cycles, int stream_id) { // Launch a single GPU thread to sleep int blockSize, gridSize; blockSize = 1; gridSize = 1; // Execute the kernel sleep<<< gridSize, blockSize, 0, streams[stream_id] >>>(num_cycles); } // Wait for stream to complete extern "C" void wait_for_stream(int stream_id) { cudaStreamSynchronize(streams[stream_id]); } // Wait for streams to complete extern "C" void wait_for_streams(int num_streams) { for(int i = 1; i <= num_streams; i++) cudaStreamSynchronize(streams[i]); } // Destroy stream objects extern "C" void destroy_streams(int num_streams) { // Clean up stream for(int i = 1; i <= num_streams; i++) cudaStreamDestroy(streams[i]); free(streams); }
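// ---------------------------------------------------------------------------
// Illustrative sketch (not from the original file pair above): a minimal host
// driver for the extern "C" helpers defined above, showing the intended call
// order (create_streams -> sleep_kernel per stream -> wait_for_streams ->
// destroy_streams). It assumes this driver is compiled and linked together
// with the file above; only functions declared there are used.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cstdint>

extern "C" int64_t get_cycles(float seconds);
extern "C" void create_streams(int num_streams);
extern "C" void sleep_kernel(int64_t num_cycles, int stream_id);
extern "C" void wait_for_streams(int num_streams);
extern "C" void destroy_streams(int num_streams);

int main()
{
    const int num_streams = 4;

    // One sleep kernel per stream, each spinning for roughly half a second.
    int64_t cycles = get_cycles(0.5f);

    create_streams(num_streams);

    // Streams are 1-indexed in the helpers above (index 0 is the default
    // stream), so the kernels below can overlap on the device.
    for (int i = 1; i <= num_streams; i++) {
        sleep_kernel(cycles, i);
    }

    // Block until every stream has drained, then release the stream objects.
    wait_for_streams(num_streams);
    destroy_streams(num_streams);

    printf("launched and completed %d concurrent sleep kernels\n", num_streams);
    return 0;
}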
f23758c41e80342b96655934a1547c9867e47bb1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

__global__ void add1(int *A, int *B, int *C, int n){
    //each thread computes the sum of elements row-wise
    int row = threadIdx.x;
    for(int i=0;i<n;i++){
        C[row*n+i] = A[row*n +i] + B[row*n+i];
    }
}

__global__ void add2(int *A, int *B, int *C, int m){
    //each thread computes the sum of elements column-wise
    int col = threadIdx.x;
    for(int i=0;i<m;i++){
        C[col*m+i] = A[col*m +i] + B[col*m+i];
    }
}

__global__ void add3(int *A, int *B, int *C){
    //each thread computes sum of 2 elements
    int ele = threadIdx.x, row=blockIdx.x, no_eles = blockDim.x;
    C[row*no_eles + ele] = A[row*no_eles + ele] + B[row*no_eles + ele];
}

int main(){
    int *a, *b, *t, m, n;
    int *d_a, *d_b, *d_t;
    printf("Enter the value of m: ");
    scanf("%d",&m);
    printf("Enter the value of n: ");
    scanf("%d",&n);
    int size = sizeof(int)*m*n;
    a=(int*)malloc(size);
    b=(int*)malloc(size);
    t=(int*)malloc(size);
    printf("Enter input matrix A: \n");
    for(int i=0; i<m*n; i++)
        scanf("%d",&a[i]);
    printf("Enter input matrix B: \n");
    for(int i=0; i<m*n; i++)
        scanf("%d",&b[i]);
    hipMalloc((void**)&d_a,size);
    hipMalloc((void**)&d_b,size);
    hipMalloc((void**)&d_t,size);
    hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
    hipMemcpy(d_b,b,size,hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( add1), dim3(1),dim3(m), 0, 0, d_a,d_b,d_t,n);
    hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
    printf("Resultant matrix ADD1:\n");
    for(int i=0; i<m; i++){
        for(int j=0; j<n; j++){
            printf("%d ",t[i*n+j]);
        }
        printf("\n");
    }

    hipLaunchKernelGGL(( add2), dim3(1),dim3(n), 0, 0, d_a,d_b,d_t,m);
    hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
    printf("Resultant matrix ADD2:\n");
    for(int i=0; i<m; i++){
        for(int j=0; j<n; j++){
            printf("%d ",t[i*n+j]);
        }
        printf("\n");
    }

    hipLaunchKernelGGL(( add3), dim3(m),dim3(n), 0, 0, d_a,d_b,d_t);
    hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
    printf("Resultant matrix ADD3:\n");
    for(int i=0; i<m; i++){
        for(int j=0; j<n; j++){
            printf("%d ",t[i*n+j]);
        }
        printf("\n");
    }

    // release device and host buffers
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_t);
    free(a);
    free(b);
    free(t);
    return 0;
}
f23758c41e80342b96655934a1547c9867e47bb1.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> __global__ void add1(int *A, int *B, int *C, int n){ //each thread computes the sum of elements row-wise int row = threadIdx.x; for(int i=0;i<n;i++){ C[row*n+i] = A[row*n +i] + B[row*n+i]; } } __global__ void add2(int *A, int *B, int *C, int m){ //each thread computes the sum of elements column-wise int col = threadIdx.x; for(int i=0;i<m;i++){ C[col*m+i] = A[col*m +i] + B[col*m+i]; } } __global__ void add3(int *A, int *B, int *C){ //each thread computes sum of 2 elements int ele = threadIdx.x, row=blockIdx.x, no_eles = blockDim.x; C[row*no_eles + ele] = A[row*no_eles + ele] + B[row*no_eles + ele]; } int main(){ int *a, *b, *t, m, n; int *d_a, *d_b, *d_t; printf("Enter the value of m: "); scanf("%d",&m); printf("Enter the value of n: "); scanf("%d",&n); int size = sizeof(int)*m*n; a=(int*)malloc(size); b=(int*)malloc(size); t=(int*)malloc(size); printf("Enter input matrix A: \n"); for(int i=0; i<m*n; i++) scanf("%d",&a[i]); printf("Enter input matrix B: \n"); for(int i=0; i<m*n; i++) scanf("%d",&b[i]); cudaMalloc((void**)&d_a,size); cudaMalloc((void**)&d_b,size); cudaMalloc((void**)&d_t,size); cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice); add1<<<1,m>>>(d_a,d_b,d_t,n); cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost); printf("Resultant matrix ADD3:\n"); for(int i=0; i<m; i++){ for(int j=0; j<n; j++){ printf("%d ",t[i*n+j]); } printf("\n"); } add2<<<1,n>>>(d_a,d_b,d_t,m); cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost); printf("Resultant matrix ADD3:\n"); for(int i=0; i<m; i++){ for(int j=0; j<n; j++){ printf("%d ",t[i*n+j]); } printf("\n"); } add3<<<m,n>>>(d_a,d_b,d_t); cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost); printf("Resultant matrix ADD3:\n"); for(int i=0; i<m; i++){ for(int j=0; j<n; j++){ printf("%d ",t[i*n+j]); } printf("\n"); } cudaFree(d_a); cudaFree(d_t); return 0; }
ee0b95d2f2e4ef9e0a3a230e8bd379f0b7051ee3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "assert.h" #define WORK_PER_THREAD 4 __global__ void saxpy_parallel(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; i *= WORK_PER_THREAD; if (i < n) { #pragma unroll for(int j=0; j<WORK_PER_THREAD; j++) y[i+j] = a * x[i+j] + y[i+j]; } } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(-1); } } int main() { int N = 32 * 10000000; // allocate vectors on host int size = N * sizeof(float); float* h_x = (float*)malloc(size); float* h_y = (float*)malloc(size); // allocate device memory float* d_x; float* d_y; hipMalloc((void**) &d_x, size); hipMalloc((void**) &d_y, size); // put values in h_x and h_y for (int i = 0; i<N ;i++) { h_x[i]= (float) i; h_y[i]= (float) i; } hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice); hipMemcpy(d_y, h_y, size, hipMemcpyHostToDevice); // calculate number of blocks needed for N int nblocks = ((N / WORK_PER_THREAD)+255)/256; // call hipLaunchKernelGGL(( saxpy_parallel), dim3(nblocks),dim3(256), 0, 0, N , 1.0, d_x, d_y); // Copy results back from device memory to host memory // implicty waits for threads to excute hipMemcpy(h_y, d_y, size, hipMemcpyDeviceToHost); for(int i = 0; i<N; i++) assert(h_y[i] == 2*i); // Check for any CUDA errors checkCUDAError("hipMemcpy calls"); hipFree(d_x); hipFree(d_y); free(h_x); free(h_y); return 0; }
ee0b95d2f2e4ef9e0a3a230e8bd379f0b7051ee3.cu
#include <iostream> #include "assert.h" #define WORK_PER_THREAD 4 __global__ void saxpy_parallel(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; i *= WORK_PER_THREAD; if (i < n) { #pragma unroll for(int j=0; j<WORK_PER_THREAD; j++) y[i+j] = a * x[i+j] + y[i+j]; } } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(-1); } } int main() { int N = 32 * 10000000; // allocate vectors on host int size = N * sizeof(float); float* h_x = (float*)malloc(size); float* h_y = (float*)malloc(size); // allocate device memory float* d_x; float* d_y; cudaMalloc((void**) &d_x, size); cudaMalloc((void**) &d_y, size); // put values in h_x and h_y for (int i = 0; i<N ;i++) { h_x[i]= (float) i; h_y[i]= (float) i; } cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice); // calculate number of blocks needed for N int nblocks = ((N / WORK_PER_THREAD)+255)/256; // call saxpy_parallel<<<nblocks,256>>>(N , 1.0, d_x, d_y); // Copy results back from device memory to host memory // implicty waits for threads to excute cudaMemcpy(h_y, d_y, size, cudaMemcpyDeviceToHost); for(int i = 0; i<N; i++) assert(h_y[i] == 2*i); // Check for any CUDA errors checkCUDAError("cudaMemcpy calls"); cudaFree(d_x); cudaFree(d_y); free(h_x); free(h_y); return 0; }
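// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file pair above): in saxpy_parallel the
// guard `if (i < n)` only checks the first of the WORK_PER_THREAD elements,
// which is safe for the N used in main() (a multiple of 4) but would step past
// the end of the arrays for other sizes. The standalone variant below keeps
// the unrolled layout and guards every element; the name saxpy_guarded is
// illustrative only.
// ---------------------------------------------------------------------------
#include <cassert>
#include <cstdlib>
#include <cuda_runtime.h>

#define WORK_PER_THREAD 4

__global__ void saxpy_guarded(int n, float a, const float *x, float *y)
{
    int i = (blockIdx.x * blockDim.x + threadIdx.x) * WORK_PER_THREAD;
    #pragma unroll
    for (int j = 0; j < WORK_PER_THREAD; j++) {
        if (i + j < n) {                       // guard every element, not just the first
            y[i + j] = a * x[i + j] + y[i + j];
        }
    }
}

int main()
{
    const int n = 1000003;                     // deliberately not a multiple of 4
    const size_t size = n * sizeof(float);

    float *h_x = (float *)malloc(size), *h_y = (float *)malloc(size);
    for (int i = 0; i < n; i++) { h_x[i] = 1.0f; h_y[i] = 2.0f; }

    float *d_x, *d_y;
    cudaMalloc(&d_x, size); cudaMalloc(&d_y, size);
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);

    int threads = 256;
    int blocks = (n + threads * WORK_PER_THREAD - 1) / (threads * WORK_PER_THREAD);
    saxpy_guarded<<<blocks, threads>>>(n, 3.0f, d_x, d_y);

    cudaMemcpy(h_y, d_y, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) assert(h_y[i] == 5.0f);   // 3*1 + 2

    cudaFree(d_x); cudaFree(d_y);
    free(h_x); free(h_y);
    return 0;
}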
d4f72ce4814b0dd1425a117599977d4439cfb4cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Oleh Semeniv ([email protected]) // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/updatersHelpers.h> #include <helpers/PointersManager.h> #include <math/platformmath.h> #include <math/templatemath.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ void adaGradUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo, const T lr, const T epsilon) { const auto x = reinterpret_cast<const T*>(vx); const auto init = reinterpret_cast<const T*>(vin); auto up = reinterpret_cast<T*>(vz); auto st = reinterpret_cast<T*>(vst); __shared__ bool bEWS, bOrdering, bXZsame, bXInSame, bXStSame; __shared__ Nd4jLong xLen; if (threadIdx.x == 0) { xLen = shape::length(xShapeInfo); bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) && 1 == shape::elementWiseStride(stShapeInfo) && 1 == shape::elementWiseStride(inShapeInfo); bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(stShapeInfo) && shape::order(xShapeInfo) == shape::order(inShapeInfo); bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); bXInSame = shape::haveSameShapeAndStrides(xShapeInfo, inShapeInfo); bXStSame = shape::haveSameShapeAndStrides(xShapeInfo, stShapeInfo); } __syncthreads(); int coords[MAX_RANK]; for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) { auto xOffset = i, zOffset = i, initOffset = i, stOffset = i; if (!bEWS || !bOrdering) { shape::index2coords(i, xShapeInfo, coords); xOffset = shape::getOffset(xShapeInfo, coords); zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords); initOffset = bXInSame ? xOffset : shape::getOffset(inShapeInfo, coords); stOffset = bXStSame ? 
xOffset : shape::getOffset(stShapeInfo, coords); } st[stOffset] = init[initOffset] + x[xOffset] * x[xOffset]; up[zOffset] = (lr * x[xOffset]) / (math::nd4j_sqrt<T, T>(st[stOffset]) + epsilon); } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void adaGradUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo, const double dLr, const double dEpsilon) { const T lr = static_cast<T>(dLr); const T epsilon = static_cast<T>(dEpsilon); adaGradUpdaterCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, vin, inShapeInfo, vz, zShapeInfo, vst, stShapeInfo, lr, epsilon); } /////////////////////////////////////////////////////////////////// void updaterAdaGrad(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initState, NDArray& update, NDArray& stateH, const double dLr, const double dEpsilon) { PointersManager manager(context, "adaGradUpdater"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({ &update, &stateH }, { &gradient, &initState }); BUILD_SINGLE_SELECTOR(gradient.dataType(), adaGradUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.getSpecialBuffer(), gradient.getSpecialShapeInfo(), initState.getSpecialBuffer(), initState.getSpecialShapeInfo(), update.getSpecialBuffer(), update.getSpecialShapeInfo(), stateH.getSpecialBuffer(), stateH.getSpecialShapeInfo(), dLr, dEpsilon), FLOAT_TYPES); NDArray::registerSpecialUse({ &update, &stateH }, { &gradient, &initState }); manager.synchronize(); } } } }
d4f72ce4814b0dd1425a117599977d4439cfb4cd.cu
/******************************************************************************* * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Oleh Semeniv ([email protected]) // #include <system/op_boilerplate.h> #include <ops/declarable/helpers/updatersHelpers.h> #include <helpers/PointersManager.h> #include <math/platformmath.h> #include <math/templatemath.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ void adaGradUpdaterCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo, const T lr, const T epsilon) { const auto x = reinterpret_cast<const T*>(vx); const auto init = reinterpret_cast<const T*>(vin); auto up = reinterpret_cast<T*>(vz); auto st = reinterpret_cast<T*>(vst); __shared__ bool bEWS, bOrdering, bXZsame, bXInSame, bXStSame; __shared__ Nd4jLong xLen; if (threadIdx.x == 0) { xLen = shape::length(xShapeInfo); bEWS = 1 == shape::elementWiseStride(xShapeInfo) && 1 == shape::elementWiseStride(zShapeInfo) && 1 == shape::elementWiseStride(stShapeInfo) && 1 == shape::elementWiseStride(inShapeInfo); bOrdering = shape::order(xShapeInfo) == shape::order(zShapeInfo) && shape::order(xShapeInfo) == shape::order(stShapeInfo) && shape::order(xShapeInfo) == shape::order(inShapeInfo); bXZsame = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); bXInSame = shape::haveSameShapeAndStrides(xShapeInfo, inShapeInfo); bXStSame = shape::haveSameShapeAndStrides(xShapeInfo, stShapeInfo); } __syncthreads(); int coords[MAX_RANK]; for (Nd4jLong i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) { auto xOffset = i, zOffset = i, initOffset = i, stOffset = i; if (!bEWS || !bOrdering) { shape::index2coords(i, xShapeInfo, coords); xOffset = shape::getOffset(xShapeInfo, coords); zOffset = bXZsame ? xOffset : shape::getOffset(zShapeInfo, coords); initOffset = bXInSame ? xOffset : shape::getOffset(inShapeInfo, coords); stOffset = bXStSame ? 
xOffset : shape::getOffset(stShapeInfo, coords); } st[stOffset] = init[initOffset] + x[xOffset] * x[xOffset]; up[zOffset] = (lr * x[xOffset]) / (math::nd4j_sqrt<T, T>(st[stOffset]) + epsilon); } } /////////////////////////////////////////////////////////////////// template<typename T> linkage void adaGradUpdaterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t* stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vin, const Nd4jLong* inShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vst, const Nd4jLong* stShapeInfo, const double dLr, const double dEpsilon) { const T lr = static_cast<T>(dLr); const T epsilon = static_cast<T>(dEpsilon); adaGradUpdaterCuda<T> << <blocksPerGrid, threadsPerBlock, 256, * stream >> > (vx, xShapeInfo, vin, inShapeInfo, vz, zShapeInfo, vst, stShapeInfo, lr, epsilon); } /////////////////////////////////////////////////////////////////// void updaterAdaGrad(sd::LaunchContext* context, const NDArray& gradient, const NDArray& initState, NDArray& update, NDArray& stateH, const double dLr, const double dEpsilon) { PointersManager manager(context, "adaGradUpdater"); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradient.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; NDArray::prepareSpecialUse({ &update, &stateH }, { &gradient, &initState }); BUILD_SINGLE_SELECTOR(gradient.dataType(), adaGradUpdaterCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), gradient.getSpecialBuffer(), gradient.getSpecialShapeInfo(), initState.getSpecialBuffer(), initState.getSpecialShapeInfo(), update.getSpecialBuffer(), update.getSpecialShapeInfo(), stateH.getSpecialBuffer(), stateH.getSpecialShapeInfo(), dLr, dEpsilon), FLOAT_TYPES); NDArray::registerSpecialUse({ &update, &stateH }, { &gradient, &initState }); manager.synchronize(); } } } }
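// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the libnd4j sources above): stripped of the
// shape/stride handling, adaGradUpdaterCuda applies, per element,
//     state  = initState + grad * grad
//     update = lr * grad / (sqrt(state) + epsilon)
// The standalone kernel below implements just that rule on flat arrays as a
// reference for what the kernel above computes; all names are illustrative.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void ada_grad_flat(const float *grad, const float *initState,
                              float *update, float *state,
                              int len, float lr, float epsilon)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len) {
        state[i]  = initState[i] + grad[i] * grad[i];            // accumulate squared gradient
        update[i] = lr * grad[i] / (sqrtf(state[i]) + epsilon);  // scaled step
    }
}

int main()
{
    const int len = 1 << 16;
    const float lr = 0.01f, epsilon = 1e-8f;
    const size_t size = len * sizeof(float);

    float *h_grad = (float *)malloc(size), *h_state0 = (float *)malloc(size);
    float *h_update = (float *)malloc(size);
    for (int i = 0; i < len; i++) { h_grad[i] = 0.1f; h_state0[i] = 0.0f; }

    float *d_grad, *d_state0, *d_update, *d_state;
    cudaMalloc(&d_grad, size); cudaMalloc(&d_state0, size);
    cudaMalloc(&d_update, size); cudaMalloc(&d_state, size);
    cudaMemcpy(d_grad, h_grad, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_state0, h_state0, size, cudaMemcpyHostToDevice);

    int threads = 256;
    int blocks = (len + threads - 1) / threads;
    ada_grad_flat<<<blocks, threads>>>(d_grad, d_state0, d_update, d_state, len, lr, epsilon);

    cudaMemcpy(h_update, d_update, size, cudaMemcpyDeviceToHost);
    // Expected per element: 0.01 * 0.1 / (sqrt(0.01) + 1e-8), roughly 0.01
    printf("update[0] = %g\n", h_update[0]);

    cudaFree(d_grad); cudaFree(d_state0); cudaFree(d_update); cudaFree(d_state);
    free(h_grad); free(h_state0); free(h_update);
    return 0;
}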
8c532ea8b81bcd1742cff5bc42dda0f6697f35f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //************************************************************************************/ // Title : Parallel Processing of SAR Signal for Image generation on CUDA Platform //************************************************************************************/ // Program to generate processed image from raw SAR image using parallel // programming language CUDA // ******** Header Files ***********// #include <stdlib.h> #include <math.h> // includes, project #include <hip/hip_runtime.h> #include <hipfft.h> #include <hipfftXt.h> // *****End of Header Files***********// // No of rows and columns and the no of points are defined #define ROW 2048 #define COL 2048 #define NOP 2048 // Definition for matrix multiplication // Thread block size #define BLOCK_SIZE 1 // Basic Matrix dimensions // (chosen as multiples of the thread block size for simplicity) #define WA (24 * BLOCK_SIZE) // Matrix A width #define HA (7 * BLOCK_SIZE) // Matrix A height #define WB (3584 * BLOCK_SIZE) // Matrix B width #define HB WA // Matrix B height #define WC WB // Matrix C width #define HC HA // Matrix C height // Used in matrixMul kernel #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] //************************************************************************************/ // Start of Function prototype // Function to perform block correlation on input data vector void block_corr(int, int, int, hipfftComplex *, hipfftComplex *, hipfftComplex *, hipfftComplex *, hipfftComplex *, hipComplex *, hipComplex *, hipComplex *, hipComplex *, hipComplex *, hipComplex *, hipComplex *, hipComplex *); // Function to Flip matrix in up/down direction void flipud(hipComplex **, int, int, hipComplex **); // Function to Flip matrix in left/right direction void fliplr(hipComplex *, int, hipComplex *); // Function to swap data in blocks of 512 __global__ void swap_data(hipfftComplex *, hipfftComplex *, int); // Function to launch kernel for range data processing __global__ void process_range(hipComplex *, hipComplex *, hipComplex *, hipComplex *, int); // Function to launch kernel for azimuth data processing __global__ void process_az(hipComplex *, hipComplex *, hipComplex *, int); // Function to matrix multiplication kernel __global__ void matrixMul(hipComplex*, hipComplex*, hipComplex*, int, int); // Function to normalize azimuth data after ifft __global__ void divide_by_N_azimuth(hipfftComplex *, int); // Function to normalize range data after ifft __global__ void divide_by_N_range(hipfftComplex *, int); // Function to populate the C matrix with data void populate_C(hipComplex *, int, hipComplex*); // End of function prototype //************************************************************************************/ // Start of main() function int main() { int i, j, k, w, m, flag = 0; int N = NOP / 4; double length = 1349; m = (int)floor(length * 8 / NOP); m = (int)(m + 1)*(NOP / 8); hipError_t cuda_error; /*Reading Raw Image file from image.txt and storing it in a[*][*] */ int row = ROW / 2; // this is done to read only half the image as per logic int col = COL; FILE *fp1; fp1 = fopen("image.txt", "r"); fseek(fp1, row * COL * 4 * sizeof(hipComplex), SEEK_SET); hipComplex **a = (hipComplex **)calloc(row, sizeof(hipComplex *)); printf("Reading complex image\n"); for (i = 0; i < row; i++) { a[i] = (hipComplex *)calloc(col, sizeof(hipComplex)); for (j = 0; j < col; j++) { fscanf(fp1, "%f%f", 
&a[i][j].x, &a[i][j].y); } } fclose(fp1); printf("finished reading image\n"); /*============================================================================*/ /* RANGE IMAGE PROCESSING */ /*============================================================================*/ /* Reading transmit data from x_data.txt and storing it to x_data[*] */ fp1 = fopen("x_data.txt", "r"); fseek(fp1, 0, SEEK_END); int fileLen; fileLen = ftell(fp1); fileLen = fileLen / (4 * sizeof(hipComplex)); // 4*sizeof(hipComplex) is the size of one complex data fseek(fp1, 0, SEEK_SET); hipComplex * x_data = (hipComplex *)calloc(fileLen, sizeof(hipComplex)); for (i = 0; i < fileLen; i++) { fscanf(fp1, "%f%f", &x_data[i].x, &x_data[i].y); } fclose(fp1); /*Calculate cuFFT of transmit data ( x_data[*] ) */ // Rearrange the block of x_data : 3-1 to 1-3 blocks ... one block of 512 points. hipComplex * tx_temp = (hipComplex *)calloc(3 * N, sizeof(hipComplex)); j = 2 * N; for (i = 0; i < N; i++) { tx_temp[i].x = x_data[j].x; tx_temp[i].y = x_data[j].y; tx_temp[i + N].x = x_data[j - N].x; tx_temp[i + N].y = x_data[j - N].y; tx_temp[i + (2 * N)].x = x_data[j - (2 * N)].x; tx_temp[i + (2 * N)].y = x_data[j - (2 * N)].y; j++; } // Making 12 blocks of data from given 3 blocks // each block is of size 1024 = 2*512 = 2 * Previous block size hipComplex * tx_new = (hipComplex *)calloc(12 * 2 * N, sizeof(hipComplex)); int p = 0; for (w = 0; w < 4; w++) { for (i = 0; i < 3; i++) { for (k = (i*N); k < (i + 1)*N; k++) { tx_new[p] = tx_temp[k]; p++; } for (k = (i + 1)*N; k < (i + 2)*N; k++) { tx_new[p].x = 0.0; tx_new[p].y = 0.0; p++; } } } hipError_t error; // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Calculating cuFFFT of transmit data (tx_new[*]) hipfftComplex * d_signal_tx; int mem_size = sizeof(hipfftComplex)*N * 12 * 2; // Memory allocation & Transfer on Device cuda_error = hipMalloc((void**)&d_signal_tx, mem_size); if (cuda_error != hipSuccess) { printf("error in Cuda Malloc...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } cuda_error = hipMemcpy(d_signal_tx, tx_new, mem_size, hipMemcpyHostToDevice); if (cuda_error != hipSuccess) { printf("error in Cuda Mem Copy of d_signal_tx...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Finding cuFFT by declaring a plan hipfftHandle plan; hipfftPlan1d(&plan, 2 * 512, HIPFFT_C2C, 12); hipfftExecC2C(plan, (hipfftComplex *)d_signal_tx, (hipfftComplex *)d_signal_tx, HIPFFT_FORWARD); // FFT results are stored back in to d_signal_tx on device /*Calculate cuFFT of received data ( y_data[*] ) */ /*y_data[*] contains SINGLE ROW of received image data (a[*][*]) */ hipComplex * y_data = (hipComplex *)calloc(NOP, sizeof(hipComplex)); hipComplex * corr_output = (hipComplex *)calloc(2 * NOP, sizeof(hipComplex)); hipComplex * rx_new = (hipComplex *)calloc(N * 12 * 2, sizeof(hipComplex)); // Passing the variable to divide by 2 on device // This can be defined directly on the device - Do it later hipfftComplex 
h_tmp; h_tmp.x = 0.5; h_tmp.y = 0; int mem_size1 = sizeof(hipfftComplex); hipfftComplex *d_tmp; cuda_error = hipMalloc((void**)&d_tmp, mem_size1); if (cuda_error != hipSuccess) { printf("error in Cuda Malloc of d_out...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } cuda_error = hipMemcpy(d_tmp, &h_tmp, mem_size1, hipMemcpyHostToDevice); if (cuda_error != hipSuccess) { printf("error in Cuda Memcpy of d_tmp...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } // CUDA Malloc of Receive data (y_data -> d_signal_rx) & O/P data (d_out) hipfftComplex * d_signal_rx, *d_out, *d_tmp_out; // d_tmp_out is used to temporary store the data on device cuda_error = hipMalloc((void**)&d_signal_rx, mem_size); if (cuda_error != hipSuccess) { printf("error in Cuda Malloc of d_signal_rx...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } cuda_error = hipMalloc((void**)&d_out, mem_size); if (cuda_error != hipSuccess) { printf("error in Cuda Malloc of d_out...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } cuda_error = hipMalloc((void**)&d_tmp_out, mem_size); if (cuda_error != hipSuccess) { printf("error in Cuda Malloc of d_tmp_out...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } // M is the number of blocks of transmit data. int M = 4; int row_size = M + m / N; int col_size = 2 * (M*m / N); // Defining a constant 'A' matrix hipComplex * A = (hipComplex *)calloc(row_size*col_size, sizeof(hipComplex)); A[0].x = 1; // All imaginary values of 'A' matrix elements are zero for (i = 25; i < 28; i++) A[i].x = 1; for (i = 52; i < 57; i++) A[i].x = 1; for (i = 81; i < 87; i++) A[i].x = 1; for (i = 111; i < 116; i++) A[i].x = 1; for (i = 140; i < 143; i++) A[i].x = 1; A[167].x = 1; hipComplex *C = (hipComplex *)calloc(24 * 7 * N, sizeof(hipComplex)); hipComplex *fft_temp = (hipComplex *)calloc(12 * 2 * N, sizeof(hipComplex)); // memory allocation for matrix multiplication // allocate host memory for matrices A and B unsigned int mem_size_A = sizeof(hipComplex) * WA * HA; unsigned int mem_size_B = sizeof(hipComplex) * WB * HB; unsigned int mem_size_C = sizeof(hipComplex) * WC * HC; // allocate device memory hipComplex *d_A, *d_B, *d_C; hipMalloc((void**)&d_A, mem_size_A); hipMalloc((void**)&d_B, mem_size_B); hipMalloc((void**)&d_C, mem_size_C); // allocate host memory for the result hipComplex* h_C = (hipComplex*)malloc(mem_size_C); // end of matrix mult memory allocation hipComplex **range_image = (hipComplex **)calloc(row, sizeof(hipComplex *)); hipComplex **range_image_flip = (hipComplex **)calloc(row, sizeof(hipComplex *)); /*Starting of Range Image Processing */ i = 0; for (j = 0; j < row; j++) { for (k = 0; k < NOP; k++) { y_data[k] = cuConjf(a[j][k]); } // Block formation of receive data of block size = 1024 points p = 0; for (w = 0; w < 4; w++) { for (int x = 0; x < 3; x++) { for (k = (w*N); k < (w + 1)*N; k++) { rx_new[p] = y_data[k]; p++; } for (k = (w + 1)*N; k < (w + 2)*N; k++) { rx_new[p].x = 0.0; rx_new[p].y = 0.0; p++; } } } // Compute Block Correlation of the Transmit & Receive data. 
cuda_error = hipMemcpy(d_signal_rx, rx_new, mem_size, hipMemcpyHostToDevice); if (cuda_error != hipSuccess) { printf("error in Cuda Mem Copy of d_signal_rx...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } block_corr(flag, N, m, d_signal_tx, d_signal_rx, d_out, d_tmp, d_tmp_out, corr_output, A, C, fft_temp, d_A, d_B, d_C, h_C); range_image[j] = (hipComplex *)calloc(col, sizeof(hipComplex)); range_image_flip[j] = (hipComplex *)calloc(col, sizeof(hipComplex)); int z = NOP; for (k = 0; k < NOP; k++) { range_image[i][k] = corr_output[z]; z++; } i++; } printf("Finished range image processing\n"); /*END OF RANGE IMAGE PROCESSING*/ /*============================================================================*/ /* RANGE IMAGE PROCESSING */ /*============================================================================*/ //azimuth processing commences /* fp1=fopen("range_image1.txt","r"); for(i=0;i<row;i++) { for(j=0;j<col;j++) { fscanf(fp1,"%f%f",&range_image[i][j].x,&range_image[i][j].y); } } fclose(fp1);*/ flipud(range_image, row, col, range_image_flip); flag = 1; if (flag == 1) { int nrow = 1024; double L = 701; N = nrow / 4; m = (int)floor(L / N); m = (int)(m + 1)*N; fp1 = fopen("x_data_az.txt", "r"); fseek(fp1, 0, SEEK_END); fileLen = ftell(fp1); fileLen = fileLen / (4 * sizeof(hipComplex)); // 4*sizeof(hipComplex) is the size of one complex data fseek(fp1, 0, SEEK_SET); hipComplex * x_data = (hipComplex *)calloc(fileLen, sizeof(hipComplex)); for (i = 0; i < fileLen; i++) { fscanf(fp1, "%f%f", &x_data[i].x, &x_data[i].y); } fclose(fp1); hipComplex * x_flip_data = (hipComplex *)calloc(N, sizeof(hipComplex)); hipComplex * x_temp_data = (hipComplex *)calloc(N, sizeof(hipComplex)); hipComplex * rx_new = (hipComplex *)calloc(24 * N, sizeof(hipComplex)); hipComplex * tx_temp = (hipComplex *)calloc(3 * N, sizeof(hipComplex)); hipComplex * tx_new = (hipComplex *)calloc(12 * 2 * N, sizeof(hipComplex)); j = 2 * N; for (i = 0; i < N; i++) { tx_temp[i].x = x_data[j].x; tx_temp[i].y = x_data[j].y; tx_temp[i + N].x = x_data[j - N].x; tx_temp[i + N].y = x_data[j - N].y; tx_temp[i + (2 * N)].x = x_data[j - (2 * N)].x; tx_temp[i + (2 * N)].y = x_data[j - (2 * N)].y; j++; } mem_size = sizeof(hipfftComplex)*N * 12 * 2; int p = 0; int z, q; for (w = 0; w < 4; w++) { for (i = 0; i < 3; i++) { q = 0; for (z = i*N; z < (i + 1)*N; z++) { x_temp_data[q] = cuConjf(tx_temp[z]); q++; } fliplr(x_temp_data, N, x_flip_data); for (k = 0; k < N; k++) { tx_new[p] = x_flip_data[k]; p++; } for (k = 0; k < N; k++) { tx_new[p].x = 0.0; tx_new[p].y = 0.0; p++; } } } cuda_error = hipMemcpy(d_signal_tx, tx_new, mem_size, hipMemcpyHostToDevice); if (cuda_error != hipSuccess) { printf("error in Cuda Mem Copy...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } hipfftPlan1d(&plan, 512, HIPFFT_C2C, 12); hipfftExecC2C(plan, (hipfftComplex *)d_signal_tx, (hipfftComplex *)d_signal_tx, HIPFFT_BACKWARD); // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(hipComplex) * size_A; unsigned int size_B = (WB / 2) * HB; unsigned int mem_size_B = sizeof(hipComplex) * size_B; // allocate device memory hipComplex* d_A; hipMalloc((void**)&d_A, mem_size_A); hipComplex* d_B; hipMalloc((void**)&d_B, mem_size_B); // allocate device memory for result unsigned int size_C = (WC / 2) * HC; unsigned int mem_size_C = sizeof(hipComplex) * size_C; hipComplex* d_C; hipMalloc((void**)&d_C, mem_size_C); // allocate host memory for the result hipComplex* h_C = (hipComplex*)malloc(mem_size_C); // end 
of matrix multi memory allocation i = 0; for (j = 0; j < col; j++) { for (k = 0; k < row; k++) { y_data[k] = range_image_flip[k][j]; } p = 0; for (w = 0; w < 4; w++) { for (int x = 0; x < 3; x++) { for (k = (w*N); k < (w + 1)*N; k++) { rx_new[p] = y_data[k]; p++; } for (k = (w + 1)*N; k < (w + 2)*N; k++) { rx_new[p].x = 0.0; rx_new[p].y = 0.0; p++; } } } cuda_error = hipMemcpy(d_signal_rx, rx_new, mem_size, hipMemcpyHostToDevice); if (cuda_error != hipSuccess) { printf("error in Cuda Mem Copy...\n"); printf("%s\n", hipGetErrorString(cuda_error)); } block_corr(flag, N, m, d_signal_tx, d_signal_rx, d_out, d_tmp, d_tmp_out, corr_output, A, C, fft_temp, d_A, d_B, d_C, h_C); w = N; for (k = 0; k < 4 * N; k++) { range_image[k][i] = corr_output[w]; w++; } i++; } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } printf("\nProcessing time: %f (ms)\n\n", msecTotal); // data is written in azimuth file seperately bcos azimuth image is stored in col major format // and not in row major format fp1 = fopen("azimuth_image.txt", "w"); for (i = 0; i < row; i++) { for (j = 0; j < col; j++) { fprintf(fp1, "%lg\t", cuCabsf(range_image[i][j])); } } fclose(fp1); } // Memory free allocated for (i = 0; i < row; i++) { free(range_image[i]); free(range_image_flip[i]); } free(range_image); free(range_image_flip); free(C); free(A); free(fft_temp); free(h_C); hipFree(d_signal_tx); hipFree(d_signal_rx); hipFree(d_out); hipFree(d_A); hipFree(d_B); hipFree(d_C); return 0; } // End of main() // Start of user defined functions // fliplr function implementation of one row and 'n' col of data void fliplr(hipComplex *in, int col, hipComplex *out) { int i, k; if (col % 2 != 0) { k = col; for (i = 0; i < col / 2; i++) { out[i] = in[k - 1 - i]; } k = col - 1; for (i = col / 2; i < col; i++) { out[i] = in[k - i]; } } else { k = col; for (i = 0; i < col; i++) { out[i] = in[k - 1 - i]; } } } void flipud(hipComplex **in, int row, int col, hipComplex **out) { int i, j, k; if (row % 2 != 0) { k = row; for (i = 0; i < row / 2; i++) { for (j = 0; j < col; j++) { out[i][j] = in[k - 1 - i][j]; } } k = row - 1; for (i = row / 2 + 1; i < row; i++) { for (j = 0; j < col; j++) { out[i][j] = in[k - i][j]; } } for (j = 0; j < col; j++) { out[row / 2][j] = in[row / 2][j]; } } else { k = row; for (i = 0; i < row; i++) { for (j = 0; j < col; j++) { out[i][j] = in[k - 1 - i][j]; } } } } __global__ void matrixMul(hipComplex* C, hipComplex* A, hipComplex* B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // 
Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread hipComplex Csub; Csub.x = 0; Csub.y = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ hipComplex As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ hipComplex Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(ty, tx) = A[a + wA * ty + tx]; BS(ty, tx) = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) Csub = cuCaddf(Csub, cuCmulf(AS(ty, k), BS(k, tx))); //Csub += AS(ty, k) * BS(k, tx); // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } __global__ void divide_by_N_azimuth(hipfftComplex *d_out, int N) { //step 1: d_out signal normalization, after cuFFT inverse of d_out from host. int thread_ID = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_ID < N) { d_out[thread_ID].x = d_out[thread_ID].x / (2 * 256); d_out[thread_ID].y = d_out[thread_ID].y / (2 * 256); } __syncthreads(); } __global__ void divide_by_N_range(hipfftComplex *d_out, int N) { //step 1: d_out signal normalization, after cuFFT inverse of d_out from host. 
int thread_ID = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_ID < N) { d_out[thread_ID].x = d_out[thread_ID].x / (2 * 512); d_out[thread_ID].y = d_out[thread_ID].y / (2 * 512); } __syncthreads(); } __global__ void process_range(hipComplex *tx_new, hipComplex *rx_new, hipComplex *d_out, hipComplex *d_tmp, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (index < N) { d_out[index] = cuCmulf(d_tmp[0], cuCmulf(cuConjf(tx_new[index]), rx_new[index])); } __syncthreads(); } __global__ void swap_data(hipfftComplex *d_tmp_out, hipfftComplex *d_out, int N) { int thread_ID = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_ID < N) { if (blockIdx.x % 2 == 0) { d_tmp_out[thread_ID] = d_out[thread_ID + 512]; } else { d_tmp_out[thread_ID] = d_out[thread_ID - 512]; } } __syncthreads(); } __global__ void process_az(hipComplex *tx_new, hipComplex *rx_new, hipComplex *d_out, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (index < N) { d_out[index] = cuConjf(cuCmulf(cuConjf(rx_new[index]), tx_new[index])); } __syncthreads(); } void populate_C(hipComplex * C, int N, hipComplex* fft_temp) { int i, j = 0; int w; j = 0; w = 0; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 8 * N; w = N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 15 * N; w = 2 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 22 * N; w = 6 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 30 * N; w = 3 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 37 * N; w = 7 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 44 * N; w = 4 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 51 * N; w = 8 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 58 * N; w = 12 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 66 * N; w = 5 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 73 * N; w = 9 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 80 * N; w = 10 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 87 * N; w = 13 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 94 * N; w = 14 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 101 * N; w = 18 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 109 * N; w = 11 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 116 * N; w = 15 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 123 * N; w = 16 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 130 * N; w = 19 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 137 * N; w = 20 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 145 * N; w = 17 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 152 * N; w = 21 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 159 * N; w = 22 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 167 * N; w = 23 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } } void block_corr(int flag, int N, int m, hipfftComplex *d_signal_tx, hipfftComplex *d_signal_rx, hipfftComplex *d_out, hipfftComplex *d_tmp, hipfftComplex *d_tmp_out, hipComplex *corr_output, hipComplex *A, hipComplex *C, hipComplex *fft_temp, hipComplex *d_A, hipComplex *d_B, hipComplex *d_C, hipComplex *h_C) { hipError_t 
cuda_error; int mem_size = sizeof(hipfftComplex)*N * 12 * 2; hipfftHandle plan1; dim3 dim_block(512, 1, 1); dim3 dim_grid(12, 2, 1); if (flag == 0) { hipfftPlan1d(&plan1, 2 * 512, HIPFFT_C2C, 12); hipfftExecC2C(plan1, (hipfftComplex *)d_signal_rx, (hipfftComplex *)d_signal_rx, HIPFFT_FORWARD); process_range << <dim_grid, dim_block >> >(d_signal_tx, d_signal_rx, d_out, d_tmp, 24 * N); hipDeviceSynchronize(); cuda_error = hipGetLastError(); if (cuda_error != hipSuccess) { printf("error in launching kernel processdata_kernel.\n"); printf("%s\n", hipGetErrorString(cuda_error)); } hipfftPlan1d(&plan1, 2 * 512, HIPFFT_C2C, 12); hipfftExecC2C(plan1, (hipfftComplex *)d_out, (hipfftComplex *)d_out, HIPFFT_BACKWARD); divide_by_N_range << <dim_grid, dim_block >> >(d_out, 24 * N); hipDeviceSynchronize(); cuda_error = hipGetLastError(); if (cuda_error != hipSuccess) { printf("error in launching kernel process_range_kernel.\n"); printf("%s\n", hipGetErrorString(cuda_error)); } // kernel call to swap data after ifft swap_data << <dim_grid, dim_block >> >(d_tmp_out, d_out, 24 * N); hipDeviceSynchronize(); hipfftDestroy(plan1); hipMemcpy(fft_temp, d_tmp_out, mem_size, hipMemcpyDeviceToHost); } if (flag == 1) { hipfftPlan1d(&plan1, 512, HIPFFT_C2C, 12); hipfftExecC2C(plan1, (hipfftComplex *)d_signal_rx, (hipfftComplex *)d_signal_rx, HIPFFT_FORWARD); process_az << <12, 512 >> >(d_signal_tx, d_signal_rx, d_out, 24 * N); hipDeviceSynchronize(); hipfftExecC2C(plan1, (hipfftComplex *)d_out, (hipfftComplex *)d_out, HIPFFT_BACKWARD); divide_by_N_azimuth << <dim_grid, dim_block >> >(d_out, 24 * N); hipDeviceSynchronize(); // we need not swipe back the data which was not done in range processing // bcos the swipe data ops is not required in azimuth processing hipMemcpy(fft_temp, d_out, mem_size, hipMemcpyDeviceToHost); } int i, j = 0, k; int M = 4; int row_size = M + m / N; int col_size = 2 * (M*m / N); populate_C(C, N, fft_temp); // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(hipComplex) * size_A; unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(hipComplex) * size_B; unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(hipComplex) * size_C; if (flag == 1) { mem_size_B = mem_size_B / 2; mem_size_C = mem_size_C / 2; } hipMemcpy(d_A, A, mem_size_A, hipMemcpyHostToDevice); hipMemcpy(d_B, C, mem_size_B, hipMemcpyHostToDevice); // setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(WC / threads.x, HC / threads.y); if (flag == 1) { matrixMul << < grid, threads >> >(d_C, d_A, d_B, WA, WB / 2); hipDeviceSynchronize(); } else { matrixMul << < grid, threads >> >(d_C, d_A, d_B, WA, WB); hipDeviceSynchronize(); } // copy result from device to host hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); k = 0; for (i = 0; i < 7 * N; i += N) { for (j = i + N; j < i + 2 * N; j++) { corr_output[j] = h_C[k]; k++; } k = k + 7 * N; } } // End of user defined functions
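// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the SAR sources above): block_corr performs
// correlation in the frequency domain - forward FFT of both blocks, a
// conjugate point-wise multiply, an inverse FFT, and a divide by the transform
// length. The standalone program below shows that core recipe with cuFFT on a
// single pair of length-n complex blocks; all names (circular_corr,
// pointwise_conj_mul) are illustrative only.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuComplex.h>
#include <cufft.h>
#include <cuda_runtime.h>

__global__ void pointwise_conj_mul(const cufftComplex *X, const cufftComplex *Y,
                                   cufftComplex *Z, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        // conj(X[i]) * Y[i], pre-scaled by 1/n because cuFFT's inverse is unnormalized.
        cufftComplex p = cuCmulf(cuConjf(X[i]), Y[i]);
        Z[i] = make_cuFloatComplex(p.x / n, p.y / n);
    }
}

// Circular cross-correlation of two device-resident complex blocks of length n.
void circular_corr(cufftComplex *d_x, cufftComplex *d_y, cufftComplex *d_r, int n)
{
    cufftHandle plan;
    cufftPlan1d(&plan, n, CUFFT_C2C, 1);

    cufftExecC2C(plan, d_x, d_x, CUFFT_FORWARD);   // X = FFT(x), in place
    cufftExecC2C(plan, d_y, d_y, CUFFT_FORWARD);   // Y = FFT(y), in place

    int threads = 256, blocks = (n + threads - 1) / threads;
    pointwise_conj_mul<<<blocks, threads>>>(d_x, d_y, d_r, n);

    cufftExecC2C(plan, d_r, d_r, CUFFT_INVERSE);   // r = IFFT(conj(X) .* Y) / n
    cufftDestroy(plan);
}

int main()
{
    const int n = 1024;
    cufftComplex h_x[n], h_y[n];
    for (int i = 0; i < n; i++) {
        h_x[i] = make_cuFloatComplex(i == 10 ? 1.0f : 0.0f, 0.0f);  // impulse at 10
        h_y[i] = make_cuFloatComplex(i == 25 ? 1.0f : 0.0f, 0.0f);  // impulse at 25
    }

    cufftComplex *d_x, *d_y, *d_r;
    cudaMalloc(&d_x, n * sizeof(cufftComplex));
    cudaMalloc(&d_y, n * sizeof(cufftComplex));
    cudaMalloc(&d_r, n * sizeof(cufftComplex));
    cudaMemcpy(d_x, h_x, n * sizeof(cufftComplex), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, n * sizeof(cufftComplex), cudaMemcpyHostToDevice);

    circular_corr(d_x, d_y, d_r, n);

    cufftComplex h_r[n];
    cudaMemcpy(h_r, d_r, n * sizeof(cufftComplex), cudaMemcpyDeviceToHost);
    printf("peak expected at lag 15: r[15] = (%g, %g)\n", h_r[15].x, h_r[15].y);

    cudaFree(d_x); cudaFree(d_y); cudaFree(d_r);
    return 0;
}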
8c532ea8b81bcd1742cff5bc42dda0f6697f35f9.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //************************************************************************************/ // Title : Parallel Processing of SAR Signal for Image generation on CUDA Platform //************************************************************************************/ // Program to generate processed image from raw SAR image using parallel // programming language CUDA // ******** Header Files ***********// #include <stdlib.h> #include <math.h> // includes, project #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> // *****End of Header Files***********// // No of rows and columns and the no of points are defined #define ROW 2048 #define COL 2048 #define NOP 2048 // Definition for matrix multiplication // Thread block size #define BLOCK_SIZE 1 // Basic Matrix dimensions // (chosen as multiples of the thread block size for simplicity) #define WA (24 * BLOCK_SIZE) // Matrix A width #define HA (7 * BLOCK_SIZE) // Matrix A height #define WB (3584 * BLOCK_SIZE) // Matrix B width #define HB WA // Matrix B height #define WC WB // Matrix C width #define HC HA // Matrix C height // Used in matrixMul kernel #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] //************************************************************************************/ // Start of Function prototype // Function to perform block correlation on input data vector void block_corr(int, int, int, cufftComplex *, cufftComplex *, cufftComplex *, cufftComplex *, cufftComplex *, cuComplex *, cuComplex *, cuComplex *, cuComplex *, cuComplex *, cuComplex *, cuComplex *, cuComplex *); // Function to Flip matrix in up/down direction void flipud(cuComplex **, int, int, cuComplex **); // Function to Flip matrix in left/right direction void fliplr(cuComplex *, int, cuComplex *); // Function to swap data in blocks of 512 __global__ void swap_data(cufftComplex *, cufftComplex *, int); // Function to launch kernel for range data processing __global__ void process_range(cuComplex *, cuComplex *, cuComplex *, cuComplex *, int); // Function to launch kernel for azimuth data processing __global__ void process_az(cuComplex *, cuComplex *, cuComplex *, int); // Function to matrix multiplication kernel __global__ void matrixMul(cuComplex*, cuComplex*, cuComplex*, int, int); // Function to normalize azimuth data after ifft __global__ void divide_by_N_azimuth(cufftComplex *, int); // Function to normalize range data after ifft __global__ void divide_by_N_range(cufftComplex *, int); // Function to populate the C matrix with data void populate_C(cuComplex *, int, cuComplex*); // End of function prototype //************************************************************************************/ // Start of main() function int main() { int i, j, k, w, m, flag = 0; int N = NOP / 4; double length = 1349; m = (int)floor(length * 8 / NOP); m = (int)(m + 1)*(NOP / 8); cudaError_t cuda_error; /*Reading Raw Image file from image.txt and storing it in a[*][*] */ int row = ROW / 2; // this is done to read only half the image as per logic int col = COL; FILE *fp1; fp1 = fopen("image.txt", "r"); fseek(fp1, row * COL * 4 * sizeof(cuComplex), SEEK_SET); cuComplex **a = (cuComplex **)calloc(row, sizeof(cuComplex *)); printf("Reading complex image\n"); for (i = 0; i < row; i++) { a[i] = (cuComplex *)calloc(col, sizeof(cuComplex)); for (j = 0; j < col; j++) { fscanf(fp1, "%f%f", &a[i][j].x, &a[i][j].y); } } fclose(fp1); printf("finished reading image\n"); 
/*============================================================================*/ /* RANGE IMAGE PROCESSING */ /*============================================================================*/ /* Reading transmit data from x_data.txt and storing it to x_data[*] */ fp1 = fopen("x_data.txt", "r"); fseek(fp1, 0, SEEK_END); int fileLen; fileLen = ftell(fp1); fileLen = fileLen / (4 * sizeof(cuComplex)); // 4*sizeof(cuComplex) is the size of one complex data fseek(fp1, 0, SEEK_SET); cuComplex * x_data = (cuComplex *)calloc(fileLen, sizeof(cuComplex)); for (i = 0; i < fileLen; i++) { fscanf(fp1, "%f%f", &x_data[i].x, &x_data[i].y); } fclose(fp1); /*Calculate cuFFT of transmit data ( x_data[*] ) */ // Rearrange the block of x_data : 3-1 to 1-3 blocks ... one block of 512 points. cuComplex * tx_temp = (cuComplex *)calloc(3 * N, sizeof(cuComplex)); j = 2 * N; for (i = 0; i < N; i++) { tx_temp[i].x = x_data[j].x; tx_temp[i].y = x_data[j].y; tx_temp[i + N].x = x_data[j - N].x; tx_temp[i + N].y = x_data[j - N].y; tx_temp[i + (2 * N)].x = x_data[j - (2 * N)].x; tx_temp[i + (2 * N)].y = x_data[j - (2 * N)].y; j++; } // Making 12 blocks of data from given 3 blocks // each block is of size 1024 = 2*512 = 2 * Previous block size cuComplex * tx_new = (cuComplex *)calloc(12 * 2 * N, sizeof(cuComplex)); int p = 0; for (w = 0; w < 4; w++) { for (i = 0; i < 3; i++) { for (k = (i*N); k < (i + 1)*N; k++) { tx_new[p] = tx_temp[k]; p++; } for (k = (i + 1)*N; k < (i + 2)*N; k++) { tx_new[p].x = 0.0; tx_new[p].y = 0.0; p++; } } } cudaError_t error; // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Calculating cuFFFT of transmit data (tx_new[*]) cufftComplex * d_signal_tx; int mem_size = sizeof(cufftComplex)*N * 12 * 2; // Memory allocation & Transfer on Device cuda_error = cudaMalloc((void**)&d_signal_tx, mem_size); if (cuda_error != cudaSuccess) { printf("error in Cuda Malloc...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } cuda_error = cudaMemcpy(d_signal_tx, tx_new, mem_size, cudaMemcpyHostToDevice); if (cuda_error != cudaSuccess) { printf("error in Cuda Mem Copy of d_signal_tx...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Finding cuFFT by declaring a plan cufftHandle plan; cufftPlan1d(&plan, 2 * 512, CUFFT_C2C, 12); cufftExecC2C(plan, (cufftComplex *)d_signal_tx, (cufftComplex *)d_signal_tx, CUFFT_FORWARD); // FFT results are stored back in to d_signal_tx on device /*Calculate cuFFT of received data ( y_data[*] ) */ /*y_data[*] contains SINGLE ROW of received image data (a[*][*]) */ cuComplex * y_data = (cuComplex *)calloc(NOP, sizeof(cuComplex)); cuComplex * corr_output = (cuComplex *)calloc(2 * NOP, sizeof(cuComplex)); cuComplex * rx_new = (cuComplex *)calloc(N * 12 * 2, sizeof(cuComplex)); // Passing the variable to divide by 2 on device // This can be defined directly on the device - Do it later cufftComplex h_tmp; h_tmp.x = 0.5; h_tmp.y = 0; int mem_size1 = sizeof(cufftComplex); cufftComplex 
*d_tmp; cuda_error = cudaMalloc((void**)&d_tmp, mem_size1); if (cuda_error != cudaSuccess) { printf("error in Cuda Malloc of d_out...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } cuda_error = cudaMemcpy(d_tmp, &h_tmp, mem_size1, cudaMemcpyHostToDevice); if (cuda_error != cudaSuccess) { printf("error in Cuda Memcpy of d_tmp...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } // CUDA Malloc of Receive data (y_data -> d_signal_rx) & O/P data (d_out) cufftComplex * d_signal_rx, *d_out, *d_tmp_out; // d_tmp_out is used to temporary store the data on device cuda_error = cudaMalloc((void**)&d_signal_rx, mem_size); if (cuda_error != cudaSuccess) { printf("error in Cuda Malloc of d_signal_rx...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } cuda_error = cudaMalloc((void**)&d_out, mem_size); if (cuda_error != cudaSuccess) { printf("error in Cuda Malloc of d_out...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } cuda_error = cudaMalloc((void**)&d_tmp_out, mem_size); if (cuda_error != cudaSuccess) { printf("error in Cuda Malloc of d_tmp_out...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } // M is the number of blocks of transmit data. int M = 4; int row_size = M + m / N; int col_size = 2 * (M*m / N); // Defining a constant 'A' matrix cuComplex * A = (cuComplex *)calloc(row_size*col_size, sizeof(cuComplex)); A[0].x = 1; // All imaginary values of 'A' matrix elements are zero for (i = 25; i < 28; i++) A[i].x = 1; for (i = 52; i < 57; i++) A[i].x = 1; for (i = 81; i < 87; i++) A[i].x = 1; for (i = 111; i < 116; i++) A[i].x = 1; for (i = 140; i < 143; i++) A[i].x = 1; A[167].x = 1; cuComplex *C = (cuComplex *)calloc(24 * 7 * N, sizeof(cuComplex)); cuComplex *fft_temp = (cuComplex *)calloc(12 * 2 * N, sizeof(cuComplex)); // memory allocation for matrix multiplication // allocate host memory for matrices A and B unsigned int mem_size_A = sizeof(cuComplex) * WA * HA; unsigned int mem_size_B = sizeof(cuComplex) * WB * HB; unsigned int mem_size_C = sizeof(cuComplex) * WC * HC; // allocate device memory cuComplex *d_A, *d_B, *d_C; cudaMalloc((void**)&d_A, mem_size_A); cudaMalloc((void**)&d_B, mem_size_B); cudaMalloc((void**)&d_C, mem_size_C); // allocate host memory for the result cuComplex* h_C = (cuComplex*)malloc(mem_size_C); // end of matrix mult memory allocation cuComplex **range_image = (cuComplex **)calloc(row, sizeof(cuComplex *)); cuComplex **range_image_flip = (cuComplex **)calloc(row, sizeof(cuComplex *)); /*Starting of Range Image Processing */ i = 0; for (j = 0; j < row; j++) { for (k = 0; k < NOP; k++) { y_data[k] = cuConjf(a[j][k]); } // Block formation of receive data of block size = 1024 points p = 0; for (w = 0; w < 4; w++) { for (int x = 0; x < 3; x++) { for (k = (w*N); k < (w + 1)*N; k++) { rx_new[p] = y_data[k]; p++; } for (k = (w + 1)*N; k < (w + 2)*N; k++) { rx_new[p].x = 0.0; rx_new[p].y = 0.0; p++; } } } // Compute Block Correlation of the Transmit & Receive data. 
cuda_error = cudaMemcpy(d_signal_rx, rx_new, mem_size, cudaMemcpyHostToDevice); if (cuda_error != cudaSuccess) { printf("error in Cuda Mem Copy of d_signal_rx...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } block_corr(flag, N, m, d_signal_tx, d_signal_rx, d_out, d_tmp, d_tmp_out, corr_output, A, C, fft_temp, d_A, d_B, d_C, h_C); range_image[j] = (cuComplex *)calloc(col, sizeof(cuComplex)); range_image_flip[j] = (cuComplex *)calloc(col, sizeof(cuComplex)); int z = NOP; for (k = 0; k < NOP; k++) { range_image[i][k] = corr_output[z]; z++; } i++; } printf("Finished range image processing\n"); /*END OF RANGE IMAGE PROCESSING*/ /*============================================================================*/ /* RANGE IMAGE PROCESSING */ /*============================================================================*/ //azimuth processing commences /* fp1=fopen("range_image1.txt","r"); for(i=0;i<row;i++) { for(j=0;j<col;j++) { fscanf(fp1,"%f%f",&range_image[i][j].x,&range_image[i][j].y); } } fclose(fp1);*/ flipud(range_image, row, col, range_image_flip); flag = 1; if (flag == 1) { int nrow = 1024; double L = 701; N = nrow / 4; m = (int)floor(L / N); m = (int)(m + 1)*N; fp1 = fopen("x_data_az.txt", "r"); fseek(fp1, 0, SEEK_END); fileLen = ftell(fp1); fileLen = fileLen / (4 * sizeof(cuComplex)); // 4*sizeof(cuComplex) is the size of one complex data fseek(fp1, 0, SEEK_SET); cuComplex * x_data = (cuComplex *)calloc(fileLen, sizeof(cuComplex)); for (i = 0; i < fileLen; i++) { fscanf(fp1, "%f%f", &x_data[i].x, &x_data[i].y); } fclose(fp1); cuComplex * x_flip_data = (cuComplex *)calloc(N, sizeof(cuComplex)); cuComplex * x_temp_data = (cuComplex *)calloc(N, sizeof(cuComplex)); cuComplex * rx_new = (cuComplex *)calloc(24 * N, sizeof(cuComplex)); cuComplex * tx_temp = (cuComplex *)calloc(3 * N, sizeof(cuComplex)); cuComplex * tx_new = (cuComplex *)calloc(12 * 2 * N, sizeof(cuComplex)); j = 2 * N; for (i = 0; i < N; i++) { tx_temp[i].x = x_data[j].x; tx_temp[i].y = x_data[j].y; tx_temp[i + N].x = x_data[j - N].x; tx_temp[i + N].y = x_data[j - N].y; tx_temp[i + (2 * N)].x = x_data[j - (2 * N)].x; tx_temp[i + (2 * N)].y = x_data[j - (2 * N)].y; j++; } mem_size = sizeof(cufftComplex)*N * 12 * 2; int p = 0; int z, q; for (w = 0; w < 4; w++) { for (i = 0; i < 3; i++) { q = 0; for (z = i*N; z < (i + 1)*N; z++) { x_temp_data[q] = cuConjf(tx_temp[z]); q++; } fliplr(x_temp_data, N, x_flip_data); for (k = 0; k < N; k++) { tx_new[p] = x_flip_data[k]; p++; } for (k = 0; k < N; k++) { tx_new[p].x = 0.0; tx_new[p].y = 0.0; p++; } } } cuda_error = cudaMemcpy(d_signal_tx, tx_new, mem_size, cudaMemcpyHostToDevice); if (cuda_error != cudaSuccess) { printf("error in Cuda Mem Copy...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } cufftPlan1d(&plan, 512, CUFFT_C2C, 12); cufftExecC2C(plan, (cufftComplex *)d_signal_tx, (cufftComplex *)d_signal_tx, CUFFT_INVERSE); // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(cuComplex) * size_A; unsigned int size_B = (WB / 2) * HB; unsigned int mem_size_B = sizeof(cuComplex) * size_B; // allocate device memory cuComplex* d_A; cudaMalloc((void**)&d_A, mem_size_A); cuComplex* d_B; cudaMalloc((void**)&d_B, mem_size_B); // allocate device memory for result unsigned int size_C = (WC / 2) * HC; unsigned int mem_size_C = sizeof(cuComplex) * size_C; cuComplex* d_C; cudaMalloc((void**)&d_C, mem_size_C); // allocate host memory for the result cuComplex* h_C = (cuComplex*)malloc(mem_size_C); // end of matrix multi memory 
allocation i = 0; for (j = 0; j < col; j++) { for (k = 0; k < row; k++) { y_data[k] = range_image_flip[k][j]; } p = 0; for (w = 0; w < 4; w++) { for (int x = 0; x < 3; x++) { for (k = (w*N); k < (w + 1)*N; k++) { rx_new[p] = y_data[k]; p++; } for (k = (w + 1)*N; k < (w + 2)*N; k++) { rx_new[p].x = 0.0; rx_new[p].y = 0.0; p++; } } } cuda_error = cudaMemcpy(d_signal_rx, rx_new, mem_size, cudaMemcpyHostToDevice); if (cuda_error != cudaSuccess) { printf("error in Cuda Mem Copy...\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } block_corr(flag, N, m, d_signal_tx, d_signal_rx, d_out, d_tmp, d_tmp_out, corr_output, A, C, fft_temp, d_A, d_B, d_C, h_C); w = N; for (k = 0; k < 4 * N; k++) { range_image[k][i] = corr_output[w]; w++; } i++; } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } printf("\nProcessing time: %f (ms)\n\n", msecTotal); // data is written in azimuth file seperately bcos azimuth image is stored in col major format // and not in row major format fp1 = fopen("azimuth_image.txt", "w"); for (i = 0; i < row; i++) { for (j = 0; j < col; j++) { fprintf(fp1, "%lg\t", cuCabsf(range_image[i][j])); } } fclose(fp1); } // Memory free allocated for (i = 0; i < row; i++) { free(range_image[i]); free(range_image_flip[i]); } free(range_image); free(range_image_flip); free(C); free(A); free(fft_temp); free(h_C); cudaFree(d_signal_tx); cudaFree(d_signal_rx); cudaFree(d_out); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return 0; } // End of main() // Start of user defined functions // fliplr function implementation of one row and 'n' col of data void fliplr(cuComplex *in, int col, cuComplex *out) { int i, k; if (col % 2 != 0) { k = col; for (i = 0; i < col / 2; i++) { out[i] = in[k - 1 - i]; } k = col - 1; for (i = col / 2; i < col; i++) { out[i] = in[k - i]; } } else { k = col; for (i = 0; i < col; i++) { out[i] = in[k - 1 - i]; } } } void flipud(cuComplex **in, int row, int col, cuComplex **out) { int i, j, k; if (row % 2 != 0) { k = row; for (i = 0; i < row / 2; i++) { for (j = 0; j < col; j++) { out[i][j] = in[k - 1 - i][j]; } } k = row - 1; for (i = row / 2 + 1; i < row; i++) { for (j = 0; j < col; j++) { out[i][j] = in[k - i][j]; } } for (j = 0; j < col; j++) { out[row / 2][j] = in[row / 2][j]; } } else { k = row; for (i = 0; i < row; i++) { for (j = 0; j < col; j++) { out[i][j] = in[k - 1 - i][j]; } } } } __global__ void matrixMul(cuComplex* C, cuComplex* A, cuComplex* B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used 
to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread cuComplex Csub; Csub.x = 0; Csub.y = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ cuComplex As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ cuComplex Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix AS(ty, tx) = A[a + wA * ty + tx]; BS(ty, tx) = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix for (int k = 0; k < BLOCK_SIZE; ++k) Csub = cuCaddf(Csub, cuCmulf(AS(ty, k), BS(k, tx))); //Csub += AS(ty, k) * BS(k, tx); // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } __global__ void divide_by_N_azimuth(cufftComplex *d_out, int N) { //step 1: d_out signal normalization, after cuFFT inverse of d_out from host. int thread_ID = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_ID < N) { d_out[thread_ID].x = d_out[thread_ID].x / (2 * 256); d_out[thread_ID].y = d_out[thread_ID].y / (2 * 256); } __syncthreads(); } __global__ void divide_by_N_range(cufftComplex *d_out, int N) { //step 1: d_out signal normalization, after cuFFT inverse of d_out from host. 
int thread_ID = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_ID < N) { d_out[thread_ID].x = d_out[thread_ID].x / (2 * 512); d_out[thread_ID].y = d_out[thread_ID].y / (2 * 512); } __syncthreads(); } __global__ void process_range(cuComplex *tx_new, cuComplex *rx_new, cuComplex *d_out, cuComplex *d_tmp, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (index < N) { d_out[index] = cuCmulf(d_tmp[0], cuCmulf(cuConjf(tx_new[index]), rx_new[index])); } __syncthreads(); } __global__ void swap_data(cufftComplex *d_tmp_out, cufftComplex *d_out, int N) { int thread_ID = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (thread_ID < N) { if (blockIdx.x % 2 == 0) { d_tmp_out[thread_ID] = d_out[thread_ID + 512]; } else { d_tmp_out[thread_ID] = d_out[thread_ID - 512]; } } __syncthreads(); } __global__ void process_az(cuComplex *tx_new, cuComplex *rx_new, cuComplex *d_out, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if (index < N) { d_out[index] = cuConjf(cuCmulf(cuConjf(rx_new[index]), tx_new[index])); } __syncthreads(); } void populate_C(cuComplex * C, int N, cuComplex* fft_temp) { int i, j = 0; int w; j = 0; w = 0; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 8 * N; w = N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 15 * N; w = 2 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 22 * N; w = 6 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 30 * N; w = 3 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 37 * N; w = 7 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 44 * N; w = 4 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 51 * N; w = 8 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 58 * N; w = 12 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 66 * N; w = 5 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 73 * N; w = 9 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 80 * N; w = 10 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 87 * N; w = 13 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 94 * N; w = 14 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 101 * N; w = 18 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 109 * N; w = 11 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 116 * N; w = 15 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 123 * N; w = 16 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 130 * N; w = 19 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 137 * N; w = 20 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 145 * N; w = 17 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 152 * N; w = 21 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 159 * N; w = 22 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } j = 167 * N; w = 23 * N; for (i = 0; i < N; i++) { C[j] = fft_temp[i + w]; j++; } } void block_corr(int flag, int N, int m, cufftComplex *d_signal_tx, cufftComplex *d_signal_rx, cufftComplex *d_out, cufftComplex *d_tmp, cufftComplex *d_tmp_out, cuComplex *corr_output, cuComplex *A, cuComplex *C, cuComplex *fft_temp, cuComplex *d_A, cuComplex *d_B, cuComplex *d_C, cuComplex *h_C) { cudaError_t cuda_error; int mem_size = 
sizeof(cufftComplex)*N * 12 * 2; cufftHandle plan1; dim3 dim_block(512, 1, 1); dim3 dim_grid(12, 2, 1); if (flag == 0) { cufftPlan1d(&plan1, 2 * 512, CUFFT_C2C, 12); cufftExecC2C(plan1, (cufftComplex *)d_signal_rx, (cufftComplex *)d_signal_rx, CUFFT_FORWARD); process_range << <dim_grid, dim_block >> >(d_signal_tx, d_signal_rx, d_out, d_tmp, 24 * N); cudaThreadSynchronize(); cuda_error = cudaGetLastError(); if (cuda_error != cudaSuccess) { printf("error in launching kernel processdata_kernel.\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } cufftPlan1d(&plan1, 2 * 512, CUFFT_C2C, 12); cufftExecC2C(plan1, (cufftComplex *)d_out, (cufftComplex *)d_out, CUFFT_INVERSE); divide_by_N_range << <dim_grid, dim_block >> >(d_out, 24 * N); cudaThreadSynchronize(); cuda_error = cudaGetLastError(); if (cuda_error != cudaSuccess) { printf("error in launching kernel process_range_kernel.\n"); printf("%s\n", cudaGetErrorString(cuda_error)); } // kernel call to swap data after ifft swap_data << <dim_grid, dim_block >> >(d_tmp_out, d_out, 24 * N); cudaThreadSynchronize(); cufftDestroy(plan1); cudaMemcpy(fft_temp, d_tmp_out, mem_size, cudaMemcpyDeviceToHost); } if (flag == 1) { cufftPlan1d(&plan1, 512, CUFFT_C2C, 12); cufftExecC2C(plan1, (cufftComplex *)d_signal_rx, (cufftComplex *)d_signal_rx, CUFFT_FORWARD); process_az << <12, 512 >> >(d_signal_tx, d_signal_rx, d_out, 24 * N); cudaThreadSynchronize(); cufftExecC2C(plan1, (cufftComplex *)d_out, (cufftComplex *)d_out, CUFFT_INVERSE); divide_by_N_azimuth << <dim_grid, dim_block >> >(d_out, 24 * N); cudaThreadSynchronize(); // we need not swipe back the data which was not done in range processing // bcos the swipe data ops is not required in azimuth processing cudaMemcpy(fft_temp, d_out, mem_size, cudaMemcpyDeviceToHost); } int i, j = 0, k; int M = 4; int row_size = M + m / N; int col_size = 2 * (M*m / N); populate_C(C, N, fft_temp); // allocate host memory for matrices A and B unsigned int size_A = WA * HA; unsigned int mem_size_A = sizeof(cuComplex) * size_A; unsigned int size_B = WB * HB; unsigned int mem_size_B = sizeof(cuComplex) * size_B; unsigned int size_C = WC * HC; unsigned int mem_size_C = sizeof(cuComplex) * size_C; if (flag == 1) { mem_size_B = mem_size_B / 2; mem_size_C = mem_size_C / 2; } cudaMemcpy(d_A, A, mem_size_A, cudaMemcpyHostToDevice); cudaMemcpy(d_B, C, mem_size_B, cudaMemcpyHostToDevice); // setup execution parameters dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 grid(WC / threads.x, HC / threads.y); if (flag == 1) { matrixMul << < grid, threads >> >(d_C, d_A, d_B, WA, WB / 2); cudaThreadSynchronize(); } else { matrixMul << < grid, threads >> >(d_C, d_A, d_B, WA, WB); cudaThreadSynchronize(); } // copy result from device to host cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); k = 0; for (i = 0; i < 7 * N; i += N) { for (j = i + N; j < i + 2 * N; j++) { corr_output[j] = h_C[k]; k++; } k = k + 7 * N; } } // End of user defined functions
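// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original program.
// block_corr() above realises correlation in the frequency domain: FFT both
// blocks, multiply conj(TX) by RX, inverse FFT, then normalize
// (divide_by_N_range / divide_by_N_azimuth). The minimal standalone example
// below shows that pattern for a single pair of length-L complex blocks;
// the names corr_demo, mul_conj and L_DEMO are hypothetical.
// ---------------------------------------------------------------------------
#define L_DEMO 1024

__global__ void mul_conj(cufftComplex *x, cufftComplex *y, cufftComplex *out, int n)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i < n)
		out[i] = cuCmulf(cuConjf(x[i]), y[i]); // conj(X[k]) * Y[k]
}

void corr_demo(cufftComplex *d_tx, cufftComplex *d_rx, cufftComplex *d_corr)
{
	cufftHandle plan;
	cufftPlan1d(&plan, L_DEMO, CUFFT_C2C, 1);
	cufftExecC2C(plan, d_tx, d_tx, CUFFT_FORWARD);       // in-place forward FFTs
	cufftExecC2C(plan, d_rx, d_rx, CUFFT_FORWARD);
	mul_conj<<<(L_DEMO + 255) / 256, 256>>>(d_tx, d_rx, d_corr, L_DEMO);
	cufftExecC2C(plan, d_corr, d_corr, CUFFT_INVERSE);   // unnormalized; scale by 1/L_DEMO as divide_by_N_range does
	cufftDestroy(plan);
}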
9b61e79732e3688149c551b8d3a7d6e2fe49032c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_hip.cuh"

#define N 5

__global__ void gpuSquareKernel(float* d_in, float* d_out)
{
	int tid = threadIdx.x;
	float temp = d_in[tid];
	d_out[tid] = temp * temp;
}

void gpuSquare(float* h_in, float* h_out)
{
	float *d_in, *d_out;
	hipMalloc((void**)&d_in, N * sizeof(float));
	hipMalloc((void**)&d_out, N * sizeof(float));
	hipMemcpy(d_in, h_in, N * sizeof(float), hipMemcpyHostToDevice);
	gpuSquareKernel<<<1, N>>>(d_in, d_out);
	hipMemcpy(h_out, d_out, N * sizeof(float), hipMemcpyDeviceToHost);
	hipFree(d_in);
	hipFree(d_out);
}
9b61e79732e3688149c551b8d3a7d6e2fe49032c.cu
#include "kernel.cuh" #define N 5 __global__ void gpuSquareKernel(float* d_in, float* d_out) { int tid = threadIdx.x; float temp = d_in[tid]; d_out[tid] = temp * temp; } void gpuSquare(float* h_in, float* h_out) { float *d_in, *d_out; cudaMalloc((void**)&d_in, N * sizeof(float)); cudaMalloc((void**)&d_out, N * sizeof(float)); cudaMemcpy(d_in, h_in, N * sizeof(float), cudaMemcpyHostToDevice); gpuSquareKernel << <1, N >> > (d_in, d_out); cudaMemcpy(h_out, d_out, N * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_in); cudaFree(d_out); }
db5c99065a250506aa6a7c1a568898c324cdb69b.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <c10/macros/Macros.h> #include <THH/THH.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> // Warp reduce kernels to reduce N groups of data into N numbers, where N = warpSize / width. // width should be a power of 2 and should be less than warpSize. template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduce(scalar_t x, int width=C10_WARP_SIZE){ for (unsigned offset = width/2; offset > 0; offset /= 2){ x += __shfl_down_sync(0xffffffff, x, offset, width); } return x; } inline int largestPowerOfTwo(int x){ int y = 1; while (y <= x) y <<= 1; return y >> 1; } // Helper class to calculate pointer offset that can be shared by different flavors of kernels. // For fwd, batch offset and stride are different for packing and non-packing mode. struct OffsetCalFwd{ __device__ __forceinline__ OffsetCalFwd( int64_t batch, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t gLen, int64_t hiddenSize, bool packOutput) : batch(batch), batchOffset(batchOffset), maxFLen(maxFLen), maxGLen(maxGLen), gLen(gLen), hiddenSize(hiddenSize), packOutput(packOutput) {} int64_t batch; const int64_t *batchOffset; int64_t maxFLen; int64_t maxGLen; int64_t gLen; int64_t hiddenSize; bool packOutput; __device__ __forceinline__ int64_t getBatchOffset(){ return packOutput ? ((batch==0) ? 0 : batchOffset[batch-1])*hiddenSize : batch*maxFLen*maxGLen*hiddenSize; } __device__ __forceinline__ int64_t getStrideF(){ return packOutput ? gLen*hiddenSize : maxGLen*hiddenSize; } }; // Helper class to calculate pointer offset that can be shared by different flavors of kernels // For bwd, batch offset and stride are different for packing and non-packing mode. // The reducion is done for two input tensors. Therefore, generating two sets of offsets // according to bwdFasterDim can lead to a unified implementation in the actual kernel. struct OffsetCalBwd{ __device__ __forceinline__ OffsetCalBwd( int64_t batch, const int64_t *batchOffset, const int *fLen, const int *gLen, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, bool bwdFasterDim) : batch(batch), batchOffset(batchOffset), maxFLen(maxFLen), maxGLen(maxGLen), fLen(fLen), gLen(gLen), hiddenSize(hiddenSize), packOutput(packOutput), bwdFasterDim(bwdFasterDim) {} int64_t batch; const int64_t *batchOffset; const int *fLen; const int *gLen; int64_t maxFLen; int64_t maxGLen; int64_t hiddenSize; bool packOutput; bool bwdFasterDim; // whether doing bwd on the faster moving dimension __device__ __forceinline__ int64_t getBatchOffset(){ return packOutput ? ((batch==0) ? 0 : batchOffset[batch-1])*hiddenSize : batch*maxFLen*maxGLen*hiddenSize; } __device__ __forceinline__ int64_t getMaxXLen(){ return bwdFasterDim ? maxGLen : maxFLen; } __device__ __forceinline__ auto getMyXLen() -> decltype(gLen[batch]){ return bwdFasterDim ? gLen[batch] : fLen[batch]; } __device__ __forceinline__ auto getMyYLen() -> decltype(gLen[batch]){ return bwdFasterDim ? fLen[batch] : gLen[batch]; } __device__ __forceinline__ int64_t getStrideX(){ return bwdFasterDim ? hiddenSize : ((packOutput ? gLen[batch] : maxGLen) * hiddenSize); } __device__ __forceinline__ int64_t getStrideY(){ return bwdFasterDim ? ((packOutput ? 
gLen[batch] : maxGLen) * hiddenSize) : hiddenSize; } }; // Vanila transducer joint forward kernel // Detail of this joint function can be found in: // [1] Sequence Transduction with Recurrent Neural Networks. // f is a tensor of shape [batch, T, H] // g is a tensor of shape [batch, U, H] // the transducer joint does // sum = f.unsqueeze(dim=2) + g.unsqueeze(dim=1) // The resultant tensor is of shape [batch, T, U, H] // Each thread block is working on one "batch" of data in the output tensor, [batch, t, u, :] // This joint function can optionally pack the output where the output tensor with a shape of // [B, T, U, H] is packed into [B_packed, H]. // Don't-care region (t > fLen) or (u > gLen) is removed. // To enable packing, the starting offset for each batch need to be specified with batchOffset. template <typename scalar_t, class OffsetCal> __global__ void transducer_joint_forward( const scalar_t *f, const scalar_t *g, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, scalar_t *sum) { const int batch = blockIdx.z; const int t = blockIdx.y; const int u = blockIdx.x; const auto myFLen = fLen[batch]; const auto myGLen = gLen[batch]; OffsetCal offsetCal(batch, batchOffset, maxFLen, maxGLen, myGLen, hiddenSize, packOutput); const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideF = offsetCal.getStrideF(); scalar_t const *myF = f + batch*maxFLen*hiddenSize + t*hiddenSize; scalar_t const *myG = g + batch*maxGLen*hiddenSize + u*hiddenSize; scalar_t *mySum = sum + myBatchOffset + t*strideF + u * hiddenSize; if (t < myFLen and u < myGLen){ #pragma unroll for (int h = threadIdx.x; h < hiddenSize; h += blockDim.x){ if (h < hiddenSize){ mySum[h] = myF[h] + myG[h]; } } } else if (packOutput == false and t < maxFLen and u < maxGLen){ // Need to write finite data to don't-care region because we instantiate the result tensor // with torch::empty for performance reasons. Even though it is don't-care region, the // contents need to be finite, otherwise could lead to NaN in WGRAD. // In packing mode, this write is no longer necessary as we remove the don't-care region // from the output. // Picking -1 (over 0) here for ease of testing. #pragma unroll for (int h = threadIdx.x; h < hiddenSize; h += blockDim.x){ if (h < hiddenSize){ mySum[h] = -1; } } } } // Tiled version of the joint forward kernel // Detail of this joint function can be found in: // [1] Sequence Transduction with Recurrent Neural Networks. // f is a tensor of shape [batch, T, H] // g is a tensor of shape [batch, U, H] // the transducer joint does // sum = f.unsqueeze(dim=2) + g.unsqueeze(dim=1) // The resultant tensor is of shape [batch, T, U, H] // Each thread is working on a tile of the shape of tileF x tileG in the result tensor. // The input for the tile is first loaded in the register and is reused tileG and tileF times. // This joint function can optionally pack the output where the output tensor with a shape of // [B, T, U, H] is packed into [B_packed, H]. // Don't-care region (t > fLen) or (u > gLen) is removed. // To enable packing, the starting offset for each batch need to be specified with batchOffset. 
template <typename scalar_t, int tileF, int tileG, class OffsetCal> __global__ void transducer_joint_tiled_forward( const scalar_t *f, const scalar_t *g, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, int64_t hiddenPerBlock, bool packOutput, scalar_t *sum) { const int batch = blockIdx.z; const int t = blockIdx.y * tileF; const int hiddenBlock = (hiddenSize + hiddenPerBlock - 1) / hiddenPerBlock; const int u = blockIdx.x / hiddenBlock * tileG; const int hOffset = (blockIdx.x % hiddenBlock) * hiddenPerBlock; const int h = threadIdx.x; const auto myFLen = fLen[batch]; const auto myGLen = gLen[batch]; OffsetCal offsetCal(batch, batchOffset, maxFLen, maxGLen, myGLen, hiddenSize, packOutput); const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideF = offsetCal.getStrideF(); scalar_t const *myF = f + batch*maxFLen*hiddenSize + t*hiddenSize + hOffset; scalar_t const *myG = g + batch*maxGLen*hiddenSize + u*hiddenSize + hOffset; scalar_t *mySum = sum + myBatchOffset + t*strideF + u*hiddenSize + hOffset; if (t < myFLen and u < myGLen and hOffset+h < hiddenSize){ // register buffers for tiled input reuse scalar_t fBuffer[tileF], gBuffer[tileG]; for (int i = 0; i < tileF; ++i){ if (t + i < myFLen) fBuffer[i] = myF[i*hiddenSize + h]; } for (int j = 0; j < tileG; ++j){ if (u + j < myGLen) gBuffer[j] = myG[j*hiddenSize + h]; } #pragma unroll for (int i = 0; i < tileF; ++i){ if (t + i < myFLen){ #pragma unroll for (int j = 0; j < tileG; ++j){ if (u + j < myGLen) mySum[i*strideF + j*hiddenSize + h] = fBuffer[i] + gBuffer[j]; else if (packOutput == false and u + j < maxGLen) mySum[i*strideF + j*hiddenSize + h] = -1; } } else if (packOutput == false and t + i < maxFLen){ // Again need to write finite data to don't-care region #pragma unroll for (int j = 0; j < tileG; ++j){ if (u + j < maxGLen) mySum[i*strideF + j*hiddenSize + h] = -1; } } } } else if (packOutput == false and t < maxFLen and u < maxGLen and hOffset+h < hiddenSize){ // Only need to ensure the finity in normal mode #pragma unroll for (int i = 0; i < tileF; ++i){ if (t + i < maxFLen){ #pragma unroll for (int j = 0; j < tileG; ++j){ if (u + j < maxGLen) mySum[i*strideF + j*hiddenSize + h] = -1; } } } } } // Bwd operation (reduction) on one input tensor. Since the operation performed for the two input // tensors are exactly the same, only one kernel is needed, and the different indexing offsets // and strides are handled by OffsetCalBwd. // When packing is enabled in the fwd op, unpacking is needed to restore the gradients in a // non-packed form. template <typename scalar_t, typename acc_t, class OffsetCal> __device__ void transducer_joint_single_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, bool bwdFasterDim, // whether bwd on the faster moving dimension (u) scalar_t *inGrad, int yBlockOffset=0) { const int batch = blockIdx.z; // For the second input tensor, this offset need to be subtracted because the first yBlockOffset // sets of thread blocks are for the first input tensor. 
const int x = blockIdx.y-yBlockOffset; const int hOffset = blockIdx.x*C10_WARP_SIZE; const int wid = threadIdx.y; const int lid = threadIdx.x; const int numWarp = blockDim.y; extern __shared__ char smem8[]; auto smem = reinterpret_cast<acc_t*>(smem8); OffsetCal offsetCal(batch, batchOffset, fLen, gLen, maxFLen, maxGLen, hiddenSize, packOutput, bwdFasterDim); const auto maxXLen = offsetCal.getMaxXLen(); const auto myXLen = offsetCal.getMyXLen(); const auto myYLen = offsetCal.getMyYLen(); scalar_t *myInGrad = inGrad + batch*maxXLen*hiddenSize + x*hiddenSize + hOffset; if (x < myXLen){ const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideX = offsetCal.getStrideX(); const auto strideY = offsetCal.getStrideY(); scalar_t const *myGrad = grad + myBatchOffset + x*strideX + hOffset; // Each warp reduces numYPerWarp "y" first acc_t warpSum = 0; auto numYPerWarp = (myYLen+numWarp-1)/numWarp; for (int warpY = 0; warpY < numYPerWarp; ++warpY){ auto y = wid*numYPerWarp + warpY; if (y < myYLen and (hOffset+lid) < hiddenSize) warpSum += myGrad[y*strideY + lid]; } // transpose partial sum in SMEM and reduce further using warpReduce smem[lid*numWarp + wid] = warpSum; __syncthreads(); auto sum = smem[wid*C10_WARP_SIZE + lid]; sum = warpReduce(sum, numWarp); // a a b b c c d d // a a b b c c d d // a a b b c c d d // a a b b c c d d // example of 4 warps (a, b, c, d) with 8 threads per warp // Each warp need 8 / 4 = 2 threads to write the results. if (hOffset+wid*C10_WARP_SIZE/numWarp+lid/numWarp < hiddenSize){ if (lid % numWarp == 0){ myInGrad[wid*C10_WARP_SIZE/numWarp + lid/numWarp] = sum; } } } else if (wid == 0 and hOffset + lid < hiddenSize){ // Need to ensure the grad is zero for don't care region myInGrad[lid] = 0; } } // Actual bwd (reduction) kernel get launched. // Call transducer_joint_single_backward twice on two input tensors. // The two bwd ops are launched together, the first op uses blockIdx.y < maxFLen, and the second op // uses the rest. template <typename scalar_t, typename acc_t, class OffsetCal> __global__ void transducer_joint_combined_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, scalar_t *fGrad, scalar_t *gGrad) { if (blockIdx.y < maxFLen){ transducer_joint_single_backward<scalar_t, acc_t, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, false, fGrad); } else{ transducer_joint_single_backward<scalar_t, acc_t, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, true, gGrad, maxFLen); } } // Vectorized version of transducer_joint_single_backward // Doing exact same operation as transducer_joint_single_backward except the load and store are // vectorized. // When packing is enabled in the fwd op, unpacking is needed to restore the gradients in a // non-packed form. 
template <typename scalar_t, typename acc_t, typename vec_t, int V, class OffsetCal> __device__ void transducer_joint_single_vec_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, bool bwdFasterDim, scalar_t *inGrad, int yBlockOffset=0){ const int batch = blockIdx.z; const int x = blockIdx.y - yBlockOffset; const int hOffset = blockIdx.x*C10_WARP_SIZE*V; const int wid = threadIdx.y; const int lid = threadIdx.x; const int numWarp = blockDim.y; OffsetCal offsetCal(batch, batchOffset, fLen, gLen, maxFLen, maxGLen, hiddenSize, packOutput, bwdFasterDim); const auto maxXLen = offsetCal.getMaxXLen(); const auto myXLen = offsetCal.getMyXLen(); const auto myYLen = offsetCal.getMyYLen(); scalar_t *myInGrad = inGrad + batch*maxXLen*hiddenSize + x*hiddenSize + hOffset; extern __shared__ char smem8[]; auto smem = reinterpret_cast<acc_t*>(smem8); acc_t warpSum[V]; scalar_t inBuffer[V]; scalar_t outBuffer[V]; auto myInGradVec = reinterpret_cast<vec_t*>(myInGrad); auto outBufferVec = reinterpret_cast<vec_t*>(outBuffer); if (x < myXLen){ const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideX = offsetCal.getStrideX(); const auto strideY = offsetCal.getStrideY(); const scalar_t *myGrad = grad + myBatchOffset + x*strideX + hOffset; for (int i = 0; i < V; ++i) warpSum[i] = 0; // Each warp reduces numYPerWarp "y" first auto numYPerWarp = (myYLen+numWarp-1)/numWarp; for (int warpY = 0; warpY < numYPerWarp; ++warpY){ auto y = wid*numYPerWarp + warpY; auto myGradVec = reinterpret_cast<vec_t const *>(myGrad + y*strideY); auto inBufferVec = reinterpret_cast<vec_t*>(inBuffer); if (hOffset + lid*V < hiddenSize and y < myYLen){ *inBufferVec = myGradVec[lid]; // vectorized load #pragma unroll for (int i = 0; i < V; ++i){ warpSum[i] += inBuffer[i]; } } } // transpose partial sum in SMEM and reduce further using warpReduce for (int i = 0; i < V; ++i){ smem[lid*numWarp + wid] = warpSum[i]; __syncthreads(); auto sum = smem[wid*C10_WARP_SIZE + lid]; if (hOffset+(wid*C10_WARP_SIZE/numWarp)*V < hiddenSize){ sum = warpReduce(sum, numWarp); if (lid % numWarp == 0){ outBuffer[i] = sum; } } __syncthreads(); } // a a b b c c d d // a a b b c c d d // a a b b c c d d // a a b b c c d d // example of 4 warps (a, b, c, d) with 8 threads per warp // Each warp need 8 / 4 = 2 threads to write the results. if (lid % numWarp == 0 and hOffset+(wid*C10_WARP_SIZE/numWarp + lid/numWarp)*V < hiddenSize) myInGradVec[wid*C10_WARP_SIZE/numWarp + lid/numWarp] = *outBufferVec; } else if (wid == 0 and hOffset + lid*V < hiddenSize){ // Need to ensure the grad is zero for don't care region myInGradVec[lid] = 0; } } // Vecotrized version of transducer_joint_combined_backward // Call transducer_joint_single_vec_backward twice on two input tensors. // The two bwd ops are launched together, the first op uses blockIdx.y < maxFLen, and the second op // uses the rest. 
template <typename scalar_t, typename acc_t, typename vec_t, int V, class OffsetCal> __global__ void transducer_joint_combined_vec_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, scalar_t *fGrad, scalar_t *gGrad) { if (blockIdx.y < maxFLen){ transducer_joint_single_vec_backward<scalar_t, acc_t, vec_t, V, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, false, fGrad); } else{ transducer_joint_single_vec_backward<scalar_t, acc_t, vec_t, V, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, true, gGrad, maxFLen); } } torch::Tensor transducer_joint_cuda_forward( torch::Tensor f, torch::Tensor g, torch::Tensor fLen, torch::Tensor gLen, torch::Tensor batchOffset, int64_t packedBatch, int opt, bool packOutput, int tileSize){ auto tensorOpt = f.options(); auto dtype = f.scalar_type(); const auto batchSize = f.size(0); const auto maxFLen = f.size(1); const auto maxGLen = g.size(1); const auto hiddenSize = f.size(2); int64_t *batchOffsetPtr = nullptr; torch::Tensor sum; if (!packOutput){ sum = torch::empty({batchSize, maxFLen, maxGLen, hiddenSize}, tensorOpt); batchOffsetPtr = nullptr; } else{ sum = torch::empty({packedBatch, hiddenSize}, tensorOpt); batchOffsetPtr = batchOffset.data_ptr<int64_t>(); } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); TORCH_CHECK(opt == 0 or opt == 1, "Got an invalid optimization level ", opt); // Simple heuristics const int numThread = ::min(128, (static_cast<int>(hiddenSize)+C10_WARP_SIZE-1) / C10_WARP_SIZE * C10_WARP_SIZE); AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_joint_forward", ([&] { if (opt == 0){ // vanilla kernel const int threads = numThread; const dim3 blocks(maxGLen, maxFLen, batchSize); hipLaunchKernelGGL(( transducer_joint_forward<scalar_t, OffsetCalFwd>) , dim3(blocks), dim3(threads), 0, stream, f.data_ptr<scalar_t>(), g.data_ptr<scalar_t>(), fLen.data_ptr<int>(), gLen.data_ptr<int>(), batchOffsetPtr, maxFLen, maxGLen, hiddenSize, packOutput, sum.data_ptr<scalar_t>()); } if (opt == 1){ // tiled version. For simplicity, assume tileF == tileG, even though the kernel can // support more general cases. 
const int threads = numThread; const int hiddenPerBlock = numThread; const int hiddenBlock = (hiddenSize + hiddenPerBlock - 1) / hiddenPerBlock; const dim3 blocks( (maxGLen+tileSize-1)/tileSize * hiddenBlock, (maxFLen+tileSize-1)/tileSize, batchSize); TORCH_CHECK(tileSize == 1 or tileSize == 2 or tileSize == 4, "Expected tileSize to be in [1, 2, 4], but got ", tileSize); switch (tileSize) { #define LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(tile) case tile:\ hipLaunchKernelGGL(( transducer_joint_tiled_forward<scalar_t, tile, tile, OffsetCalFwd>)\ , dim3(blocks), dim3(threads), 0, stream, \ f.data_ptr<scalar_t>(),\ g.data_ptr<scalar_t>(),\ fLen.data_ptr<int>(),\ gLen.data_ptr<int>(),\ batchOffsetPtr,\ maxFLen,\ maxGLen,\ hiddenSize,\ hiddenPerBlock,\ packOutput,\ sum.data_ptr<scalar_t>());\ break; LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(1); LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(2); LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(4); } } })); THCudaCheck(hipGetLastError()); return sum; } std::vector<torch::Tensor> transducer_joint_cuda_backward( torch::Tensor grad, torch::Tensor fLen, torch::Tensor gLen, torch::Tensor batchOffset, int maxFLen, int maxGLen, bool packOutput){ auto tensorOpt = grad.options(); auto dtype = grad.scalar_type(); const int batchSize = fLen.size(0); const int hiddenSize = grad.size(-1); const auto deviceProperties = at::cuda::getCurrentDeviceProperties(); const int maxNumWarp = deviceProperties->maxThreadsPerBlock / C10_WARP_SIZE; torch::Tensor fGrad = torch::empty({batchSize, maxFLen, hiddenSize}, tensorOpt); torch::Tensor gGrad = torch::empty({batchSize, maxGLen, hiddenSize}, tensorOpt); int64_t *batchOffsetPtr = (!packOutput) ? nullptr : batchOffset.data_ptr<int64_t>(); // The number "y" I would like each thread to work on const int workPerThread = 32; // Since the bwd for f and g have the same thread block size, we need to use the max of the two. int numWarp = largestPowerOfTwo((::max(maxFLen, maxGLen) + workPerThread-1) / workPerThread); // Would like to have at least 2 warps numWarp = ::max(2, numWarp); // cap on the maximum number of warps allowed numWarp = ::min(maxNumWarp, numWarp); // Need smem for transposing the partial sum. The partial sum is in a matrix of the shape // numWarp x warpSize const int smemSize = numWarp * C10_WARP_SIZE; const dim3 threads(C10_WARP_SIZE, numWarp, 1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_joint_cuda_backward_kernel", ([&] { auto gradPtr = grad.data_ptr<scalar_t>(); auto fLenPtr = fLen.data_ptr<int>(); auto gLenPtr = gLen.data_ptr<int>(); auto fGradPtr = fGrad.data_ptr<scalar_t>(); auto gGradPtr = gGrad.data_ptr<scalar_t>(); // resolve the acc_t type using acc_t = at::acc_type<scalar_t, true>; using vec_t = uint64_t; constexpr int vectFactor = sizeof(vec_t) / sizeof(scalar_t); constexpr int vecAlignment = std::alignment_of<vec_t>::value; // if all input and output tensors meet the alignment requirement bool memAlign = (reinterpret_cast<uint64_t>(gradPtr) % vecAlignment == 0) and (reinterpret_cast<uint64_t>(fGradPtr) % vecAlignment == 0) and (reinterpret_cast<uint64_t>(gGradPtr) % vecAlignment == 0); if (vectFactor > 1 and hiddenSize%vectFactor == 0 and memAlign){ // If vectorization helps and the alignment requirement is met, use the vectorized // kernel. For simplicity, hiddenSize needs to be a multiple vecFactor. 
const dim3 blocks( (hiddenSize+C10_WARP_SIZE*vectFactor-1)/(C10_WARP_SIZE*vectFactor), maxFLen+maxGLen, batchSize); hipLaunchKernelGGL(( transducer_joint_combined_vec_backward <scalar_t, acc_t, vec_t, vectFactor, OffsetCalBwd>) , dim3(blocks), dim3(threads), smemSize*sizeof(acc_t), 0, gradPtr, fLenPtr, gLenPtr, batchOffsetPtr, maxFLen, maxGLen, hiddenSize, packOutput, fGradPtr, gGradPtr); } else{ const dim3 blocks((hiddenSize+C10_WARP_SIZE-1)/C10_WARP_SIZE, maxFLen + maxGLen, batchSize); hipLaunchKernelGGL(( transducer_joint_combined_backward<scalar_t, acc_t, OffsetCalBwd>) , dim3(blocks), dim3(threads), smemSize*sizeof(acc_t), 0, gradPtr, fLenPtr, gLenPtr, batchOffsetPtr, maxFLen, maxGLen, hiddenSize, packOutput, fGradPtr, gGradPtr); } })); return {fGrad, gGrad}; }
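// ---------------------------------------------------------------------------
// Editor's note: reference-semantics sketch, not part of the original
// extension. For every valid (b, t, u, h) the forward kernels above compute
//     sum[b][t][u][h] = f[b][t][h] + g[b][u][h]
// and, when packOutput is true, only the t < fLen[b], u < gLen[b] region is
// stored, with batchOffset apparently holding the inclusive cumulative sum of
// fLen[b]*gLen[b]. The plain CPU loop below (joint_reference_cpu, a
// hypothetical name) states that contract, e.g. for testing.
// ---------------------------------------------------------------------------
#include <vector>

static void joint_reference_cpu(
	const std::vector<float> &f,   // [B, maxF, H], row-major
	const std::vector<float> &g,   // [B, maxG, H], row-major
	const std::vector<int> &fLen,
	const std::vector<int> &gLen,
	int64_t B, int64_t maxF, int64_t maxG, int64_t H,
	std::vector<float> &packedOut) // appended row by row, [sum_b fLen[b]*gLen[b], H]
{
	for (int64_t b = 0; b < B; ++b)
		for (int64_t t = 0; t < fLen[b]; ++t)
			for (int64_t u = 0; u < gLen[b]; ++u)
				for (int64_t h = 0; h < H; ++h)
					packedOut.push_back(f[(b*maxF + t)*H + h] + g[(b*maxG + u)*H + h]);
}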
db5c99065a250506aa6a7c1a568898c324cdb69b.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <c10/macros/Macros.h> #include <THC/THC.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> // Warp reduce kernels to reduce N groups of data into N numbers, where N = warpSize / width. // width should be a power of 2 and should be less than warpSize. template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduce(scalar_t x, int width=C10_WARP_SIZE){ for (unsigned offset = width/2; offset > 0; offset /= 2){ x += __shfl_down_sync(0xffffffff, x, offset, width); } return x; } inline int largestPowerOfTwo(int x){ int y = 1; while (y <= x) y <<= 1; return y >> 1; } // Helper class to calculate pointer offset that can be shared by different flavors of kernels. // For fwd, batch offset and stride are different for packing and non-packing mode. struct OffsetCalFwd{ __device__ __forceinline__ OffsetCalFwd( int64_t batch, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t gLen, int64_t hiddenSize, bool packOutput) : batch(batch), batchOffset(batchOffset), maxFLen(maxFLen), maxGLen(maxGLen), gLen(gLen), hiddenSize(hiddenSize), packOutput(packOutput) {} int64_t batch; const int64_t *batchOffset; int64_t maxFLen; int64_t maxGLen; int64_t gLen; int64_t hiddenSize; bool packOutput; __device__ __forceinline__ int64_t getBatchOffset(){ return packOutput ? ((batch==0) ? 0 : batchOffset[batch-1])*hiddenSize : batch*maxFLen*maxGLen*hiddenSize; } __device__ __forceinline__ int64_t getStrideF(){ return packOutput ? gLen*hiddenSize : maxGLen*hiddenSize; } }; // Helper class to calculate pointer offset that can be shared by different flavors of kernels // For bwd, batch offset and stride are different for packing and non-packing mode. // The reducion is done for two input tensors. Therefore, generating two sets of offsets // according to bwdFasterDim can lead to a unified implementation in the actual kernel. struct OffsetCalBwd{ __device__ __forceinline__ OffsetCalBwd( int64_t batch, const int64_t *batchOffset, const int *fLen, const int *gLen, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, bool bwdFasterDim) : batch(batch), batchOffset(batchOffset), maxFLen(maxFLen), maxGLen(maxGLen), fLen(fLen), gLen(gLen), hiddenSize(hiddenSize), packOutput(packOutput), bwdFasterDim(bwdFasterDim) {} int64_t batch; const int64_t *batchOffset; const int *fLen; const int *gLen; int64_t maxFLen; int64_t maxGLen; int64_t hiddenSize; bool packOutput; bool bwdFasterDim; // whether doing bwd on the faster moving dimension __device__ __forceinline__ int64_t getBatchOffset(){ return packOutput ? ((batch==0) ? 0 : batchOffset[batch-1])*hiddenSize : batch*maxFLen*maxGLen*hiddenSize; } __device__ __forceinline__ int64_t getMaxXLen(){ return bwdFasterDim ? maxGLen : maxFLen; } __device__ __forceinline__ auto getMyXLen() -> decltype(gLen[batch]){ return bwdFasterDim ? gLen[batch] : fLen[batch]; } __device__ __forceinline__ auto getMyYLen() -> decltype(gLen[batch]){ return bwdFasterDim ? fLen[batch] : gLen[batch]; } __device__ __forceinline__ int64_t getStrideX(){ return bwdFasterDim ? hiddenSize : ((packOutput ? gLen[batch] : maxGLen) * hiddenSize); } __device__ __forceinline__ int64_t getStrideY(){ return bwdFasterDim ? ((packOutput ? gLen[batch] : maxGLen) * hiddenSize) : hiddenSize; } }; // Vanila transducer joint forward kernel // Detail of this joint function can be found in: // [1] Sequence Transduction with Recurrent Neural Networks. 
// f is a tensor of shape [batch, T, H] // g is a tensor of shape [batch, U, H] // the transducer joint does // sum = f.unsqueeze(dim=2) + g.unsqueeze(dim=1) // The resultant tensor is of shape [batch, T, U, H] // Each thread block is working on one "batch" of data in the output tensor, [batch, t, u, :] // This joint function can optionally pack the output where the output tensor with a shape of // [B, T, U, H] is packed into [B_packed, H]. // Don't-care region (t > fLen) or (u > gLen) is removed. // To enable packing, the starting offset for each batch need to be specified with batchOffset. template <typename scalar_t, class OffsetCal> __global__ void transducer_joint_forward( const scalar_t *f, const scalar_t *g, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, scalar_t *sum) { const int batch = blockIdx.z; const int t = blockIdx.y; const int u = blockIdx.x; const auto myFLen = fLen[batch]; const auto myGLen = gLen[batch]; OffsetCal offsetCal(batch, batchOffset, maxFLen, maxGLen, myGLen, hiddenSize, packOutput); const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideF = offsetCal.getStrideF(); scalar_t const *myF = f + batch*maxFLen*hiddenSize + t*hiddenSize; scalar_t const *myG = g + batch*maxGLen*hiddenSize + u*hiddenSize; scalar_t *mySum = sum + myBatchOffset + t*strideF + u * hiddenSize; if (t < myFLen and u < myGLen){ #pragma unroll for (int h = threadIdx.x; h < hiddenSize; h += blockDim.x){ if (h < hiddenSize){ mySum[h] = myF[h] + myG[h]; } } } else if (packOutput == false and t < maxFLen and u < maxGLen){ // Need to write finite data to don't-care region because we instantiate the result tensor // with torch::empty for performance reasons. Even though it is don't-care region, the // contents need to be finite, otherwise could lead to NaN in WGRAD. // In packing mode, this write is no longer necessary as we remove the don't-care region // from the output. // Picking -1 (over 0) here for ease of testing. #pragma unroll for (int h = threadIdx.x; h < hiddenSize; h += blockDim.x){ if (h < hiddenSize){ mySum[h] = -1; } } } } // Tiled version of the joint forward kernel // Detail of this joint function can be found in: // [1] Sequence Transduction with Recurrent Neural Networks. // f is a tensor of shape [batch, T, H] // g is a tensor of shape [batch, U, H] // the transducer joint does // sum = f.unsqueeze(dim=2) + g.unsqueeze(dim=1) // The resultant tensor is of shape [batch, T, U, H] // Each thread is working on a tile of the shape of tileF x tileG in the result tensor. // The input for the tile is first loaded in the register and is reused tileG and tileF times. // This joint function can optionally pack the output where the output tensor with a shape of // [B, T, U, H] is packed into [B_packed, H]. // Don't-care region (t > fLen) or (u > gLen) is removed. // To enable packing, the starting offset for each batch need to be specified with batchOffset. 
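// Illustrative packing example (editor's annotation; the numbers are hypothetical):
// with batchSize = 2, fLen = {2, 3}, gLen = {2, 2} and hidden size H, the packed
// output holds 2*2 + 3*2 = 10 rows of H values and batchOffset = {4, 10}
// (inclusive cumulative sum of fLen[b]*gLen[b]). Element (b=1, t=2, u=1, :) then
// starts at (batchOffset[0] + t*gLen[1] + u)*H = (4 + 2*2 + 1)*H = 9*H, which is
// exactly myBatchOffset + t*strideF + u*hiddenSize as computed via OffsetCalFwd.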
template <typename scalar_t, int tileF, int tileG, class OffsetCal> __global__ void transducer_joint_tiled_forward( const scalar_t *f, const scalar_t *g, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, int64_t hiddenPerBlock, bool packOutput, scalar_t *sum) { const int batch = blockIdx.z; const int t = blockIdx.y * tileF; const int hiddenBlock = (hiddenSize + hiddenPerBlock - 1) / hiddenPerBlock; const int u = blockIdx.x / hiddenBlock * tileG; const int hOffset = (blockIdx.x % hiddenBlock) * hiddenPerBlock; const int h = threadIdx.x; const auto myFLen = fLen[batch]; const auto myGLen = gLen[batch]; OffsetCal offsetCal(batch, batchOffset, maxFLen, maxGLen, myGLen, hiddenSize, packOutput); const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideF = offsetCal.getStrideF(); scalar_t const *myF = f + batch*maxFLen*hiddenSize + t*hiddenSize + hOffset; scalar_t const *myG = g + batch*maxGLen*hiddenSize + u*hiddenSize + hOffset; scalar_t *mySum = sum + myBatchOffset + t*strideF + u*hiddenSize + hOffset; if (t < myFLen and u < myGLen and hOffset+h < hiddenSize){ // register buffers for tiled input reuse scalar_t fBuffer[tileF], gBuffer[tileG]; for (int i = 0; i < tileF; ++i){ if (t + i < myFLen) fBuffer[i] = myF[i*hiddenSize + h]; } for (int j = 0; j < tileG; ++j){ if (u + j < myGLen) gBuffer[j] = myG[j*hiddenSize + h]; } #pragma unroll for (int i = 0; i < tileF; ++i){ if (t + i < myFLen){ #pragma unroll for (int j = 0; j < tileG; ++j){ if (u + j < myGLen) mySum[i*strideF + j*hiddenSize + h] = fBuffer[i] + gBuffer[j]; else if (packOutput == false and u + j < maxGLen) mySum[i*strideF + j*hiddenSize + h] = -1; } } else if (packOutput == false and t + i < maxFLen){ // Again need to write finite data to don't-care region #pragma unroll for (int j = 0; j < tileG; ++j){ if (u + j < maxGLen) mySum[i*strideF + j*hiddenSize + h] = -1; } } } } else if (packOutput == false and t < maxFLen and u < maxGLen and hOffset+h < hiddenSize){ // Only need to ensure the finity in normal mode #pragma unroll for (int i = 0; i < tileF; ++i){ if (t + i < maxFLen){ #pragma unroll for (int j = 0; j < tileG; ++j){ if (u + j < maxGLen) mySum[i*strideF + j*hiddenSize + h] = -1; } } } } } // Bwd operation (reduction) on one input tensor. Since the operation performed for the two input // tensors are exactly the same, only one kernel is needed, and the different indexing offsets // and strides are handled by OffsetCalBwd. // When packing is enabled in the fwd op, unpacking is needed to restore the gradients in a // non-packed form. template <typename scalar_t, typename acc_t, class OffsetCal> __device__ void transducer_joint_single_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, bool bwdFasterDim, // whether bwd on the faster moving dimension (u) scalar_t *inGrad, int yBlockOffset=0) { const int batch = blockIdx.z; // For the second input tensor, this offset need to be subtracted because the first yBlockOffset // sets of thread blocks are for the first input tensor. 
const int x = blockIdx.y-yBlockOffset; const int hOffset = blockIdx.x*C10_WARP_SIZE; const int wid = threadIdx.y; const int lid = threadIdx.x; const int numWarp = blockDim.y; extern __shared__ char smem8[]; auto smem = reinterpret_cast<acc_t*>(smem8); OffsetCal offsetCal(batch, batchOffset, fLen, gLen, maxFLen, maxGLen, hiddenSize, packOutput, bwdFasterDim); const auto maxXLen = offsetCal.getMaxXLen(); const auto myXLen = offsetCal.getMyXLen(); const auto myYLen = offsetCal.getMyYLen(); scalar_t *myInGrad = inGrad + batch*maxXLen*hiddenSize + x*hiddenSize + hOffset; if (x < myXLen){ const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideX = offsetCal.getStrideX(); const auto strideY = offsetCal.getStrideY(); scalar_t const *myGrad = grad + myBatchOffset + x*strideX + hOffset; // Each warp reduces numYPerWarp "y" first acc_t warpSum = 0; auto numYPerWarp = (myYLen+numWarp-1)/numWarp; for (int warpY = 0; warpY < numYPerWarp; ++warpY){ auto y = wid*numYPerWarp + warpY; if (y < myYLen and (hOffset+lid) < hiddenSize) warpSum += myGrad[y*strideY + lid]; } // transpose partial sum in SMEM and reduce further using warpReduce smem[lid*numWarp + wid] = warpSum; __syncthreads(); auto sum = smem[wid*C10_WARP_SIZE + lid]; sum = warpReduce(sum, numWarp); // a a b b c c d d // a a b b c c d d // a a b b c c d d // a a b b c c d d // example of 4 warps (a, b, c, d) with 8 threads per warp // Each warp need 8 / 4 = 2 threads to write the results. if (hOffset+wid*C10_WARP_SIZE/numWarp+lid/numWarp < hiddenSize){ if (lid % numWarp == 0){ myInGrad[wid*C10_WARP_SIZE/numWarp + lid/numWarp] = sum; } } } else if (wid == 0 and hOffset + lid < hiddenSize){ // Need to ensure the grad is zero for don't care region myInGrad[lid] = 0; } } // Actual bwd (reduction) kernel get launched. // Call transducer_joint_single_backward twice on two input tensors. // The two bwd ops are launched together, the first op uses blockIdx.y < maxFLen, and the second op // uses the rest. template <typename scalar_t, typename acc_t, class OffsetCal> __global__ void transducer_joint_combined_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, scalar_t *fGrad, scalar_t *gGrad) { if (blockIdx.y < maxFLen){ transducer_joint_single_backward<scalar_t, acc_t, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, false, fGrad); } else{ transducer_joint_single_backward<scalar_t, acc_t, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, true, gGrad, maxFLen); } } // Vectorized version of transducer_joint_single_backward // Doing exact same operation as transducer_joint_single_backward except the load and store are // vectorized. // When packing is enabled in the fwd op, unpacking is needed to restore the gradients in a // non-packed form. 
template <typename scalar_t, typename acc_t, typename vec_t, int V, class OffsetCal> __device__ void transducer_joint_single_vec_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, bool bwdFasterDim, scalar_t *inGrad, int yBlockOffset=0){ const int batch = blockIdx.z; const int x = blockIdx.y - yBlockOffset; const int hOffset = blockIdx.x*C10_WARP_SIZE*V; const int wid = threadIdx.y; const int lid = threadIdx.x; const int numWarp = blockDim.y; OffsetCal offsetCal(batch, batchOffset, fLen, gLen, maxFLen, maxGLen, hiddenSize, packOutput, bwdFasterDim); const auto maxXLen = offsetCal.getMaxXLen(); const auto myXLen = offsetCal.getMyXLen(); const auto myYLen = offsetCal.getMyYLen(); scalar_t *myInGrad = inGrad + batch*maxXLen*hiddenSize + x*hiddenSize + hOffset; extern __shared__ char smem8[]; auto smem = reinterpret_cast<acc_t*>(smem8); acc_t warpSum[V]; scalar_t inBuffer[V]; scalar_t outBuffer[V]; auto myInGradVec = reinterpret_cast<vec_t*>(myInGrad); auto outBufferVec = reinterpret_cast<vec_t*>(outBuffer); if (x < myXLen){ const auto myBatchOffset = offsetCal.getBatchOffset(); const auto strideX = offsetCal.getStrideX(); const auto strideY = offsetCal.getStrideY(); const scalar_t *myGrad = grad + myBatchOffset + x*strideX + hOffset; for (int i = 0; i < V; ++i) warpSum[i] = 0; // Each warp reduces numYPerWarp "y" first auto numYPerWarp = (myYLen+numWarp-1)/numWarp; for (int warpY = 0; warpY < numYPerWarp; ++warpY){ auto y = wid*numYPerWarp + warpY; auto myGradVec = reinterpret_cast<vec_t const *>(myGrad + y*strideY); auto inBufferVec = reinterpret_cast<vec_t*>(inBuffer); if (hOffset + lid*V < hiddenSize and y < myYLen){ *inBufferVec = myGradVec[lid]; // vectorized load #pragma unroll for (int i = 0; i < V; ++i){ warpSum[i] += inBuffer[i]; } } } // transpose partial sum in SMEM and reduce further using warpReduce for (int i = 0; i < V; ++i){ smem[lid*numWarp + wid] = warpSum[i]; __syncthreads(); auto sum = smem[wid*C10_WARP_SIZE + lid]; if (hOffset+(wid*C10_WARP_SIZE/numWarp)*V < hiddenSize){ sum = warpReduce(sum, numWarp); if (lid % numWarp == 0){ outBuffer[i] = sum; } } __syncthreads(); } // a a b b c c d d // a a b b c c d d // a a b b c c d d // a a b b c c d d // example of 4 warps (a, b, c, d) with 8 threads per warp // Each warp need 8 / 4 = 2 threads to write the results. if (lid % numWarp == 0 and hOffset+(wid*C10_WARP_SIZE/numWarp + lid/numWarp)*V < hiddenSize) myInGradVec[wid*C10_WARP_SIZE/numWarp + lid/numWarp] = *outBufferVec; } else if (wid == 0 and hOffset + lid*V < hiddenSize){ // Need to ensure the grad is zero for don't care region myInGradVec[lid] = 0; } } // Vecotrized version of transducer_joint_combined_backward // Call transducer_joint_single_vec_backward twice on two input tensors. // The two bwd ops are launched together, the first op uses blockIdx.y < maxFLen, and the second op // uses the rest. 
template <typename scalar_t, typename acc_t, typename vec_t, int V, class OffsetCal> __global__ void transducer_joint_combined_vec_backward( const scalar_t *grad, const int *fLen, const int *gLen, const int64_t *batchOffset, int64_t maxFLen, int64_t maxGLen, int64_t hiddenSize, bool packOutput, scalar_t *fGrad, scalar_t *gGrad) { if (blockIdx.y < maxFLen){ transducer_joint_single_vec_backward<scalar_t, acc_t, vec_t, V, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, false, fGrad); } else{ transducer_joint_single_vec_backward<scalar_t, acc_t, vec_t, V, OffsetCal>( grad, fLen, gLen, batchOffset, maxFLen, maxGLen, hiddenSize, packOutput, true, gGrad, maxFLen); } } torch::Tensor transducer_joint_cuda_forward( torch::Tensor f, torch::Tensor g, torch::Tensor fLen, torch::Tensor gLen, torch::Tensor batchOffset, int64_t packedBatch, int opt, bool packOutput, int tileSize){ auto tensorOpt = f.options(); auto dtype = f.scalar_type(); const auto batchSize = f.size(0); const auto maxFLen = f.size(1); const auto maxGLen = g.size(1); const auto hiddenSize = f.size(2); int64_t *batchOffsetPtr = nullptr; torch::Tensor sum; if (!packOutput){ sum = torch::empty({batchSize, maxFLen, maxGLen, hiddenSize}, tensorOpt); batchOffsetPtr = nullptr; } else{ sum = torch::empty({packedBatch, hiddenSize}, tensorOpt); batchOffsetPtr = batchOffset.data_ptr<int64_t>(); } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); TORCH_CHECK(opt == 0 or opt == 1, "Got an invalid optimization level ", opt); // Simple heuristics const int numThread = std::min(128, (static_cast<int>(hiddenSize)+C10_WARP_SIZE-1) / C10_WARP_SIZE * C10_WARP_SIZE); AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_joint_forward", ([&] { if (opt == 0){ // vanilla kernel const int threads = numThread; const dim3 blocks(maxGLen, maxFLen, batchSize); transducer_joint_forward<scalar_t, OffsetCalFwd> <<<blocks, threads, 0, stream>>>( f.data_ptr<scalar_t>(), g.data_ptr<scalar_t>(), fLen.data_ptr<int>(), gLen.data_ptr<int>(), batchOffsetPtr, maxFLen, maxGLen, hiddenSize, packOutput, sum.data_ptr<scalar_t>()); } if (opt == 1){ // tiled version. For simplicity, assume tileF == tileG, even though the kernel can // support more general cases. 
const int threads = numThread; const int hiddenPerBlock = numThread; const int hiddenBlock = (hiddenSize + hiddenPerBlock - 1) / hiddenPerBlock; const dim3 blocks( (maxGLen+tileSize-1)/tileSize * hiddenBlock, (maxFLen+tileSize-1)/tileSize, batchSize); TORCH_CHECK(tileSize == 1 or tileSize == 2 or tileSize == 4, "Expected tileSize to be in [1, 2, 4], but got ", tileSize); switch (tileSize) { #define LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(tile) case tile:\ transducer_joint_tiled_forward<scalar_t, tile, tile, OffsetCalFwd>\ <<<blocks, threads, 0, stream>>>(\ f.data_ptr<scalar_t>(),\ g.data_ptr<scalar_t>(),\ fLen.data_ptr<int>(),\ gLen.data_ptr<int>(),\ batchOffsetPtr,\ maxFLen,\ maxGLen,\ hiddenSize,\ hiddenPerBlock,\ packOutput,\ sum.data_ptr<scalar_t>());\ break; LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(1); LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(2); LAUNCH_TRANSDUCER_JOINT_TILED_FORWARD(4); } } })); THCudaCheck(cudaGetLastError()); return sum; } std::vector<torch::Tensor> transducer_joint_cuda_backward( torch::Tensor grad, torch::Tensor fLen, torch::Tensor gLen, torch::Tensor batchOffset, int maxFLen, int maxGLen, bool packOutput){ auto tensorOpt = grad.options(); auto dtype = grad.scalar_type(); const int batchSize = fLen.size(0); const int hiddenSize = grad.size(-1); const auto deviceProperties = at::cuda::getCurrentDeviceProperties(); const int maxNumWarp = deviceProperties->maxThreadsPerBlock / C10_WARP_SIZE; torch::Tensor fGrad = torch::empty({batchSize, maxFLen, hiddenSize}, tensorOpt); torch::Tensor gGrad = torch::empty({batchSize, maxGLen, hiddenSize}, tensorOpt); int64_t *batchOffsetPtr = (!packOutput) ? nullptr : batchOffset.data_ptr<int64_t>(); // The number "y" I would like each thread to work on const int workPerThread = 32; // Since the bwd for f and g have the same thread block size, we need to use the max of the two. int numWarp = largestPowerOfTwo((std::max(maxFLen, maxGLen) + workPerThread-1) / workPerThread); // Would like to have at least 2 warps numWarp = std::max(2, numWarp); // cap on the maximum number of warps allowed numWarp = std::min(maxNumWarp, numWarp); // Need smem for transposing the partial sum. The partial sum is in a matrix of the shape // numWarp x warpSize const int smemSize = numWarp * C10_WARP_SIZE; const dim3 threads(C10_WARP_SIZE, numWarp, 1); AT_DISPATCH_FLOATING_TYPES_AND_HALF(dtype, "transducer_joint_cuda_backward_kernel", ([&] { auto gradPtr = grad.data_ptr<scalar_t>(); auto fLenPtr = fLen.data_ptr<int>(); auto gLenPtr = gLen.data_ptr<int>(); auto fGradPtr = fGrad.data_ptr<scalar_t>(); auto gGradPtr = gGrad.data_ptr<scalar_t>(); // resolve the acc_t type using acc_t = at::acc_type<scalar_t, true>; using vec_t = uint64_t; constexpr int vectFactor = sizeof(vec_t) / sizeof(scalar_t); constexpr int vecAlignment = std::alignment_of<vec_t>::value; // if all input and output tensors meet the alignment requirement bool memAlign = (reinterpret_cast<uint64_t>(gradPtr) % vecAlignment == 0) and (reinterpret_cast<uint64_t>(fGradPtr) % vecAlignment == 0) and (reinterpret_cast<uint64_t>(gGradPtr) % vecAlignment == 0); if (vectFactor > 1 and hiddenSize%vectFactor == 0 and memAlign){ // If vectorization helps and the alignment requirement is met, use the vectorized // kernel. For simplicity, hiddenSize needs to be a multiple vecFactor. 
const dim3 blocks( (hiddenSize+C10_WARP_SIZE*vectFactor-1)/(C10_WARP_SIZE*vectFactor), maxFLen+maxGLen, batchSize); transducer_joint_combined_vec_backward <scalar_t, acc_t, vec_t, vectFactor, OffsetCalBwd> <<<blocks, threads, smemSize*sizeof(acc_t)>>>( gradPtr, fLenPtr, gLenPtr, batchOffsetPtr, maxFLen, maxGLen, hiddenSize, packOutput, fGradPtr, gGradPtr); } else{ const dim3 blocks((hiddenSize+C10_WARP_SIZE-1)/C10_WARP_SIZE, maxFLen + maxGLen, batchSize); transducer_joint_combined_backward<scalar_t, acc_t, OffsetCalBwd> <<<blocks, threads, smemSize*sizeof(acc_t)>>>( gradPtr, fLenPtr, gLenPtr, batchOffsetPtr, maxFLen, maxGLen, hiddenSize, packOutput, fGradPtr, gGradPtr); } })); return {fGrad, gGrad}; }
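Both backward kernels above finish their reduction with a call to warpReduce(sum, numWarp), a helper that is defined elsewhere in the original source and not shown in this excerpt. As a reference, here is a minimal, hedged sketch of what such a shuffle-based helper could look like (an assumption about its shape, not the author's exact code); after the shared-memory transpose, each aligned group of numWarp consecutive lanes holds the partial sums for one hidden element, so the reduction only needs to run within that group.

// Hedged sketch of a warp-level group reduction (illustrative, not the original helper).
// 'width' is assumed to be a power of two (the launcher above picks numWarp with
// largestPowerOfTwo), so the 4-argument __shfl_down_sync keeps shifts inside each
// aligned sub-group of 'width' lanes.
template <typename acc_t>
__device__ __forceinline__ acc_t warpReduceSketch(acc_t val, int width) {
    for (int offset = width / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset, width);
    return val;
}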
56269514c5ad7f741c2a8b9bfb3bf897ebe444a4.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <time.h>

#define random(a, b) (rand() % (b - a) + a)

void FillMatrix(float *matrix, int row, int col);
void PrintMatrix(float *A, float *B, float *C, int m, int n, int k);

__global__ void MatrixMulCUDA(const float *A, const float *B, float *C, int m, int n, int k, int ThreadBlockSize) {
    const int tid = threadIdx.x;
    const int row = tid;
    for (int i = row; i < m; i = i + ThreadBlockSize) {
        for (int j = 0; j < k; ++j) {
            int temp = 0;
            for (int z = 0; z < n; ++z)
                temp += A[i * n + z] * B[z * k + j];
            C[i * k + j] = temp;
        }
    }
}

int main(int argc, char **argv) {
    if (argc != 4) {
        printf("Wrong Input!\n");
        return 1;
    }
    int m = atoi(argv[1]);
    int n = atoi(argv[2]);
    int k = atoi(argv[3]);
    float *A, *B, *C;
    A = new float[m * n];
    B = new float[n * k];
    C = new float[m * k];
    FillMatrix(A, m, n);
    FillMatrix(B, n, k);

    float elapsedTime;
    float *cuda_A, *cuda_B, *cuda_C;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    hipMalloc((void **)&cuda_A, sizeof(float) * m * n);
    hipMalloc((void **)&cuda_B, sizeof(float) * n * k);
    hipMalloc((void **)&cuda_C, sizeof(float) * m * k);
    hipMemcpy(cuda_A, A, sizeof(float) * m * n, hipMemcpyHostToDevice);
    hipMemcpy(cuda_B, B, sizeof(float) * n * k, hipMemcpyHostToDevice);

    float alpha = 1;
    float beta = 0;
    hipblasHandle_t handle;
    hipblasCreate(&handle);
    hipblasSgemm(handle,
                 HIPBLAS_OP_N,
                 HIPBLAS_OP_N,
                 k,        // number of columns of matrix B
                 m,        // number of rows of matrix A
                 n,        // number of columns of matrix A
                 &alpha,
                 cuda_B, k,
                 cuda_A, n,
                 &beta,
                 cuda_C, k);

    hipMemcpy(C, cuda_C, sizeof(float) * m * k, hipMemcpyDeviceToHost);
    hipFree(cuda_A);
    hipFree(cuda_B);
    hipFree(cuda_C);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Calculation time is %.10f ms\n", elapsedTime);
    // PrintMatrix(A, B, C, m, n, k);

    delete[] A;
    delete[] C;
    delete[] B;
    return 0;
}

void FillMatrix(float *matrix, int row, int col) {
    for (int i = 0; i < row; ++i)
        for (int j = 0; j < col; ++j)
            matrix[i * col + j] = random(0, 9);
}

void PrintMatrix(float *A, float *B, float *C, int m, int n, int k) {
    printf("Matrix A:\n");
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j)
            printf("%f ", A[i * n + j]);
        printf("\n");
    }
    printf("Matrix B:\n");
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j)
            printf("%f ", B[i * k + j]);
        printf("\n");
    }
    printf("Matrix C:\n");
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j)
            printf("%f ", C[i * k + j]);
        printf("\n");
    }
}
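One thing the hipified example above never does is check the hipBLAS return codes; the cudaCheckErrors-style runtime checks used elsewhere in this dataset only inspect the HIP runtime error state, not the BLAS library. A small, hedged sketch of how the BLAS calls could be checked, assuming a header that declares the hipblas* symbols is available; CHECK_HIPBLAS is an illustrative name, not part of the original code.

// Hypothetical helper: abort on any non-success hipBLAS status.
#define CHECK_HIPBLAS(call)                                              \
    do {                                                                 \
        hipblasStatus_t st_ = (call);                                    \
        if (st_ != HIPBLAS_STATUS_SUCCESS) {                             \
            fprintf(stderr, "hipBLAS error %d at %s:%d\n", (int)st_,     \
                    __FILE__, __LINE__);                                 \
            exit(1);                                                     \
        }                                                                \
    } while (0)

// Sketch of use around the calls above:
//   CHECK_HIPBLAS(hipblasCreate(&handle));
//   CHECK_HIPBLAS(hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
//                              k, m, n, &alpha, cuda_B, k, cuda_A, n,
//                              &beta, cuda_C, k));
//   CHECK_HIPBLAS(hipblasDestroy(handle));  // also releases the handle, which the
//                                           // example otherwise never destroys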
56269514c5ad7f741c2a8b9bfb3bf897ebe444a4.cu
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <time.h>

#define random(a, b) (rand() % (b - a) + a)

void FillMatrix(float *matrix, int row, int col);
void PrintMatrix(float *A, float *B, float *C, int m, int n, int k);

__global__ void MatrixMulCUDA(const float *A, const float *B, float *C, int m, int n, int k, int ThreadBlockSize) {
    const int tid = threadIdx.x;
    const int row = tid;
    for (int i = row; i < m; i = i + ThreadBlockSize) {
        for (int j = 0; j < k; ++j) {
            int temp = 0;
            for (int z = 0; z < n; ++z)
                temp += A[i * n + z] * B[z * k + j];
            C[i * k + j] = temp;
        }
    }
}

int main(int argc, char **argv) {
    if (argc != 4) {
        printf("Wrong Input!\n");
        return 1;
    }
    int m = atoi(argv[1]);
    int n = atoi(argv[2]);
    int k = atoi(argv[3]);
    float *A, *B, *C;
    A = new float[m * n];
    B = new float[n * k];
    C = new float[m * k];
    FillMatrix(A, m, n);
    FillMatrix(B, n, k);

    float elapsedTime;
    float *cuda_A, *cuda_B, *cuda_C;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    cudaMalloc((void **)&cuda_A, sizeof(float) * m * n);
    cudaMalloc((void **)&cuda_B, sizeof(float) * n * k);
    cudaMalloc((void **)&cuda_C, sizeof(float) * m * k);
    cudaMemcpy(cuda_A, A, sizeof(float) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_B, B, sizeof(float) * n * k, cudaMemcpyHostToDevice);

    float alpha = 1;
    float beta = 0;
    cublasHandle_t handle;
    cublasCreate(&handle);
    cublasSgemm(handle,
                CUBLAS_OP_N,
                CUBLAS_OP_N,
                k,        // number of columns of matrix B
                m,        // number of rows of matrix A
                n,        // number of columns of matrix A
                &alpha,
                cuda_B, k,
                cuda_A, n,
                &beta,
                cuda_C, k);

    cudaMemcpy(C, cuda_C, sizeof(float) * m * k, cudaMemcpyDeviceToHost);
    cudaFree(cuda_A);
    cudaFree(cuda_B);
    cudaFree(cuda_C);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Calculation time is %.10f ms\n", elapsedTime);
    // PrintMatrix(A, B, C, m, n, k);

    delete[] A;
    delete[] C;
    delete[] B;
    return 0;
}

void FillMatrix(float *matrix, int row, int col) {
    for (int i = 0; i < row; ++i)
        for (int j = 0; j < col; ++j)
            matrix[i * col + j] = random(0, 9);
}

void PrintMatrix(float *A, float *B, float *C, int m, int n, int k) {
    printf("Matrix A:\n");
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j)
            printf("%f ", A[i * n + j]);
        printf("\n");
    }
    printf("Matrix B:\n");
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j)
            printf("%f ", B[i * k + j]);
        printf("\n");
    }
    printf("Matrix C:\n");
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < k; ++j)
            printf("%f ", C[i * k + j]);
        printf("\n");
    }
}
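The argument order of the Sgemm call above deserves a note: cuBLAS assumes column-major storage, and the column-major view of the row-major C (m x k) is C^T (k x m). Since C^T = B^T * A^T, passing B and A swapped with leading dimensions k and n produces the row-major product directly. Below is a hedged, annotated restatement of the very same call (no behavioural change intended), just to spell out which role each argument plays.

// Row-major C(m,k) = A(m,n) * B(n,k) via a column-major SGEMM:
// compute C^T(k,m) = B^T(k,n) * A^T(n,m).
cublasSgemm(handle,
            CUBLAS_OP_N, CUBLAS_OP_N,
            k,           // rows of C^T    (= columns of C and of B)
            m,           // columns of C^T (= rows of C and of A)
            n,           // inner dimension (columns of A = rows of B)
            &alpha,
            cuda_B, k,   // column-major view of row-major B is B^T, ld = k
            cuda_A, n,   // column-major view of row-major A is A^T, ld = n
            &beta,
            cuda_C, k);  // column-major view of row-major C is C^T, ld = k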
b69428d78d88f6d1defd6cb4c1505bcef170562e.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/hip/Normalization.cuh> inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return self.is_contiguous(at::MemoryFormat::ChannelsLast) || self.ndimension() == 2; } namespace at { namespace native { std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } }); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto input_options = self.options(); // Accumulate in higher precision if input is half/bfloat16 if (self.scalar_type() == at::ScalarType::Half || self.scalar_type() == at::ScalarType::BFloat16) { input_options = input_options.dtype(ScalarType::Float); } Tensor save_mean, save_invstd; if (train) { save_mean = at::empty({n_input}, input_options); save_invstd = 
at::empty({n_input}, input_options); } else { save_mean = at::empty({0}, input_options); save_invstd = at::empty({0}, input_options); } at::native::batch_norm_cuda_out( self, weight, bias, running_mean, running_var, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); const Tensor& save_mean = c10::value_or_else(save_mean_opt, [] {return Tensor();}); const Tensor& save_invstd = c10::value_or_else(save_invstd_opt, [] {return Tensor();}); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } }); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { return batch_norm_stats_channels_last_cuda_template<scalar_t>(self, epsilon); } else { return batch_norm_stats_cuda_template<scalar_t, int32_t>(self, epsilon); } } else { return batch_norm_stats_cuda_template<scalar_t, int64_t>(self, epsilon); } }); } Tensor batch_norm_elemt_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const 
Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); auto output = at::empty_like(self, self.suggest_memory_format()); at::native::batch_norm_elemt_cuda_out(self, weight, bias, mean, invstd, epsilon, output); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ batch_norm_elemt_channels_last_cuda_template(output, self, weight, bias, mean, invstd, epsilon); return output; } AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } } }); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? 
running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); // self is grad_output if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_reduce_cuda_channels_last_template(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); if 
(at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); // <sigh> Some workloads depend on passing in half input and float stats, which is // usually handled by cuDNN. However, the JIT sometimes replaces cuDNN calls with this // one so it needs to support the same case, or people start to complain. bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int32_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int32_t>(self, running_mean, running_var, momentum); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int64_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int64_t>(self, running_mean, running_var, momentum); } } }); } } } // namespace at::native
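For context, batch_norm_use_channels_last_kernels above gates the channels-last fast paths purely on the input's memory format, or on the input being 2-D. A small, hedged illustration of which inputs satisfy the check; the shapes are made up for the example and the snippet assumes it runs inside the same ATen build.

// Illustration only: which inputs would take the channels-last fast path.
auto opts = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA);
auto nhwc = at::empty({8, 32, 16, 16}, opts)
                .contiguous(at::MemoryFormat::ChannelsLast);     // NHWC strides
auto flat = at::empty({8, 32}, opts);                            // 2-D input
bool fast_nhwc = batch_norm_use_channels_last_kernels(nhwc);     // true
bool fast_flat = batch_norm_use_channels_last_kernels(flat);     // true
bool fast_nchw = batch_norm_use_channels_last_kernels(
    at::empty({8, 32, 16, 16}, opts));                           // false: plain NCHW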
b69428d78d88f6d1defd6cb4c1505bcef170562e.cu
#include <ATen/native/cuda/Normalization.cuh> inline bool batch_norm_use_channels_last_kernels(const at::Tensor& self) { return self.is_contiguous(at::MemoryFormat::ChannelsLast) || self.ndimension() == 2; } namespace at { namespace native { std::tuple<Tensor&, Tensor&, Tensor&> batch_norm_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon, Tensor& output, Tensor& save_mean, Tensor& save_invstd) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int32_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_cuda_template<scalar_t, float, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } else { batch_norm_cuda_template<scalar_t, scalar_t, int64_t>(output, save_mean, save_invstd, self, weight, bias, running_mean, running_var, train, momentum, epsilon); } } }); return std::tuple<Tensor&, Tensor&, Tensor&>(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, bool train, double momentum, double epsilon) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto output = at::empty_like(self, at::MemoryFormat::Contiguous); int64_t n_input = self.size(1); auto input_options = self.options(); // Accumulate in higher precision if input is half/bfloat16 if (self.scalar_type() == at::ScalarType::Half || self.scalar_type() == at::ScalarType::BFloat16) { input_options = input_options.dtype(ScalarType::Float); } Tensor save_mean, save_invstd; if (train) { save_mean = at::empty({n_input}, input_options); save_invstd = at::empty({n_input}, input_options); } else { save_mean = at::empty({0}, 
input_options); save_invstd = at::empty({0}, input_options); } at::native::batch_norm_cuda_out( self, weight, bias, running_mean, running_var, train, momentum, epsilon, output, save_mean, save_invstd); return std::make_tuple(output, save_mean, save_invstd); } std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cuda(const Tensor& grad_out, const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, const c10::optional<Tensor>& save_mean_opt, const c10::optional<Tensor>& save_invstd_opt, bool train, double epsilon, std::array<bool,3> grad_input_mask) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); const Tensor& save_mean = c10::value_or_else(save_mean_opt, [] {return Tensor();}); const Tensor& save_invstd = c10::value_or_else(save_invstd_opt, [] {return Tensor();}); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_cuda", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int32_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_cuda_template<scalar_t, float, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } else { return batch_norm_backward_cuda_template<scalar_t, scalar_t, int64_t>(grad_out, self, weight, running_mean, running_var, save_mean, save_invstd, train, epsilon, grad_input_mask); } } }); } std::tuple<Tensor, Tensor> batch_norm_stats_cuda(const Tensor& self, double epsilon) { bool use_channels_last_kernel = batch_norm_use_channels_last_kernels(self); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_stats_cuda", [&] { if (cuda::detail::canUse32BitIndexMath(self)) { if (use_channels_last_kernel) { return batch_norm_stats_channels_last_cuda_template<scalar_t>(self, epsilon); } else { return batch_norm_stats_cuda_template<scalar_t, int32_t>(self, epsilon); } } else { return batch_norm_stats_cuda_template<scalar_t, int64_t>(self, epsilon); } }); } Tensor batch_norm_elemt_cuda(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); auto output 
= at::empty_like(self, self.suggest_memory_format()); at::native::batch_norm_elemt_cuda_out(self, weight, bias, mean, invstd, epsilon, output); return output; } Tensor& batch_norm_elemt_cuda_out(const Tensor& self, const c10::optional<Tensor>& weight_opt, const c10::optional<Tensor>& bias_opt, const Tensor& mean, const Tensor& invstd, double epsilon, Tensor& output) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); const Tensor& bias = c10::value_or_else(bias_opt, [] {return Tensor();}); if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ batch_norm_elemt_channels_last_cuda_template(output, self, weight, bias, mean, invstd, epsilon); return output; } AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int32_t>(output, self, weight, bias, mean, invstd, epsilon); } } else { if (is_half_float || is_bfloat16_float) { batch_norm_elemt_cuda_template<scalar_t, float, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } else { batch_norm_elemt_cuda_template<scalar_t, scalar_t, int64_t>(output, self, weight, bias, mean, invstd, epsilon); } } }); return output; } // accepting input(self) here to determine template data types, since running_mean/running_var are optional std::tuple<Tensor, Tensor> batch_norm_gather_stats_cuda(const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum, double epsilon, int64_t count) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); std::vector<int64_t> counts(mean.size(0), count); Tensor counts_ = at::from_blob((void*)counts.data(), {(int64_t)counts.size()}, self.options().dtype(at::kLong).device(at::kCPU)); counts_ = counts_.to(self.device()).to(running_mean.defined() ? running_mean.dtype() : self.dtype()); return batch_norm_gather_stats_with_counts_cuda(self, mean, invstd, running_mean, running_var, momentum, epsilon, counts_); } std::tuple<Tensor, Tensor> batch_norm_gather_stats_with_counts_cuda( const Tensor& self, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& running_mean_opt /* optional */, const c10::optional<Tensor>& running_var_opt /* optional */, double momentum, double epsilon, const Tensor& counts) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); auto scalar_type = running_mean.defined() ? 
running_mean.scalar_type() : self.scalar_type(); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "batch_norm_update_stats_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (cuda::detail::canUse32BitIndexMath(self)) { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int32_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } else { return batch_norm_gather_stats_cuda_template<scalar_t, accscalar_t, int64_t>(mean, invstd, running_mean, running_var, momentum, epsilon, counts); } }); } std::tuple<Tensor, Tensor, Tensor, Tensor> batch_norm_backward_reduce_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, bool input_g, bool weight_g, bool bias_g) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); // self is grad_output if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_reduce_cuda_channels_last_template(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_reduce", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_reduce_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } else { return batch_norm_backward_reduce_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, input_g, weight_g, bias_g); } } }); } Tensor batch_norm_backward_elemt_cuda(const Tensor& self, const Tensor& input, const Tensor& mean, const Tensor& invstd, const c10::optional<Tensor>& weight_opt, const Tensor& sum_dy, const Tensor& sum_dy_xmu, const Tensor& count) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& weight = c10::value_or_else(weight_opt, [] {return Tensor();}); if (at::cuda::detail::canUse32BitIndexMath(self) && batch_norm_use_channels_last_kernels(self)){ return batch_norm_backward_elemt_channels_last_cuda_template(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward_elemt", [&] { auto mean_st = mean.dtype(); auto invstd_st = invstd.dtype(); TORCH_CHECK(mean_st == invstd_st, "mean and invstd need to have the same data types"); bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || 
is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int32_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_backward_elemt_cuda_template<scalar_t, float, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } else { return batch_norm_backward_elemt_cuda_template<scalar_t, scalar_t, int64_t>(self, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count); } } }); } std::tuple<Tensor, Tensor> batch_norm_update_stats_cuda( const Tensor& self, const c10::optional<Tensor>& running_mean_opt, const c10::optional<Tensor>& running_var_opt, double momentum) { // See [Note: hacky wrapper removal for optional tensor] const Tensor& running_mean = c10::value_or_else(running_mean_opt, [] {return Tensor();}); const Tensor& running_var = c10::value_or_else(running_var_opt, [] {return Tensor();}); return AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "batch_norm_backward", [&] { auto mean_st = running_mean.dtype(); auto var_st = running_var.dtype(); TORCH_CHECK(mean_st == var_st, "running_mean and running_var need to have the same data types"); // <sigh> Some workloads depend on passing in half input and float stats, which is // usually handled by cuDNN. However, the JIT sometimes replaces cuDNN calls with this // one so it needs to support the same case, or people start to complain. bool is_half_float = std::is_same<scalar_t, at::Half>::value && mean_st == at::kFloat; bool is_bfloat16_float = std::is_same<scalar_t, at::BFloat16>::value && mean_st == at::kFloat; if (cuda::detail::canUse32BitIndexMath(self)) { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int32_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int32_t>(self, running_mean, running_var, momentum); } } else { if (is_half_float || is_bfloat16_float) { return batch_norm_update_stats_cuda_template<scalar_t, float, int64_t>(self, running_mean, running_var, momentum); } else { return batch_norm_update_stats_cuda_template<scalar_t, scalar_t, int64_t>(self, running_mean, running_var, momentum); } } }); } } } // namespace at::native
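The entry points above are exposed through ATen as composable ops (they are the pieces a synchronized batch norm is built from): statistics are computed per device, optionally gathered across devices, and then applied elementwise. A minimal, hedged C++ sketch of chaining two of them; the wrapper name, tensor handling, and epsilon value are illustrative assumptions.

#include <ATen/ATen.h>

// Illustration only: per-device stats followed by elementwise normalization,
// using the ops whose CUDA implementations are defined above.
at::Tensor sync_bn_like_step(const at::Tensor& x, double eps) {
    // mean/invstd are reduced over every dimension except the channel dim (dim 1).
    auto stats = at::batch_norm_stats(x, eps);
    at::Tensor mean   = std::get<0>(stats);
    at::Tensor invstd = std::get<1>(stats);
    // Empty optionals: no affine weight/bias applied in this sketch.
    return at::batch_norm_elemt(x, /*weight=*/{}, /*bias=*/{}, mean, invstd, eps);
}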
gpu_pc_v2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************** * * This experiment optimizes packet classification * in the following aspects: * 1. Thread assignment * 2. Memory coalescing * * Experiment Assumptions: * 1. 510 Non-overlapping intervals * 2. 1024 Rules (510 * 1024 element BVs) * 3. Number of packets varies, 1 kernel * 4. All packets are already on CPU memory * 5. All fields needs prefix/range match * ********************************************************/ #include <iostream> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <rocblas.h> #define FIELD 6 #define RULE 31 #define ALLRULE 128 #define WSIZE 32 #define int_count ALLRULE/sizeof(long int) #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) using namespace std; void header_gen(int**, int**, int, int); void tree_gen(int**, int, int); void bv_gen(long int**, long int*, int); void data_test(int**, int**, long int**, int*, int, int); __global__ void packet_classify(int* gpu_tree, int* gpu_headers, long int* gpu_bv, int* gpu_match_result, long int* gpu_merge_result, int packet_num, int block_dim){ __shared__ int gpu_tree_shared[FIELD*RULE]; int level = 0; while(level * block_dim + threadIdx.x < FIELD * RULE){ gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x]; level++; } __syncthreads(); //int index = blockDim.x * blockIdx.x + threadIdx.x; //int tree_idx = index / packet_num * RULE; int i = 0; if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * FIELD){ while (i < RULE){ i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2; } gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE; } __syncthreads(); if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * int_count){ int index = blockDim.x * blockIdx.x + threadIdx.x; int packetIdx = index/int_count; gpu_merge_result[index] = gpu_bv[gpu_match_result[packetIdx*FIELD]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*FIELD+1]*int_count + index%int_count]; /* gpu_bv[gpu_match_result[packetIdx*15+2]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+3]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+4]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+5]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+6]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+7]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+8]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+9]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+10]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+11]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+12]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+13]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+14]*int_count + index%int_count]; */ } }; int main(int argc, char** argv){ if(argc!=4){ 
cout<<"usage ./openflow *Packet_num *Grid_dim *Block_dim"<<endl; return 0; } int packet_num = atoi(argv[1]); int grid_dim = atoi(argv[2]); int block_dim = atoi(argv[3]); // if (grid_dim*block_dim != packet_num*FIELD){ // cout<<"ERROR: Total number of threads must equal packet_num * FIELD"<<endl; // return 1; // } cout<<"grid_dim: "<<grid_dim<<", block_dim: "<<block_dim<<", packet_num: "<<packet_num<<endl; cout<<"============================ Experiment Starts ============================"<<endl; /******************************************************** * Preparing Data: * 1. Generate random headers * 2. Generate BVs * 3. Generate random packets * 4. Deliberately make some rule-matching packets ********************************************************/ srand(time(NULL)); int** tree = new int*[FIELD]; for(int i = 0; i < FIELD; i++){ tree[i] = new int[RULE]; } int** headers = new int*[FIELD]; for (int i = 0; i < FIELD; i++){ headers[i] = new int[packet_num]; } long int** bv = new long int*[FIELD*(RULE+1)]; for(int i = 0; i < FIELD*(RULE+1); i++){ bv[i] = new long int[ALLRULE / sizeof(long int)]; } long int* bv_final = new long int[packet_num * int_count]; int* match_result = new int[packet_num * FIELD]; long int* merge_results = new long int[int_count*packet_num]; tree_gen(tree, FIELD, RULE); header_gen(headers, tree, FIELD, packet_num); bv_gen(bv, bv_final, packet_num); //data_test(tree, headers, bv, bv_final, packet_num, 3); /******************************************************** * Flatten All the 2D Arrays ********************************************************/ int* tree_flatten = new int[RULE*FIELD]; int* headers_flatten = new int[packet_num*FIELD]; long int* bv_flatten = new long int[FIELD*(RULE+1) * ALLRULE / sizeof(long int)]; for (int i = 0; i < FIELD; i++){ for (int j = 0; j < RULE; j++){ tree_flatten[i*RULE+j] = tree[i][j]; } } for (int i = 0; i < FIELD; i++){ for (int j = 0; j < packet_num; j++){ headers_flatten[i*packet_num + j] = headers[i][j]; } } for (int i = 0; i < FIELD*(RULE+1); i++){ for (int j = 0; j < ALLRULE / sizeof(long int); j++){ bv_flatten[i*ALLRULE / sizeof(long int) + j] = bv[i][j]; } } /******************************************************** * Declare cuda events for statistical purposes: * 1. time_memcpyH2D * 2. time_memcpyD2H * 3. time_pc ********************************************************/ float time1, time2, time3; hipEvent_t time_memcpyH2D_start, time_memcpyH2D_stop, time_memcpyD2H_start, time_memcpyD2H_stop, time_comp_start, time_comp_stop; hipEventCreate(&time_memcpyH2D_start); hipEventCreate(&time_memcpyH2D_stop); hipEventCreate(&time_memcpyD2H_start); hipEventCreate(&time_memcpyD2H_stop); hipEventCreate(&time_comp_start); hipEventCreate(&time_comp_stop); /******************************************************** * Allocate Space in Device: * 1. gpu_tree * 2. gpu_bv * 3. gpu_bv_final * 4. 
gpu_headers ********************************************************/ dim3 dimGrid(grid_dim,1); dim3 dimBlock(block_dim,1); int* gpu_tree; int* gpu_headers; int* gpu_match_result; long int* gpu_merge_results; long int* gpu_bv; hipMalloc((void**)&gpu_tree, sizeof(int*)*size_t(FIELD*RULE)); cudaCheckErrors("hipMalloc gpu_tree"); hipMalloc((void**)&gpu_headers, sizeof(int)*FIELD*packet_num); cudaCheckErrors("hipMalloc gpu_headers"); hipMalloc((void**)&gpu_bv, sizeof(long int) * FIELD*(RULE+1) * int_count); cudaCheckErrors("hipMalloc gpu_bv"); hipMalloc((void**)&gpu_match_result, sizeof(int)*packet_num*FIELD); cudaCheckErrors("hipMalloc gpu_match_result"); hipMalloc((void**)&gpu_merge_results, sizeof(long int)*packet_num*int_count); cudaCheckErrors("hipMalloc gpu_merge_results"); hipEventRecord(time_memcpyH2D_start, 0); hipMemcpy(gpu_tree, tree_flatten, sizeof(int)*RULE*FIELD, hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy gpu_tree"); hipMemcpy(gpu_headers, headers_flatten, sizeof(int)*FIELD*packet_num, hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy gpu_headers"); hipMemcpy(gpu_bv, bv_flatten, sizeof(long int) * FIELD*(RULE+1) * int_count, hipMemcpyHostToDevice); cudaCheckErrors("hipMemcpy gpu_bv"); hipEventRecord(time_memcpyH2D_stop, 0); hipEventSynchronize(time_memcpyH2D_stop); hipEventElapsedTime(&time1, time_memcpyH2D_start, time_memcpyH2D_stop); hipEventDestroy(time_memcpyH2D_stop); hipEventDestroy(time_memcpyH2D_start); cout<<endl<<"* 1. Time for memcpy H2D: "<<time1<<"ms, Total bytes copied: "<<endl; cout<<" -> Tree: "<< sizeof(int)*RULE*FIELD<<" Bytes"<<endl; cout<<" -> Headers: "<< sizeof(long int)*FIELD*packet_num<<" Bytes"<<endl; cout<<" -> Bv: "<< sizeof(long int) * FIELD*(RULE+1) * int_count<<" Bytes"<<endl; cout<<" -> Total Memory Copy: "<< sizeof(int)*RULE*FIELD + sizeof(long int)*FIELD*packet_num + sizeof(long int) * FIELD*(RULE+1) * int_count<<" Bytes"<<endl; /******************************************************** * Main Packet Classification Process: * 1. Function Call * 2. Timing * 3. Memory copy back (gpu_bv_final) ********************************************************/ hipEventRecord(time_comp_start, 0); hipLaunchKernelGGL(( packet_classify), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_tree, gpu_headers, gpu_bv, gpu_match_result, gpu_merge_results, packet_num, block_dim); cudaCheckErrors("Kernel fail"); hipEventRecord(time_comp_stop, 0); hipEventSynchronize(time_comp_stop); hipEventElapsedTime(&time2, time_comp_start, time_comp_stop); hipEventDestroy(time_comp_stop); hipEventDestroy(time_comp_start); cout<<endl<<"* 2. Time for GPU computation: "<<time2<<"ms, GPU throughput: "<<packet_num/time2/1000<<" MPPS"<<endl; hipEventRecord(time_memcpyD2H_start, 0); hipMemcpy(bv_final, gpu_merge_results, sizeof(long int) * packet_num * int_count, hipMemcpyDeviceToHost); hipEventRecord(time_memcpyD2H_stop, 0); hipEventSynchronize(time_memcpyD2H_stop); hipEventElapsedTime(&time3, time_memcpyD2H_start, time_memcpyD2H_stop); hipEventDestroy(time_memcpyD2H_stop); hipEventDestroy(time_memcpyD2H_start); cout<<endl<<"* 3. Time for memcpy H2D: "<<time3<<"ms, Total bytes copied: "<<endl; cout<<" -> Bv_final: "<< sizeof(long int) * packet_num * int_count<<" Bytes"<<endl<<endl; cout<<endl<<">>>>>> Total GPU throughput: "<<packet_num/(time1 + time2 + time3)/1000<<" MPPS"<<endl; //data_test(tree, headers, bv, bv_final, packet_num, 8); /******************************************************** * Clear Memory: * 1. Dynamic allocations on host * 2. 
cudaFrees ********************************************************/ hipFree(gpu_tree); cudaCheckErrors("Free gpu_tree fail"); hipFree(gpu_bv); cudaCheckErrors("Free bv fail"); hipFree(gpu_headers); cudaCheckErrors("Free gpu_headers fail"); hipFree(gpu_match_result); cudaCheckErrors("Free gpu_match_result fail"); hipFree(gpu_merge_results); cudaCheckErrors("Free gpu_merge_results fail"); for (int i = 0; i < FIELD; i++){ delete tree[i]; } for(int i = 0; i < FIELD; i++){ delete headers[i]; } for(int i = 0; i < FIELD*(RULE+1); i++){ delete bv[i]; } delete tree; delete bv; delete headers; delete bv_final; delete match_result; delete tree_flatten; delete headers_flatten; delete bv_flatten; delete merge_results; cout<<"============================ Experiment Ends ============================"<<endl; return 0; } void tree_gen(int** tree, int field, int rule){ for(int i = 0; i < field; i++){ tree[i][0] = rand() % 100; int temp[rule]; temp[0] = tree[i][0]; for (int j = 1; j < rule; j++){ temp[j] = temp[j-1] + rand() % 20 + 1; } int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2); int step_index = level; while (step_index >= 1){ int step = pow(2, (level - step_index + 1)); while (temp_index >= 0){ tree[i][tree_index] = temp[temp_index]; temp_index -= step; tree_index--; } step_index--; temp_index = rule - 1 - (pow(2, level - step_index) - 1); } } } void header_gen(int** headers, int** tree, int field, int packet_num){ for (int i = 0; i < field; i++){ for(int j = 0; j < packet_num; j++){ headers[i][j] = rand() % 6000; } } } void bv_gen(long int ** bv, long int* bv_final, int packet_num){ for (int i = 0; i < ALLRULE / sizeof(long int); i++){ for (int j = 0; j < FIELD*(RULE+1); j++){ bv[j][i] = rand() % 1000000; } } for(int i = 0; i < packet_num; i++){ bv_final[i] = -1; } } void data_test(int** tree, int** headers, long int** bv, int* bv_final, int packet_num, int type){ if (type > 15 | type == 0){ return; } if (type % 2 == 1){ cout<<"Tree: "<<endl; for(int i = 0; i < RULE; i++){ cout<<"Line: "<<i<<": "; for(int j = 0; j < FIELD; j++){ cout<<tree[j][i]<<" "; } cout<<endl; } } if (type % 4 == 2 | type % 4 == 3){ cout<<endl<<"Headers: "<<endl; for(int i = 0; i < packet_num; i++){ cout<<"Header "<<i<<": "; for(int j = 0; j < FIELD; j++){ cout<<headers[j][i]<<" "; } cout<<endl; } } if (type % 8 == 4 | type % 8 == 5 | type % 8 == 6 | type % 8 == 7){ cout<<endl<<"bv: "<<endl; for(int i = 0; i < ALLRULE; i++){ cout<<"Line "<<i<<": "; for (int j = 0; j < FIELD*(RULE+1); j++){ cout<<bv[j][i]<<" "; } cout<<endl; } } if (type > 7){ cout<<endl<<"bv_final: "<<endl; for(int i = 0; i < packet_num; i++){ cout<<bv_final[i]<<" "; } cout<<endl; } cout<<"============== End of Print =============="<<endl; }
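To sanity-check the kernel output on the host, a plain CPU reference of the same traversal and merge is handy. The sketch below mirrors the device logic above, reusing this file's flattened layouts and the FIELD/RULE/int_count constants, and merges only the first two fields exactly as the kernel currently does; it is an added illustration, not part of the original experiment. One detail worth keeping in mind while comparing results: the kernel appears to write its match results at a field-major index (field * packet_num + packet) but reads them packet-major (packet * FIELD + field) in the merge stage, whereas the reference keeps a single packet-major layout throughout.

// CPU reference for packet_classify: per-field interval search on the implicit
// binary tree, then a bitwise AND of the matched bit-vector words.
void packet_classify_ref(const int* tree_flatten, const int* headers_flatten,
                         const long int* bv_flatten, long int* merge_out,
                         int packet_num) {
    int* match = new int[packet_num * FIELD];
    for (int f = 0; f < FIELD; f++) {
        for (int p = 0; p < packet_num; p++) {
            int h = headers_flatten[f * packet_num + p];
            int i = 0;
            while (i < RULE)                      // descend the implicit binary tree
                i = 2 * i + ((h <= tree_flatten[f * RULE + i]) ? 1 : 2);
            match[p * FIELD + f] = i - RULE;      // leaf (interval) index for this field
        }
    }
    for (int p = 0; p < packet_num; p++) {
        for (int w = 0; w < (int)int_count; w++) {
            merge_out[p * int_count + w] =
                bv_flatten[match[p * FIELD + 0] * int_count + w] &
                bv_flatten[match[p * FIELD + 1] * int_count + w];
        }
    }
    delete[] match;
}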
gpu_pc_v2.cu
/******************************************************** * * This experiment optimizes packet classification * in the following aspects: * 1. Thread assignment * 2. Memory coalescing * * Experiment Assumptions: * 1. 510 Non-overlapping intervals * 2. 1024 Rules (510 * 1024 element BVs) * 3. Number of packets varies, 1 kernel * 4. All packets are already on CPU memory * 5. All fields needs prefix/range match * ********************************************************/ #include <iostream> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <cublas.h> #define FIELD 6 #define RULE 31 #define ALLRULE 128 #define WSIZE 32 #define int_count ALLRULE/sizeof(long int) #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) using namespace std; void header_gen(int**, int**, int, int); void tree_gen(int**, int, int); void bv_gen(long int**, long int*, int); void data_test(int**, int**, long int**, int*, int, int); __global__ void packet_classify(int* gpu_tree, int* gpu_headers, long int* gpu_bv, int* gpu_match_result, long int* gpu_merge_result, int packet_num, int block_dim){ __shared__ int gpu_tree_shared[FIELD*RULE]; int level = 0; while(level * block_dim + threadIdx.x < FIELD * RULE){ gpu_tree_shared[level * block_dim + threadIdx.x] = gpu_tree[level * block_dim + threadIdx.x]; level++; } __syncthreads(); //int index = blockDim.x * blockIdx.x + threadIdx.x; //int tree_idx = index / packet_num * RULE; int i = 0; if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * FIELD){ while (i < RULE){ i = 2 * i + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] <= gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 1 + (gpu_headers[blockDim.x * blockIdx.x + threadIdx.x] > gpu_tree_shared[(blockDim.x * blockIdx.x + threadIdx.x) / packet_num * RULE+i]) * 2; } gpu_match_result[blockDim.x * blockIdx.x + threadIdx.x] = i - RULE; } __syncthreads(); if (blockDim.x * blockIdx.x + threadIdx.x < packet_num * int_count){ int index = blockDim.x * blockIdx.x + threadIdx.x; int packetIdx = index/int_count; gpu_merge_result[index] = gpu_bv[gpu_match_result[packetIdx*FIELD]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*FIELD+1]*int_count + index%int_count]; /* gpu_bv[gpu_match_result[packetIdx*15+2]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+3]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+4]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+5]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+6]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+7]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+8]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+9]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+10]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+11]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+12]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+13]*int_count + index%int_count] & gpu_bv[gpu_match_result[packetIdx*15+14]*int_count + index%int_count]; */ } }; int main(int argc, char** argv){ if(argc!=4){ cout<<"usage ./openflow *Packet_num *Grid_dim *Block_dim"<<endl; return 0; } int 
packet_num = atoi(argv[1]); int grid_dim = atoi(argv[2]); int block_dim = atoi(argv[3]); // if (grid_dim*block_dim != packet_num*FIELD){ // cout<<"ERROR: Total number of threads must equal packet_num * FIELD"<<endl; // return 1; // } cout<<"grid_dim: "<<grid_dim<<", block_dim: "<<block_dim<<", packet_num: "<<packet_num<<endl; cout<<"============================ Experiment Starts ============================"<<endl; /******************************************************** * Preparing Data: * 1. Generate random headers * 2. Generate BVs * 3. Generate random packets * 4. Deliberately make some rule-matching packets ********************************************************/ srand(time(NULL)); int** tree = new int*[FIELD]; for(int i = 0; i < FIELD; i++){ tree[i] = new int[RULE]; } int** headers = new int*[FIELD]; for (int i = 0; i < FIELD; i++){ headers[i] = new int[packet_num]; } long int** bv = new long int*[FIELD*(RULE+1)]; for(int i = 0; i < FIELD*(RULE+1); i++){ bv[i] = new long int[ALLRULE / sizeof(long int)]; } long int* bv_final = new long int[packet_num * int_count]; int* match_result = new int[packet_num * FIELD]; long int* merge_results = new long int[int_count*packet_num]; tree_gen(tree, FIELD, RULE); header_gen(headers, tree, FIELD, packet_num); bv_gen(bv, bv_final, packet_num); //data_test(tree, headers, bv, bv_final, packet_num, 3); /******************************************************** * Flatten All the 2D Arrays ********************************************************/ int* tree_flatten = new int[RULE*FIELD]; int* headers_flatten = new int[packet_num*FIELD]; long int* bv_flatten = new long int[FIELD*(RULE+1) * ALLRULE / sizeof(long int)]; for (int i = 0; i < FIELD; i++){ for (int j = 0; j < RULE; j++){ tree_flatten[i*RULE+j] = tree[i][j]; } } for (int i = 0; i < FIELD; i++){ for (int j = 0; j < packet_num; j++){ headers_flatten[i*packet_num + j] = headers[i][j]; } } for (int i = 0; i < FIELD*(RULE+1); i++){ for (int j = 0; j < ALLRULE / sizeof(long int); j++){ bv_flatten[i*ALLRULE / sizeof(long int) + j] = bv[i][j]; } } /******************************************************** * Declare cuda events for statistical purposes: * 1. time_memcpyH2D * 2. time_memcpyD2H * 3. time_pc ********************************************************/ float time1, time2, time3; cudaEvent_t time_memcpyH2D_start, time_memcpyH2D_stop, time_memcpyD2H_start, time_memcpyD2H_stop, time_comp_start, time_comp_stop; cudaEventCreate(&time_memcpyH2D_start); cudaEventCreate(&time_memcpyH2D_stop); cudaEventCreate(&time_memcpyD2H_start); cudaEventCreate(&time_memcpyD2H_stop); cudaEventCreate(&time_comp_start); cudaEventCreate(&time_comp_stop); /******************************************************** * Allocate Space in Device: * 1. gpu_tree * 2. gpu_bv * 3. gpu_bv_final * 4. 
gpu_headers ********************************************************/ dim3 dimGrid(grid_dim,1); dim3 dimBlock(block_dim,1); int* gpu_tree; int* gpu_headers; int* gpu_match_result; long int* gpu_merge_results; long int* gpu_bv; cudaMalloc((void**)&gpu_tree, sizeof(int*)*size_t(FIELD*RULE)); cudaCheckErrors("cudaMalloc gpu_tree"); cudaMalloc((void**)&gpu_headers, sizeof(int)*FIELD*packet_num); cudaCheckErrors("cudaMalloc gpu_headers"); cudaMalloc((void**)&gpu_bv, sizeof(long int) * FIELD*(RULE+1) * int_count); cudaCheckErrors("cudaMalloc gpu_bv"); cudaMalloc((void**)&gpu_match_result, sizeof(int)*packet_num*FIELD); cudaCheckErrors("cudaMalloc gpu_match_result"); cudaMalloc((void**)&gpu_merge_results, sizeof(long int)*packet_num*int_count); cudaCheckErrors("cudaMalloc gpu_merge_results"); cudaEventRecord(time_memcpyH2D_start, 0); cudaMemcpy(gpu_tree, tree_flatten, sizeof(int)*RULE*FIELD, cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy gpu_tree"); cudaMemcpy(gpu_headers, headers_flatten, sizeof(int)*FIELD*packet_num, cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy gpu_headers"); cudaMemcpy(gpu_bv, bv_flatten, sizeof(long int) * FIELD*(RULE+1) * int_count, cudaMemcpyHostToDevice); cudaCheckErrors("cudaMemcpy gpu_bv"); cudaEventRecord(time_memcpyH2D_stop, 0); cudaEventSynchronize(time_memcpyH2D_stop); cudaEventElapsedTime(&time1, time_memcpyH2D_start, time_memcpyH2D_stop); cudaEventDestroy(time_memcpyH2D_stop); cudaEventDestroy(time_memcpyH2D_start); cout<<endl<<"* 1. Time for memcpy H2D: "<<time1<<"ms, Total bytes copied: "<<endl; cout<<" -> Tree: "<< sizeof(int)*RULE*FIELD<<" Bytes"<<endl; cout<<" -> Headers: "<< sizeof(long int)*FIELD*packet_num<<" Bytes"<<endl; cout<<" -> Bv: "<< sizeof(long int) * FIELD*(RULE+1) * int_count<<" Bytes"<<endl; cout<<" -> Total Memory Copy: "<< sizeof(int)*RULE*FIELD + sizeof(long int)*FIELD*packet_num + sizeof(long int) * FIELD*(RULE+1) * int_count<<" Bytes"<<endl; /******************************************************** * Main Packet Classification Process: * 1. Function Call * 2. Timing * 3. Memory copy back (gpu_bv_final) ********************************************************/ cudaEventRecord(time_comp_start, 0); packet_classify<<<dimGrid, dimBlock>>>(gpu_tree, gpu_headers, gpu_bv, gpu_match_result, gpu_merge_results, packet_num, block_dim); cudaCheckErrors("Kernel fail"); cudaEventRecord(time_comp_stop, 0); cudaEventSynchronize(time_comp_stop); cudaEventElapsedTime(&time2, time_comp_start, time_comp_stop); cudaEventDestroy(time_comp_stop); cudaEventDestroy(time_comp_start); cout<<endl<<"* 2. Time for GPU computation: "<<time2<<"ms, GPU throughput: "<<packet_num/time2/1000<<" MPPS"<<endl; cudaEventRecord(time_memcpyD2H_start, 0); cudaMemcpy(bv_final, gpu_merge_results, sizeof(long int) * packet_num * int_count, cudaMemcpyDeviceToHost); cudaEventRecord(time_memcpyD2H_stop, 0); cudaEventSynchronize(time_memcpyD2H_stop); cudaEventElapsedTime(&time3, time_memcpyD2H_start, time_memcpyD2H_stop); cudaEventDestroy(time_memcpyD2H_stop); cudaEventDestroy(time_memcpyD2H_start); cout<<endl<<"* 3. Time for memcpy H2D: "<<time3<<"ms, Total bytes copied: "<<endl; cout<<" -> Bv_final: "<< sizeof(long int) * packet_num * int_count<<" Bytes"<<endl<<endl; cout<<endl<<">>>>>> Total GPU throughput: "<<packet_num/(time1 + time2 + time3)/1000<<" MPPS"<<endl; //data_test(tree, headers, bv, bv_final, packet_num, 8); /******************************************************** * Clear Memory: * 1. Dynamic allocations on host * 2. 
cudaFrees ********************************************************/ cudaFree(gpu_tree); cudaCheckErrors("Free gpu_tree fail"); cudaFree(gpu_bv); cudaCheckErrors("Free bv fail"); cudaFree(gpu_headers); cudaCheckErrors("Free gpu_headers fail"); cudaFree(gpu_match_result); cudaCheckErrors("Free gpu_match_result fail"); cudaFree(gpu_merge_results); cudaCheckErrors("Free gpu_merge_results fail"); for (int i = 0; i < FIELD; i++){ delete tree[i]; } for(int i = 0; i < FIELD; i++){ delete headers[i]; } for(int i = 0; i < FIELD*(RULE+1); i++){ delete bv[i]; } delete tree; delete bv; delete headers; delete bv_final; delete match_result; delete tree_flatten; delete headers_flatten; delete bv_flatten; delete merge_results; cout<<"============================ Experiment Ends ============================"<<endl; return 0; } void tree_gen(int** tree, int field, int rule){ for(int i = 0; i < field; i++){ tree[i][0] = rand() % 100; int temp[rule]; temp[0] = tree[i][0]; for (int j = 1; j < rule; j++){ temp[j] = temp[j-1] + rand() % 20 + 1; } int temp_index = rule-1, tree_index = rule -1, level = log(rule+1) / log(2); int step_index = level; while (step_index >= 1){ int step = pow(2, (level - step_index + 1)); while (temp_index >= 0){ tree[i][tree_index] = temp[temp_index]; temp_index -= step; tree_index--; } step_index--; temp_index = rule - 1 - (pow(2, level - step_index) - 1); } } } void header_gen(int** headers, int** tree, int field, int packet_num){ for (int i = 0; i < field; i++){ for(int j = 0; j < packet_num; j++){ headers[i][j] = rand() % 6000; } } } void bv_gen(long int ** bv, long int* bv_final, int packet_num){ for (int i = 0; i < ALLRULE / sizeof(long int); i++){ for (int j = 0; j < FIELD*(RULE+1); j++){ bv[j][i] = rand() % 1000000; } } for(int i = 0; i < packet_num; i++){ bv_final[i] = -1; } } void data_test(int** tree, int** headers, long int** bv, int* bv_final, int packet_num, int type){ if (type > 15 | type == 0){ return; } if (type % 2 == 1){ cout<<"Tree: "<<endl; for(int i = 0; i < RULE; i++){ cout<<"Line: "<<i<<": "; for(int j = 0; j < FIELD; j++){ cout<<tree[j][i]<<" "; } cout<<endl; } } if (type % 4 == 2 | type % 4 == 3){ cout<<endl<<"Headers: "<<endl; for(int i = 0; i < packet_num; i++){ cout<<"Header "<<i<<": "; for(int j = 0; j < FIELD; j++){ cout<<headers[j][i]<<" "; } cout<<endl; } } if (type % 8 == 4 | type % 8 == 5 | type % 8 == 6 | type % 8 == 7){ cout<<endl<<"bv: "<<endl; for(int i = 0; i < ALLRULE; i++){ cout<<"Line "<<i<<": "; for (int j = 0; j < FIELD*(RULE+1); j++){ cout<<bv[j][i]<<" "; } cout<<endl; } } if (type > 7){ cout<<endl<<"bv_final: "<<endl; for(int i = 0; i < packet_num; i++){ cout<<bv_final[i]<<" "; } cout<<endl; } cout<<"============== End of Print =============="<<endl; }
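The match stage in packet_classify (identical in the HIP and CUDA listings above) walks a complete binary search tree of RULE = 31 split points stored in level order: from node i it steps to child 2*i+1 when the header value is at most the node value and to 2*i+2 otherwise, and once i runs past the last internal node the matched interval is i - RULE. A small host-side sketch of the same indexing, using hypothetical names of our own rather than anything from the files above, may make the branch-free update easier to read:

// Host-side illustration of the level-order tree walk used in packet_classify.
// 'nodes' holds num_nodes split points for one field in level order
// (num_nodes = 2^k - 1); the return value is an interval index in
// [0, num_nodes]. Names are illustrative only.
static int interval_lookup(const int* nodes, int num_nodes, int key)
{
    int i = 0;
    while (i < num_nodes) {
        // left child (2*i+1) when key <= nodes[i], right child (2*i+2) otherwise,
        // which is the same as the arithmetic form used in the kernel
        i = 2 * i + ((key <= nodes[i]) ? 1 : 2);
    }
    return i - num_nodes;   // leaf position == matched interval
}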
ca0a679015b6dc1a237b42c5509e755444e7b319.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <sys/time.h> #include <time.h> #include <opencv2/opencv.hpp> #include <math.h> //#include <device_launch_parameters.h> //#include <hip/hip_runtime.h> //#include <helper_cuda.h> //#include <helper_timer.h> //#include <hip/device_functions.h> #define IS_NOT_EDGE(a) (a < min_val) #define IS_STRONG_EDGE(a) (a >= max_val) #define IS_WEAK_EDGE(a) (a >= min_val && a < max_val) using namespace cv; #define SPLIT_SIZE_X 32 #define SPLIT_SIZE_Y 24 #define LINK_SIZE_X 16 #define LINK_SIZE_Y 12 #define BLOCK_SIZE_X 36 #define BLOCK_SIZE_Y 28 /*canny using cuda*/ void CUDA_Canny(); __global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient); __device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output); __device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient); __global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output); __global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny); __device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited); __device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited); __device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j); __device__ short GetGradientDirection(int sobel_x, int sobel_y); void DisplayGradient(short* gradient, int width, int height); unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j); void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output); void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output); void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output); double GetTime() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } int main(void) { printf("CANNY_CUDA\n"); CUDA_Canny(); //system("pause"); return 0; } void CUDA_Canny() { int width = 640; int height = 480; dim3 block_size_extended(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 block_size_normal(SPLIT_SIZE_X, SPLIT_SIZE_Y); dim3 block_size_link(LINK_SIZE_X, LINK_SIZE_Y); dim3 grid_size(width / SPLIT_SIZE_X, height / SPLIT_SIZE_Y); dim3 grid_size_link(width / LINK_SIZE_X, height / LINK_SIZE_Y); Mat img_src, img_sobel, img_gradient, img_canny; VideoCapture camera(0); /*cpu memory*/ unsigned char* cpu_sobel = new unsigned char[width * height]; short* cpu_gradient = new short[width * height]; unsigned char* cpu_canny = new unsigned char[width * height]; /*gpu memory*/ unsigned char* gpu_img; hipMalloc(&gpu_img, width * height * sizeof(unsigned char)); unsigned char* gpu_sobel; hipMalloc(&gpu_sobel, width * height * sizeof(unsigned char)); short* gpu_gradient; hipMalloc(&gpu_gradient, width * height * sizeof(short)); unsigned char* gpu_canny; hipMalloc(&gpu_canny, width * height * sizeof(unsigned 
char)); double start_time, end_time; while (1) { camera >> img_src; //img_src = imread("F:/img_src/15.jpg"); resize(img_src, img_src, Size(width, height), 0, 0); cvtColor(img_src, img_src, CV_BGR2GRAY); //imshow("img_src", img_src); start_time = GetTime(); /*1.copy to gpu memory*/ hipMemcpy(gpu_img, img_src.data, width * height * sizeof(unsigned char), hipMemcpyHostToDevice); /*2.gauss filter*/ CUDA_GaussianAndSobel << <grid_size, block_size_extended >> > (gpu_img, width, height, gpu_sobel, gpu_gradient); hipDeviceSynchronize(); /*3.none max suppress*/ CUDA_NonMaxSuppress << <grid_size, block_size_normal >> > (gpu_sobel, width, height, gpu_gradient, gpu_sobel); hipDeviceSynchronize(); /*4.double threshold*/ CUDA_DoubleThreshold2 << <grid_size_link, dim3(1,1) >> > (gpu_sobel, width, height, 50, 90, gpu_canny); /*copy to cpu memory*/ hipMemcpy(cpu_sobel, gpu_canny, width * height * sizeof(unsigned char), hipMemcpyDeviceToHost); //DoubleThreshold(cpu_sobel, width, height, 50, 90, cpu_canny); //hipMemcpy(cpu_gradient, gpu_gradient, width * height * sizeof(short), hipMemcpyDeviceToHost); //NonMaxSuppress(cpu_sobel, width, height, cpu_gradient, cpu_sobel); end_time = GetTime(); printf("elapse : %.2fms\n", (end_time - start_time)*1000); img_canny = Mat(Size(width, height), CV_8UC1, cpu_sobel); resize(img_canny, img_canny, Size(640, 480), 0, 0); imshow("canny", img_canny); if ('q' == waitKey(1)) { destroyAllWindows(); free(cpu_sobel); cpu_sobel = NULL; free(cpu_canny); cpu_canny = NULL; free(cpu_gradient); cpu_gradient = NULL; hipFree(gpu_img); hipFree(gpu_sobel); hipFree(gpu_gradient); hipFree(gpu_canny); break; } } } __global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient) { __shared__ unsigned char cache[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)]; __shared__ unsigned char gauss[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)]; __shared__ unsigned char sobel[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)]; short gradient = 0; /*alloct img to cache*/ int raw_index = SPLIT_SIZE_X * SPLIT_SIZE_Y * blockIdx.y * gridDim.x + blockIdx.x * SPLIT_SIZE_X + SPLIT_SIZE_X * gridDim.x * threadIdx.y + threadIdx.x; int pixel_val = CUDA_GetPixelVal(img, width, height, raw_index / width - 2, raw_index % width - 2); int cache_index = blockDim.x * threadIdx.y + threadIdx.x; cache[cache_index] = pixel_val; __syncthreads(); /*gauss filter*/ CUDA_Gaussian(cache, blockDim.x, blockDim.y, cache_index, gauss); __syncthreads(); /*sobel filter*/ CUDA_Sobel(gauss, blockDim.x, blockDim.y, cache_index, sobel, &gradient); /*cute edge*/ if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2 || threadIdx.x <= 1 || threadIdx.x >= blockDim.x - 2) return; int new_id = blockIdx.y * SPLIT_SIZE_X * SPLIT_SIZE_Y * gridDim.x + (threadIdx.y - 2) * SPLIT_SIZE_X * gridDim.x + blockIdx.x * SPLIT_SIZE_X + (threadIdx.x - 2); /*store result*/ output_gradient[new_id] = gradient; output_sobel[new_id] = sobel[cache_index]; } __device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output) { int new_pixel_value = 0; new_pixel_value = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * 0.07511 + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x ) * 0.12384 + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * 0.07511 + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x - 1) * 0.12384 + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x ) * 0.20418 + CUDA_GetPixelVal(img, width, height, 
threadIdx.y , threadIdx.x + 1) * 0.12384 + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * 0.07511 + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x ) * 0.12384 + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * 0.07511; output[idx] = new_pixel_value; } __device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient) { int sobel_x = 0; int sobel_y = 0; int sobel = 0; sobel_x = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (1) + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x ) * (2) + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x ) * (-2) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (-1); sobel_y = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (-1) + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x - 1) * (-2) + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x + 1) * (2) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (1); sobel = sqrtf((float)(sobel_x * sobel_x + sobel_y * sobel_y)); sobel = sobel > 255 ? 255 : sobel; output_sobel[idx] = sobel; *gradient = GetGradientDirection(sobel_x, sobel_y); } __global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output) { int id = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if (id >= width * height) return; int i = id / width; int j = id % width; float weight = 0; int g0, g1, g2, g3; int temp_gradient = gradient[id] < 0 ? 
gradient[id] + 180 : gradient[id]; if (temp_gradient >= 0 && temp_gradient < 45) { weight = temp_gradient / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i , j + 1); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = CUDA_GetPixelVal(sobel, width, height, i , j - 1); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 45 && temp_gradient < 90) { weight = (90 - temp_gradient) / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j ); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j ); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 90 && temp_gradient < 135) { weight = (temp_gradient - 90) / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j ); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j ); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1); } else if (temp_gradient >= 135 && temp_gradient <= 180) { weight = (180 - temp_gradient) / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i , j - 1); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = CUDA_GetPixelVal(sobel, width, height, i , j + 1); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1); } int dot1 = g0 * (1 - weight) + g1 * weight; int dot2 = g2 * (1 - weight) + g3 * weight; if (sobel[id] >= dot1 && sobel[id] >= dot2) output[id] = sobel[id]; else output[id] = 0; } __global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny) { __shared__ unsigned char cache[LINK_SIZE_X * LINK_SIZE_Y]; __shared__ unsigned char output[LINK_SIZE_X * LINK_SIZE_Y]; __shared__ unsigned int weak_stack[LINK_SIZE_X * LINK_SIZE_Y]; __shared__ unsigned char visited[LINK_SIZE_X * LINK_SIZE_Y]; unsigned int stack_top = 0; memset(visited, 0, LINK_SIZE_X * LINK_SIZE_Y); int raw_index = LINK_SIZE_X * LINK_SIZE_Y * blockIdx.y * gridDim.x + blockIdx.x * LINK_SIZE_X + LINK_SIZE_X * gridDim.x * threadIdx.y + threadIdx.x; for (int i = 0; i < LINK_SIZE_Y; i++) { for (int j = 0; j < LINK_SIZE_X; j++) { cache[i * LINK_SIZE_X + j] = CUDA_GetPixelVal(sobel, width, height, raw_index / width + i, raw_index % width + j); } } CUDA_SubDoubleThreshold(cache, LINK_SIZE_X, LINK_SIZE_Y, min_val, max_val, weak_stack, &stack_top, output, visited); for (int i = 0; i < LINK_SIZE_Y; i++) { for (int j = 0; j < LINK_SIZE_X; j++) { int new_id = blockIdx.y * LINK_SIZE_X * LINK_SIZE_Y * gridDim.x + i * LINK_SIZE_X * gridDim.x + blockIdx.x * LINK_SIZE_X + j; canny[new_id] = output[i * LINK_SIZE_X + j]; } } } __device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited) { unsigned short center_index = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, stack_top, output, visited); while ((*stack_top) > 0) { center_index = weak_stack[(*stack_top) - 1]; (*stack_top)--; CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width - 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width , weak_stack, stack_top, output, visited); 
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width + 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width - 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width + 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width - 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width , weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width + 1, weak_stack, stack_top, output, visited); //__syncthreads(); } } else if (IS_NOT_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 0; } else { if(visited[i * width + j] == 0) output[i * width + j] = 0; } } } } __device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited) { if (i < 0 || i >= height) return; if (j < 0 || j >= width) return; if (visited[i * width + j] == 1) return; visited[i * width + j] = 1; if (IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 255; stack[*top] = i * width + j; (*top)++; } else if(IS_WEAK_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 255; stack[*top] = i * width + j; (*top)++; } else { output[i * width + j] = 0; } } __device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j) { if (i >= height || i < 0) return 0; else if (j >= width || j < 0) return 0; return *(img + i * width + j); } __device__ short GetGradientDirection(int sobel_x, int sobel_y) { short gradient = (atan2f(sobel_x, sobel_y) / 3.1415926 * 180.0); //gradient = gradient < 0 ? gradient + 180 : gradient; return gradient; } void DisplayGradient(short* gradient, int width, int height) { Mat img = Mat::zeros(Size(width, height), CV_8UC3); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (abs(*(gradient + i * width + j)) >= 0 && abs(*(gradient + i * width + j)) < 45) { img.at<Vec3b>(i, j) = Vec3b(255, 0, 0); } else if (abs(*(gradient + i * width + j)) >= 45 && abs(*(gradient + i * width + j)) < 90) { img.at<Vec3b>(i, j) = Vec3b(0, 255, 0); } else if (abs(*(gradient + i * width + j)) >= 90 && abs(*(gradient + i * width + j)) < 135) { img.at<Vec3b>(i, j) = Vec3b(0, 0, 255); } else if (abs(*(gradient + i * width + j)) >= 135 && abs(*(gradient + i * width + j)) <= 180) { img.at<Vec3b>(i, j) = Vec3b(128, 128, 128); } } } imshow("gradient", img); } unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j) { if (i >= height || i < 0) return 0; else if (j >= width || j < 0) return 0; return *(img + i * width + j); } void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int id = i * width + j; float weight = 0; int g0, g1, g2, g3; int temp_gradient = gradient[id] < 0 ? 
gradient[id] + 180 : gradient[id]; if (temp_gradient >= 0 && temp_gradient < 45) { weight = temp_gradient / 45.0; g0 = GetPixelVal(sobel, width, height, i, j + 1); g1 = GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = GetPixelVal(sobel, width, height, i, j - 1); g3 = GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 45 && temp_gradient < 90) { weight = (90 - temp_gradient) / 45.0; g0 = GetPixelVal(sobel, width, height, i - 1, j); g1 = GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = GetPixelVal(sobel, width, height, i + 1, j); g3 = GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 90 && temp_gradient < 135) { weight = (temp_gradient - 90) / 45.0; g0 = GetPixelVal(sobel, width, height, i - 1, j); g1 = GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = GetPixelVal(sobel, width, height, i + 1, j); g3 = GetPixelVal(sobel, width, height, i + 1, j + 1); } else if (temp_gradient >= 135 && temp_gradient <= 180) { weight = (180 - temp_gradient) / 45.0; g0 = GetPixelVal(sobel, width, height, i, j - 1); g1 = GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = GetPixelVal(sobel, width, height, i, j + 1); g3 = GetPixelVal(sobel, width, height, i + 1, j + 1); } int dot1 = g0 * (1 - weight) + g1 * weight; int dot2 = g2 * (1 - weight) + g3 * weight; if (sobel[id] > dot1 && sobel[id] > dot2) output[id] = sobel[id]; else output[id] = 0; } } } void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output) { unsigned short* weak_stack = new unsigned short[width * height]; unsigned short stack_top = 0; unsigned short center_index = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (IS_STRONG_EDGE(GetPixelVal(sobel, width, height, i, j))) { stack_top = 0; IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, &stack_top, output); while (stack_top > 0) { center_index = weak_stack[stack_top - 1]; stack_top--; IsWeakEdge(sobel, width, height, min_val, max_val, i - 1, j - 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i - 1, j , weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i - 1, j + 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i , j - 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i , j + 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i + 1, j - 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i + 1, j , weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i + 1, j + 1, weak_stack, &stack_top, output); } } else if (IS_NOT_EDGE(GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 0; } } } delete[] weak_stack; weak_stack = nullptr; } void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output) { if (IS_WEAK_EDGE(GetPixelVal(sobel, width, height, i, j)) || IS_STRONG_EDGE(GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 255; stack[*top] = i * width + j; *top++; } else { output[i * width + j] = 0; } }
ca0a679015b6dc1a237b42c5509e755444e7b319.cu
#include <stdio.h> #include <sys/time.h> #include <time.h> #include <opencv2/opencv.hpp> #include <math.h> //#include <device_launch_parameters.h> //#include <cuda_runtime.h> //#include <helper_cuda.h> //#include <helper_timer.h> //#include <device_functions.h> #define IS_NOT_EDGE(a) (a < min_val) #define IS_STRONG_EDGE(a) (a >= max_val) #define IS_WEAK_EDGE(a) (a >= min_val && a < max_val) using namespace cv; #define SPLIT_SIZE_X 32 #define SPLIT_SIZE_Y 24 #define LINK_SIZE_X 16 #define LINK_SIZE_Y 12 #define BLOCK_SIZE_X 36 #define BLOCK_SIZE_Y 28 /*canny using cuda*/ void CUDA_Canny(); __global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient); __device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output); __device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient); __global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output); __global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny); __device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited); __device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited); __device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j); __device__ short GetGradientDirection(int sobel_x, int sobel_y); void DisplayGradient(short* gradient, int width, int height); unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j); void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output); void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output); void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output); double GetTime() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } int main(void) { printf("CANNY_CUDA\n"); CUDA_Canny(); //system("pause"); return 0; } void CUDA_Canny() { int width = 640; int height = 480; dim3 block_size_extended(BLOCK_SIZE_X, BLOCK_SIZE_Y); dim3 block_size_normal(SPLIT_SIZE_X, SPLIT_SIZE_Y); dim3 block_size_link(LINK_SIZE_X, LINK_SIZE_Y); dim3 grid_size(width / SPLIT_SIZE_X, height / SPLIT_SIZE_Y); dim3 grid_size_link(width / LINK_SIZE_X, height / LINK_SIZE_Y); Mat img_src, img_sobel, img_gradient, img_canny; VideoCapture camera(0); /*cpu memory*/ unsigned char* cpu_sobel = new unsigned char[width * height]; short* cpu_gradient = new short[width * height]; unsigned char* cpu_canny = new unsigned char[width * height]; /*gpu memory*/ unsigned char* gpu_img; cudaMalloc(&gpu_img, width * height * sizeof(unsigned char)); unsigned char* gpu_sobel; cudaMalloc(&gpu_sobel, width * height * sizeof(unsigned char)); short* gpu_gradient; cudaMalloc(&gpu_gradient, width * height * sizeof(short)); unsigned char* gpu_canny; cudaMalloc(&gpu_canny, width * height * sizeof(unsigned char)); double start_time, end_time; while (1) { camera >> 
img_src; //img_src = imread("F:/img_src/15.jpg"); resize(img_src, img_src, Size(width, height), 0, 0); cvtColor(img_src, img_src, CV_BGR2GRAY); //imshow("img_src", img_src); start_time = GetTime(); /*1.copy to gpu memory*/ cudaMemcpy(gpu_img, img_src.data, width * height * sizeof(unsigned char), cudaMemcpyHostToDevice); /*2.gauss filter*/ CUDA_GaussianAndSobel << <grid_size, block_size_extended >> > (gpu_img, width, height, gpu_sobel, gpu_gradient); cudaDeviceSynchronize(); /*3.none max suppress*/ CUDA_NonMaxSuppress << <grid_size, block_size_normal >> > (gpu_sobel, width, height, gpu_gradient, gpu_sobel); cudaDeviceSynchronize(); /*4.double threshold*/ CUDA_DoubleThreshold2 << <grid_size_link, dim3(1,1) >> > (gpu_sobel, width, height, 50, 90, gpu_canny); /*copy to cpu memory*/ cudaMemcpy(cpu_sobel, gpu_canny, width * height * sizeof(unsigned char), cudaMemcpyDeviceToHost); //DoubleThreshold(cpu_sobel, width, height, 50, 90, cpu_canny); //cudaMemcpy(cpu_gradient, gpu_gradient, width * height * sizeof(short), cudaMemcpyDeviceToHost); //NonMaxSuppress(cpu_sobel, width, height, cpu_gradient, cpu_sobel); end_time = GetTime(); printf("elapse : %.2fms\n", (end_time - start_time)*1000); img_canny = Mat(Size(width, height), CV_8UC1, cpu_sobel); resize(img_canny, img_canny, Size(640, 480), 0, 0); imshow("canny", img_canny); if ('q' == waitKey(1)) { destroyAllWindows(); free(cpu_sobel); cpu_sobel = NULL; free(cpu_canny); cpu_canny = NULL; free(cpu_gradient); cpu_gradient = NULL; cudaFree(gpu_img); cudaFree(gpu_sobel); cudaFree(gpu_gradient); cudaFree(gpu_canny); break; } } } __global__ void CUDA_GaussianAndSobel(unsigned char* img, int width, int height, unsigned char* output_sobel, short* output_gradient) { __shared__ unsigned char cache[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)]; __shared__ unsigned char gauss[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)]; __shared__ unsigned char sobel[(BLOCK_SIZE_X) * (BLOCK_SIZE_Y)]; short gradient = 0; /*alloct img to cache*/ int raw_index = SPLIT_SIZE_X * SPLIT_SIZE_Y * blockIdx.y * gridDim.x + blockIdx.x * SPLIT_SIZE_X + SPLIT_SIZE_X * gridDim.x * threadIdx.y + threadIdx.x; int pixel_val = CUDA_GetPixelVal(img, width, height, raw_index / width - 2, raw_index % width - 2); int cache_index = blockDim.x * threadIdx.y + threadIdx.x; cache[cache_index] = pixel_val; __syncthreads(); /*gauss filter*/ CUDA_Gaussian(cache, blockDim.x, blockDim.y, cache_index, gauss); __syncthreads(); /*sobel filter*/ CUDA_Sobel(gauss, blockDim.x, blockDim.y, cache_index, sobel, &gradient); /*cute edge*/ if (threadIdx.y <= 1 || threadIdx.y >= blockDim.y - 2 || threadIdx.x <= 1 || threadIdx.x >= blockDim.x - 2) return; int new_id = blockIdx.y * SPLIT_SIZE_X * SPLIT_SIZE_Y * gridDim.x + (threadIdx.y - 2) * SPLIT_SIZE_X * gridDim.x + blockIdx.x * SPLIT_SIZE_X + (threadIdx.x - 2); /*store result*/ output_gradient[new_id] = gradient; output_sobel[new_id] = sobel[cache_index]; } __device__ void CUDA_Gaussian(unsigned char* img, int width, int height, int idx, unsigned char* output) { int new_pixel_value = 0; new_pixel_value = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * 0.07511 + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x ) * 0.12384 + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * 0.07511 + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x - 1) * 0.12384 + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x ) * 0.20418 + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x + 1) * 0.12384 + 
CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * 0.07511 + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x ) * 0.12384 + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * 0.07511; output[idx] = new_pixel_value; } __device__ void CUDA_Sobel(unsigned char* img, int width, int height, int idx, unsigned char* output_sobel, short* gradient) { int sobel_x = 0; int sobel_y = 0; int sobel = 0; sobel_x = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (1) + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x ) * (2) + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x ) * (-2) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (-1); sobel_y = CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x - 1) * (-1) + CUDA_GetPixelVal(img, width, height, threadIdx.y - 1, threadIdx.x + 1) * (1) + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x - 1) * (-2) + CUDA_GetPixelVal(img, width, height, threadIdx.y , threadIdx.x + 1) * (2) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x - 1) * (-1) + CUDA_GetPixelVal(img, width, height, threadIdx.y + 1, threadIdx.x + 1) * (1); sobel = sqrtf((float)(sobel_x * sobel_x + sobel_y * sobel_y)); sobel = sobel > 255 ? 255 : sobel; output_sobel[idx] = sobel; *gradient = GetGradientDirection(sobel_x, sobel_y); } __global__ void CUDA_NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output) { int id = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; if (id >= width * height) return; int i = id / width; int j = id % width; float weight = 0; int g0, g1, g2, g3; int temp_gradient = gradient[id] < 0 ? 
gradient[id] + 180 : gradient[id]; if (temp_gradient >= 0 && temp_gradient < 45) { weight = temp_gradient / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i , j + 1); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = CUDA_GetPixelVal(sobel, width, height, i , j - 1); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 45 && temp_gradient < 90) { weight = (90 - temp_gradient) / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j ); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j ); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 90 && temp_gradient < 135) { weight = (temp_gradient - 90) / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i - 1, j ); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = CUDA_GetPixelVal(sobel, width, height, i + 1, j ); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1); } else if (temp_gradient >= 135 && temp_gradient <= 180) { weight = (180 - temp_gradient) / 45.0; g0 = CUDA_GetPixelVal(sobel, width, height, i , j - 1); g1 = CUDA_GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = CUDA_GetPixelVal(sobel, width, height, i , j + 1); g3 = CUDA_GetPixelVal(sobel, width, height, i + 1, j + 1); } int dot1 = g0 * (1 - weight) + g1 * weight; int dot2 = g2 * (1 - weight) + g3 * weight; if (sobel[id] >= dot1 && sobel[id] >= dot2) output[id] = sobel[id]; else output[id] = 0; } __global__ void CUDA_DoubleThreshold2(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* canny) { __shared__ unsigned char cache[LINK_SIZE_X * LINK_SIZE_Y]; __shared__ unsigned char output[LINK_SIZE_X * LINK_SIZE_Y]; __shared__ unsigned int weak_stack[LINK_SIZE_X * LINK_SIZE_Y]; __shared__ unsigned char visited[LINK_SIZE_X * LINK_SIZE_Y]; unsigned int stack_top = 0; memset(visited, 0, LINK_SIZE_X * LINK_SIZE_Y); int raw_index = LINK_SIZE_X * LINK_SIZE_Y * blockIdx.y * gridDim.x + blockIdx.x * LINK_SIZE_X + LINK_SIZE_X * gridDim.x * threadIdx.y + threadIdx.x; for (int i = 0; i < LINK_SIZE_Y; i++) { for (int j = 0; j < LINK_SIZE_X; j++) { cache[i * LINK_SIZE_X + j] = CUDA_GetPixelVal(sobel, width, height, raw_index / width + i, raw_index % width + j); } } CUDA_SubDoubleThreshold(cache, LINK_SIZE_X, LINK_SIZE_Y, min_val, max_val, weak_stack, &stack_top, output, visited); for (int i = 0; i < LINK_SIZE_Y; i++) { for (int j = 0; j < LINK_SIZE_X; j++) { int new_id = blockIdx.y * LINK_SIZE_X * LINK_SIZE_Y * gridDim.x + i * LINK_SIZE_X * gridDim.x + blockIdx.x * LINK_SIZE_X + j; canny[new_id] = output[i * LINK_SIZE_X + j]; } } } __device__ void CUDA_SubDoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned int* weak_stack, unsigned int* stack_top, unsigned char* output, unsigned char* visited) { unsigned short center_index = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, stack_top, output, visited); while ((*stack_top) > 0) { center_index = weak_stack[(*stack_top) - 1]; (*stack_top)--; CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width - 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width , weak_stack, stack_top, output, visited); 
CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width - 1, center_index % width + 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width - 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width , center_index % width + 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width - 1, weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width , weak_stack, stack_top, output, visited); CUDA_IsWeakEdge(sobel, width, height, min_val, max_val, center_index / width + 1, center_index % width + 1, weak_stack, stack_top, output, visited); //__syncthreads(); } } else if (IS_NOT_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 0; } else { if(visited[i * width + j] == 0) output[i * width + j] = 0; } } } } __device__ void CUDA_IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned int* stack, unsigned int* top, unsigned char* output, unsigned char* visited) { if (i < 0 || i >= height) return; if (j < 0 || j >= width) return; if (visited[i * width + j] == 1) return; visited[i * width + j] = 1; if (IS_STRONG_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 255; stack[*top] = i * width + j; (*top)++; } else if(IS_WEAK_EDGE(CUDA_GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 255; stack[*top] = i * width + j; (*top)++; } else { output[i * width + j] = 0; } } __device__ unsigned char CUDA_GetPixelVal(unsigned char* img, int width, int height, int i, int j) { if (i >= height || i < 0) return 0; else if (j >= width || j < 0) return 0; return *(img + i * width + j); } __device__ short GetGradientDirection(int sobel_x, int sobel_y) { short gradient = (atan2f(sobel_x, sobel_y) / 3.1415926 * 180.0); //gradient = gradient < 0 ? gradient + 180 : gradient; return gradient; } void DisplayGradient(short* gradient, int width, int height) { Mat img = Mat::zeros(Size(width, height), CV_8UC3); for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (abs(*(gradient + i * width + j)) >= 0 && abs(*(gradient + i * width + j)) < 45) { img.at<Vec3b>(i, j) = Vec3b(255, 0, 0); } else if (abs(*(gradient + i * width + j)) >= 45 && abs(*(gradient + i * width + j)) < 90) { img.at<Vec3b>(i, j) = Vec3b(0, 255, 0); } else if (abs(*(gradient + i * width + j)) >= 90 && abs(*(gradient + i * width + j)) < 135) { img.at<Vec3b>(i, j) = Vec3b(0, 0, 255); } else if (abs(*(gradient + i * width + j)) >= 135 && abs(*(gradient + i * width + j)) <= 180) { img.at<Vec3b>(i, j) = Vec3b(128, 128, 128); } } } imshow("gradient", img); } unsigned char GetPixelVal(unsigned char* img, int width, int height, int i, int j) { if (i >= height || i < 0) return 0; else if (j >= width || j < 0) return 0; return *(img + i * width + j); } void NonMaxSuppress(unsigned char* sobel, int width, int height, short* gradient, unsigned char* output) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int id = i * width + j; float weight = 0; int g0, g1, g2, g3; int temp_gradient = gradient[id] < 0 ? 
gradient[id] + 180 : gradient[id]; if (temp_gradient >= 0 && temp_gradient < 45) { weight = temp_gradient / 45.0; g0 = GetPixelVal(sobel, width, height, i, j + 1); g1 = GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = GetPixelVal(sobel, width, height, i, j - 1); g3 = GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 45 && temp_gradient < 90) { weight = (90 - temp_gradient) / 45.0; g0 = GetPixelVal(sobel, width, height, i - 1, j); g1 = GetPixelVal(sobel, width, height, i - 1, j + 1); g2 = GetPixelVal(sobel, width, height, i + 1, j); g3 = GetPixelVal(sobel, width, height, i + 1, j - 1); } else if (temp_gradient >= 90 && temp_gradient < 135) { weight = (temp_gradient - 90) / 45.0; g0 = GetPixelVal(sobel, width, height, i - 1, j); g1 = GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = GetPixelVal(sobel, width, height, i + 1, j); g3 = GetPixelVal(sobel, width, height, i + 1, j + 1); } else if (temp_gradient >= 135 && temp_gradient <= 180) { weight = (180 - temp_gradient) / 45.0; g0 = GetPixelVal(sobel, width, height, i, j - 1); g1 = GetPixelVal(sobel, width, height, i - 1, j - 1); g2 = GetPixelVal(sobel, width, height, i, j + 1); g3 = GetPixelVal(sobel, width, height, i + 1, j + 1); } int dot1 = g0 * (1 - weight) + g1 * weight; int dot2 = g2 * (1 - weight) + g3 * weight; if (sobel[id] > dot1 && sobel[id] > dot2) output[id] = sobel[id]; else output[id] = 0; } } } void DoubleThreshold(unsigned char* sobel, int width, int height, int min_val, int max_val, unsigned char* output) { unsigned short* weak_stack = new unsigned short[width * height]; unsigned short stack_top = 0; unsigned short center_index = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { if (IS_STRONG_EDGE(GetPixelVal(sobel, width, height, i, j))) { stack_top = 0; IsWeakEdge(sobel, width, height, min_val, max_val, i, j, weak_stack, &stack_top, output); while (stack_top > 0) { center_index = weak_stack[stack_top - 1]; stack_top--; IsWeakEdge(sobel, width, height, min_val, max_val, i - 1, j - 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i - 1, j , weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i - 1, j + 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i , j - 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i , j + 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i + 1, j - 1, weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i + 1, j , weak_stack, &stack_top, output); IsWeakEdge(sobel, width, height, min_val, max_val, i + 1, j + 1, weak_stack, &stack_top, output); } } else if (IS_NOT_EDGE(GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 0; } } } delete[] weak_stack; weak_stack = nullptr; } void IsWeakEdge(unsigned char* sobel, int width, int height, int min_val, int max_val, int i, int j, unsigned short* stack, unsigned short* top, unsigned char* output) { if (IS_WEAK_EDGE(GetPixelVal(sobel, width, height, i, j)) || IS_STRONG_EDGE(GetPixelVal(sobel, width, height, i, j))) { output[i * width + j] = 255; stack[*top] = i * width + j; *top++; } else { output[i * width + j] = 0; } }
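A detail worth noting in the Canny pair above: CUDA_GaussianAndSobel computes a 32x24 (SPLIT_SIZE) output tile with a 36x28 (BLOCK_SIZE) thread block, i.e. a two-pixel apron on every side, so that the back-to-back 3x3 Gaussian and Sobel stencils can run entirely out of shared memory; apron threads only load data and return before the write-back. The sketch below shows just that tile-plus-apron skeleton under the same sizes and the same clamp-to-zero border behaviour as CUDA_GetPixelVal. It is a simplified stand-in, not the original kernel.

// Tile-plus-apron skeleton, assuming the same tile sizes as the kernels above.
// Launch with dim3 block(TILE_X + 2*APRON, TILE_Y + 2*APRON) and
// dim3 grid(width / TILE_X, height / TILE_Y).
#define TILE_X 32
#define TILE_Y 24
#define APRON   2

__global__ void tile_apron_sketch(const unsigned char* in, unsigned char* out,
                                  int width, int height)
{
    __shared__ unsigned char tile[TILE_Y + 2 * APRON][TILE_X + 2 * APRON];

    // Global coordinates of the pixel this thread loads (shifted by the apron).
    int gx = blockIdx.x * TILE_X + threadIdx.x - APRON;
    int gy = blockIdx.y * TILE_Y + threadIdx.y - APRON;

    // Clamp out-of-image reads to 0, as CUDA_GetPixelVal does.
    unsigned char v = 0;
    if (gx >= 0 && gx < width && gy >= 0 && gy < height)
        v = in[gy * width + gx];
    tile[threadIdx.y][threadIdx.x] = v;
    __syncthreads();

    // Apron threads exist only to fill shared memory; interior threads would
    // now apply the 3x3 stencils and write one output pixel each.
    if (threadIdx.x < APRON || threadIdx.x >= TILE_X + APRON ||
        threadIdx.y < APRON || threadIdx.y >= TILE_Y + APRON)
        return;
    out[gy * width + gx] = tile[threadIdx.y][threadIdx.x];
}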
1896f0ce74593a54da8fea2d28f45b5de2ecb5fd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/filler_op.h"

namespace caffe2 {
namespace {
__global__ void FillRangeKernel(const int n, float* data) {
  CUDA_1D_KERNEL_LOOP(index, n) {
    data[index] = index;
  }
}
}

template <>
bool RangeFillOp<float, CUDAContext>::Fill(
    TensorCUDA* output) {
  int N = output->size();
  hipLaunchKernelGGL(( FillRangeKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
      N, output->mutable_data<float>());
  return true;
}

REGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp<int, CUDAContext>);
REGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp<float, CUDAContext>);
}  // namespace caffe2
1896f0ce74593a54da8fea2d28f45b5de2ecb5fd.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/filler_op.h" namespace caffe2 { namespace { __global__ void FillRangeKernel(const int n, float* data) { CUDA_1D_KERNEL_LOOP(index, n) { data[index] = index; } } } template <> bool RangeFillOp<float, CUDAContext>::Fill( TensorCUDA* output) { int N = output->size(); FillRangeKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, output->mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp<int, CUDAContext>); REGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp<CUDAContext>); REGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp<float, CUDAContext>); } // namespace caffe2
f3ea23d2e836532fe75f1885a772aa1992bdb8d1.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2018-2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cuda_utils.h>
#include <gtest/gtest.h>
#include <vector>
#include <cuml/cluster/dbscan.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/cuml.hpp>
#include <cuml/datasets/make_blobs.hpp>
#include <cuml/metrics/metrics.hpp>
#include "linalg/cublas_wrappers.h"
#include "linalg/transpose.h"
#include "ml_utils.h"
#include "test_utils.h"
#include "common/device_buffer.hpp"

namespace ML {

using namespace MLCommon;
using namespace Datasets;
using namespace Metrics;
using namespace std;

template <typename T, typename IdxT>
struct DbscanInputs {
  IdxT n_row;
  IdxT n_col;
  IdxT n_centers;
  T cluster_std;
  T eps;
  int min_pts;
  size_t max_bytes_per_batch;
  unsigned long long int seed;
};

template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os,
                           const DbscanInputs<T, IdxT>& dims) {
  return os;
}

template <typename T, typename IdxT>
class DbscanTest : public ::testing::TestWithParam<DbscanInputs<T, IdxT>> {
 protected:
  void basicTest() {
    cumlHandle handle;

    params = ::testing::TestWithParam<DbscanInputs<T, IdxT>>::GetParam();

    device_buffer<T> out(handle.getDeviceAllocator(), handle.getStream(),
                         params.n_row * params.n_col);
    device_buffer<IdxT> l(handle.getDeviceAllocator(), handle.getStream(),
                          params.n_row);

    make_blobs(handle, out.data(), l.data(), params.n_row, params.n_col,
               params.n_centers, nullptr, nullptr, params.cluster_std, true,
               -10.0f, 10.0f, 1234ULL);

    allocate(labels, params.n_row);
    allocate(labels_ref, params.n_row);

    MLCommon::copy(labels_ref, l.data(), params.n_row, handle.getStream());
    CUDA_CHECK(hipStreamSynchronize(handle.getStream()));

    dbscanFit(handle, out.data(), params.n_row, params.n_col, params.eps,
              params.min_pts, labels, params.max_bytes_per_batch, false);

    CUDA_CHECK(hipStreamSynchronize(handle.getStream()));

    score = adjustedRandIndex(handle, labels_ref, labels, params.n_row, 0,
                              params.n_centers - 1);

    if (score < 1.0) {
      std::cout << "y: "
                << arr2Str(labels_ref, 25, "labels_ref", handle.getStream())
                << std::endl;
      std::cout << "y_hat: "
                << arr2Str(labels, 25, "labels", handle.getStream())
                << std::endl;
      std::cout << "Score = " << score << std::endl;
    }
  }

  void SetUp() override { basicTest(); }

  void TearDown() override {
    CUDA_CHECK(hipFree(labels));
    CUDA_CHECK(hipFree(labels_ref));
  }

 protected:
  DbscanInputs<T, IdxT> params;
  IdxT *labels, *labels_ref;

  double score;
};

const std::vector<DbscanInputs<float, int>> inputsf2 = {
  {50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
  {1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {50000, 16, 5l, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};

const std::vector<DbscanInputs<float, int64_t>> inputsf3 = {
  {50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
  {1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {50000, 16, 5l, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};

const std::vector<DbscanInputs<double, int>> inputsd2 = {
  {50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
  {1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {100, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL},
  {20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}};

const std::vector<DbscanInputs<double, int64_t>> inputsd3 = {
  {50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL},
  {1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {100, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL},
  {20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}};

typedef DbscanTest<float, int> DbscanTestF_Int;
TEST_P(DbscanTestF_Int, Result) { ASSERT_TRUE(score == 1.0); }

typedef DbscanTest<float, int64_t> DbscanTestF_Int64;
TEST_P(DbscanTestF_Int64, Result) { ASSERT_TRUE(score == 1.0); }

typedef DbscanTest<double, int> DbscanTestD_Int;
TEST_P(DbscanTestD_Int, Result) { ASSERT_TRUE(score == 1.0); }

typedef DbscanTest<double, int64_t> DbscanTestD_Int64;
TEST_P(DbscanTestD_Int64, Result) { ASSERT_TRUE(score == 1.0); }

INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int,
                        ::testing::ValuesIn(inputsf2));

INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int64,
                        ::testing::ValuesIn(inputsf3));

INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int,
                        ::testing::ValuesIn(inputsd2));

INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int64,
                        ::testing::ValuesIn(inputsd3));

}  // end namespace ML
f3ea23d2e836532fe75f1885a772aa1992bdb8d1.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_utils.h> #include <gtest/gtest.h> #include <vector> #include <cuml/cluster/dbscan.hpp> #include <cuml/common/cuml_allocator.hpp> #include <cuml/cuml.hpp> #include <cuml/datasets/make_blobs.hpp> #include <cuml/metrics/metrics.hpp> #include "linalg/cublas_wrappers.h" #include "linalg/transpose.h" #include "ml_utils.h" #include "test_utils.h" #include "common/device_buffer.hpp" namespace ML { using namespace MLCommon; using namespace Datasets; using namespace Metrics; using namespace std; template <typename T, typename IdxT> struct DbscanInputs { IdxT n_row; IdxT n_col; IdxT n_centers; T cluster_std; T eps; int min_pts; size_t max_bytes_per_batch; unsigned long long int seed; }; template <typename T, typename IdxT> ::std::ostream& operator<<(::std::ostream& os, const DbscanInputs<T, IdxT>& dims) { return os; } template <typename T, typename IdxT> class DbscanTest : public ::testing::TestWithParam<DbscanInputs<T, IdxT>> { protected: void basicTest() { cumlHandle handle; params = ::testing::TestWithParam<DbscanInputs<T, IdxT>>::GetParam(); device_buffer<T> out(handle.getDeviceAllocator(), handle.getStream(), params.n_row * params.n_col); device_buffer<IdxT> l(handle.getDeviceAllocator(), handle.getStream(), params.n_row); make_blobs(handle, out.data(), l.data(), params.n_row, params.n_col, params.n_centers, nullptr, nullptr, params.cluster_std, true, -10.0f, 10.0f, 1234ULL); allocate(labels, params.n_row); allocate(labels_ref, params.n_row); MLCommon::copy(labels_ref, l.data(), params.n_row, handle.getStream()); CUDA_CHECK(cudaStreamSynchronize(handle.getStream())); dbscanFit(handle, out.data(), params.n_row, params.n_col, params.eps, params.min_pts, labels, params.max_bytes_per_batch, false); CUDA_CHECK(cudaStreamSynchronize(handle.getStream())); score = adjustedRandIndex(handle, labels_ref, labels, params.n_row, 0, params.n_centers - 1); if (score < 1.0) { std::cout << "y: " << arr2Str(labels_ref, 25, "labels_ref", handle.getStream()) << std::endl; std::cout << "y_hat: " << arr2Str(labels, 25, "labels", handle.getStream()) << std::endl; std::cout << "Score = " << score << std::endl; } } void SetUp() override { basicTest(); } void TearDown() override { CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(labels_ref)); } protected: DbscanInputs<T, IdxT> params; IdxT *labels, *labels_ref; double score; }; const std::vector<DbscanInputs<float, int>> inputsf2 = { {50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL}, {1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {50000, 16, 5l, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}}; const std::vector<DbscanInputs<float, int64_t>> inputsf3 = { {50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL}, {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL}, {1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL}, {50000, 16, 5l, 0.01, 
2, 2, (size_t)9e3, 1234ULL}, {20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL}, {20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}}; const std::vector<DbscanInputs<double, int>> inputsd2 = { {50000, 16, 5, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL}, {1000, 1000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {100, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {20000, 10000, 10, 0.01, 2, 2, (size_t)13e3, 1234ULL}, {20000, 100, 5000, 0.01, 2, 2, (size_t)13e3, 1234ULL}}; const std::vector<DbscanInputs<double, int64_t>> inputsd3 = { {50000, 16, 5, 0.01, 2, 2, (size_t)9e3, 1234ULL}, {500, 16, 5, 0.01, 2, 2, (size_t)100, 1234ULL}, {1000, 1000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL}, {100, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL}, {20000, 10000, 10, 0.01, 2, 2, (size_t)9e3, 1234ULL}, {20000, 100, 5000, 0.01, 2, 2, (size_t)9e3, 1234ULL}}; typedef DbscanTest<float, int> DbscanTestF_Int; TEST_P(DbscanTestF_Int, Result) { ASSERT_TRUE(score == 1.0); } typedef DbscanTest<float, int64_t> DbscanTestF_Int64; TEST_P(DbscanTestF_Int64, Result) { ASSERT_TRUE(score == 1.0); } typedef DbscanTest<double, int> DbscanTestD_Int; TEST_P(DbscanTestD_Int, Result) { ASSERT_TRUE(score == 1.0); } typedef DbscanTest<double, int64_t> DbscanTestD_Int64; TEST_P(DbscanTestD_Int64, Result) { ASSERT_TRUE(score == 1.0); } INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestF_Int64, ::testing::ValuesIn(inputsf3)); INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int, ::testing::ValuesIn(inputsd2)); INSTANTIATE_TEST_CASE_P(DbscanTests, DbscanTestD_Int64, ::testing::ValuesIn(inputsd3)); } // end namespace ML
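/*
 * Editor's sketch (not part of the cuML test above, and not cuML's implementation):
 * a minimal host-side illustration of the adjusted Rand index that the tests use as
 * their pass criterion. A score of exactly 1.0 means the DBSCAN labelling matches the
 * make_blobs ground truth up to a renaming of the cluster labels, which is why
 * ASSERT_TRUE(score == 1.0) is a meaningful check. The function and variable names
 * below are illustrative only.
 */
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

static double comb2(double n) { return n * (n - 1.0) / 2.0; }

double adjusted_rand_index_cpu(const std::vector<int>& a, const std::vector<int>& b) {
  // Contingency table n_ij plus per-label sums for both labellings.
  std::map<std::pair<int, int>, double> nij;
  std::map<int, double> ai, bj;
  for (size_t k = 0; k < a.size(); ++k) {
    nij[{a[k], b[k]}] += 1;
    ai[a[k]] += 1;
    bj[b[k]] += 1;
  }
  double sum_nij = 0, sum_ai = 0, sum_bj = 0;
  for (auto& kv : nij) sum_nij += comb2(kv.second);
  for (auto& kv : ai) sum_ai += comb2(kv.second);
  for (auto& kv : bj) sum_bj += comb2(kv.second);
  double expected = sum_ai * sum_bj / comb2((double)a.size());
  double max_index = 0.5 * (sum_ai + sum_bj);
  return (sum_nij - expected) / (max_index - expected);
}

int main() {
  // Identical partitions (up to label names) give ARI == 1.0.
  std::vector<int> truth = {0, 0, 1, 1, 2, 2};
  std::vector<int> pred = {5, 5, 7, 7, 9, 9};
  std::printf("ARI = %f\n", adjusted_rand_index_cpu(truth, pred));  // prints 1.000000
  return 0;
}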
890514075e66bf4c72a07820794522f4f6212ad7.hip
// !!! This is a file automatically generated by hipify!!! #include "catch.hpp" #include <thrust/device_vector.h> //---------------------------------------------------------------------------------------- #define TEST_CUDA_CHECK_RETURN //---------------------------------------------------------------------------------------- #include "BaseCudaTestHandler.h" #include "../GPUPatternMining/Entities/TypeCount.h" #include "../GPUPatternMining/Prevalence/AnyLengthInstancesUniquePrevalenceProvider.h" //---------------------------------------------------------------------------------------- TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | init") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 2)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | simple 0") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 2)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { {0xA, 0xB} }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { {0xA0000},{ 0xB0000 } }; std::vector<unsigned int> itCliqueId = { 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | simple 1") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 2)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 } ,{ 0xB0000 },{ 0xB0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | simple 2") { auto counts = 
std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 } ,{ 0xB0000 },{ 0xB0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 0") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } , { 0xA, 0xC } , { 0xB, 0xC } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 },{ 0xA0000 },{ 0xB0000 } ,{ 0xB0000 },{ 0xB0001 },{ 0xC0001 }, { 0xC0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0, 1, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f, 0.5f, 0.25f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 1") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } ,{ 0xA, 0xC } ,{ 0xB, 0xC } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 },{ 0xB0000 } ,{ 0xB0000 },{ 0xB0001 }, { 0xC0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; CUDA_CHECK_RETURN(hipDeviceSynchronize()); auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f, 0.f, 0.25f }; 
thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 2") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 4)); counts->push_back(TypeCount(0xD, 5)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB, 0xC } ,{ 0xA, 0xC, 0xD } ,{ 0xB, 0xC, 0xD } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 },{ 0xA0000 },{ 0xB0000 } ,{ 0xB0000 },{ 0xB0001 },{ 0xC0001 },{ 0xC0001 } ,{ 0xC0000 },{ 0xC0000 },{ 0xD0001 },{ 0xD0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0, 1, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.25f, 0.2f, 0.2f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 3") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 4)); counts->push_back(TypeCount(0xD, 5)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB, 0xC } ,{ 0xA, 0xC, 0xD } ,{ 0xB, 0xC, 0xD } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xB0000 } ,{ 0xC0001 },{ 0xC0001 } ,{ 0xD0001 },{ 0xD0001 } }; std::vector<unsigned int> itCliqueId = { 1, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.0f, 0.2f, 0.2f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 4") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 4)); counts->push_back(TypeCount(0xD, 5)); counts->push_back(TypeCount(0xE, 5)); counts->push_back(TypeCount(0xF, 5)); counts->push_back(TypeCount(0x10, 10)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 
0xB, 0xC, 0xD, 0xE, 0xF, 0x10 } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 } ,{ 0xB0001 } ,{ 0xC0001 } ,{ 0xD0001 } ,{ 0xE0001 } ,{ 0xF0001 } ,{ 0x100001 } }; std::vector<unsigned int> itCliqueId = { 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.1f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(hipDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); }
890514075e66bf4c72a07820794522f4f6212ad7.cu
#include "catch.hpp" #include <thrust/device_vector.h> //---------------------------------------------------------------------------------------- #define TEST_CUDA_CHECK_RETURN //---------------------------------------------------------------------------------------- #include "BaseCudaTestHandler.h" #include "../GPUPatternMining/Entities/TypeCount.h" #include "../GPUPatternMining/Prevalence/AnyLengthInstancesUniquePrevalenceProvider.h" //---------------------------------------------------------------------------------------- TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | init") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 2)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | simple 0") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 2)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { {0xA, 0xB} }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { {0xA0000},{ 0xB0000 } }; std::vector<unsigned int> itCliqueId = { 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | simple 1") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 2)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 } ,{ 0xB0000 },{ 0xB0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | simple 2") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); 
counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 } ,{ 0xB0000 },{ 0xB0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 0") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } , { 0xA, 0xC } , { 0xB, 0xC } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 },{ 0xA0000 },{ 0xB0000 } ,{ 0xB0000 },{ 0xB0001 },{ 0xC0001 }, { 0xC0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0, 1, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f, 0.5f, 0.25f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 1") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 2)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB } ,{ 0xA, 0xC } ,{ 0xB, 0xC } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 },{ 0xB0000 } ,{ 0xB0000 },{ 0xB0001 }, { 0xC0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.5f, 0.f, 0.25f }; thrust::host_vector<float> hResult = *result; 
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 2") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 4)); counts->push_back(TypeCount(0xD, 5)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB, 0xC } ,{ 0xA, 0xC, 0xD } ,{ 0xB, 0xC, 0xD } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xA0000 },{ 0xA0000 },{ 0xB0000 } ,{ 0xB0000 },{ 0xB0001 },{ 0xC0001 },{ 0xC0001 } ,{ 0xC0000 },{ 0xC0000 },{ 0xD0001 },{ 0xD0001 } }; std::vector<unsigned int> itCliqueId = { 0, 0, 1, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.25f, 0.2f, 0.2f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 3") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 4)); counts->push_back(TypeCount(0xD, 5)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB, 0xC } ,{ 0xA, 0xC, 0xD } ,{ 0xB, 0xC, 0xD } }; auto gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 },{ 0xB0000 } ,{ 0xC0001 },{ 0xC0001 } ,{ 0xD0001 },{ 0xD0001 } }; std::vector<unsigned int> itCliqueId = { 1, 2 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.0f, 0.2f, 0.2f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); } TEST_CASE_METHOD(BaseCudaTestHandler, "AnyLengthInstancesUniquePrevalenceProvider | multiple types 4") { auto counts = std::make_shared<TypesCounts>(); counts->push_back(TypeCount(0xA, 2)); counts->push_back(TypeCount(0xB, 4)); counts->push_back(TypeCount(0xC, 4)); counts->push_back(TypeCount(0xD, 5)); counts->push_back(TypeCount(0xE, 5)); counts->push_back(TypeCount(0xF, 5)); counts->push_back(TypeCount(0x10, 10)); auto keyProc = std::make_shared<GPUUIntKeyProcessor>(); auto typesCountsMap = getGpuTypesCountsMap(counts, keyProc.get()); auto counter = AnyLengthInstancesUniquePrevalenceProvider(typesCountsMap); CliquesCandidates candidates = { { 0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 0x10 } }; auto 
gpuCandidates = Entities::moveCliquesCandidatesToGpu(candidates); auto instanceTreeResult = std::make_shared<InstanceTree::InstanceTreeResult>(); std::vector<FeatureInstance> itInstances = { { 0xA0000 } ,{ 0xB0001 } ,{ 0xC0001 } ,{ 0xD0001 } ,{ 0xE0001 } ,{ 0xF0001 } ,{ 0x100001 } }; std::vector<unsigned int> itCliqueId = { 0 }; instanceTreeResult->instances = itInstances; instanceTreeResult->instancesCliqueId = itCliqueId; auto result = counter.getPrevalenceFromCandidatesInstances( gpuCandidates , instanceTreeResult ); std::vector<float> expected = { 0.1f }; thrust::host_vector<float> hResult = *result; CUDA_CHECK_RETURN(cudaDeviceSynchronize()); REQUIRE(std::equal(expected.begin(), expected.end(), hResult.begin())); }
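/*
 * Editor's sketch (not part of the test suite above): a hand check of where expected
 * prevalence values such as {0.25f, 0.2f, 0.2f} come from. It assumes the
 * participation-index style definition suggested by the fixtures: for each candidate
 * clique, prevalence = min over its feature types of
 * (#distinct participating instances of that type) / (total count of that type),
 * and it assumes the InstanceTreeResult instances are laid out one column per clique
 * instance (an inference from the data, not documented here).
 */
#include <algorithm>
#include <cstdio>
#include <set>

int main() {
  // "multiple types 2": counts A=2, B=4, C=4, D=5. Candidate {A,B,C} has two
  // instances, (A0000, B0000, C0000) and (A0000, B0001, C0000), read off by hand.
  std::set<unsigned> a = {0xA0000};           // 1 distinct A out of 2 -> 0.50
  std::set<unsigned> b = {0xB0000, 0xB0001};  // 2 distinct B out of 4 -> 0.50
  std::set<unsigned> c = {0xC0000};           // 1 distinct C out of 4 -> 0.25
  float prevalence = std::min({a.size() / 2.0f, b.size() / 4.0f, c.size() / 4.0f});
  std::printf("prevalence({A,B,C}) = %.2f\n", prevalence);  // 0.25, matching the test
  return 0;
}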
c2efadd2acad18b834bd18bbaf1961fa5aa5d367.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
   HDR Tone-mapping

   Background HDR
   ==============

   A High Definition Range (HDR) image contains a wider variation of
   intensity and color than is allowed by the RGB format with 1 byte per
   channel that we have used in the previous assignment.

   To store this extra information we use single precision floating point for
   each channel. This allows for an extremely wide range of intensity values.

   In the image for this assignment, the inside of church with light coming in
   through stained glass windows, the raw input floating point values for the
   channels range from 0 to 275. But the mean is .41 and 98% of the values are
   less than 3! This means that certain areas (the windows) are extremely
   bright compared to everywhere else. If we linearly map this [0-275] range
   into the [0-255] range that we have been using then most values will be
   mapped to zero! The only thing we will be able to see are the very
   brightest areas - the windows - everything else will appear pitch black.

   The problem is that although we have cameras capable of recording the wide
   range of intensity that exists in the real world our monitors are not
   capable of displaying them. Our eyes are also quite capable of observing a
   much wider range of intensities than our image formats / monitors are
   capable of displaying.

   Tone-mapping is a process that transforms the intensities in the image so
   that the brightest values aren't nearly so far away from the mean. That way
   when we transform the values into [0-255] we can actually see the entire
   image. There are many ways to perform this process and it is as much an art
   as a science - there is no single "right" answer. In this homework we will
   implement one possible technique.

   Background Chrominance-Luminance
   ================================

   The RGB space that we have been using to represent images can be thought of
   as one possible set of axes spanning a three dimensional space of color. We
   sometimes choose other axes to represent this space because they make
   certain operations more convenient.

   Another possible way of representing a color image is to separate the color
   information (chromaticity) from the brightness information. There are
   multiple different methods for doing this - a common one during the analog
   television days was known as Chrominance-Luminance or YUV.

   We choose to represent the image in this way so that we can remap only the
   intensity channel and then recombine the new intensity values with the
   color information to form the final image.

   Old TV signals used to be transmitted in this way so that black & white
   televisions could display the luminance channel while color televisions
   would display all three of the channels.

   Tone-mapping
   ============

   In this assignment we are going to transform the luminance channel
   (actually the log of the luminance, but this is unimportant for the parts
   of the algorithm that you will be implementing) by compressing its range to
   [0, 1]. To do this we need the cumulative distribution of the luminance
   values.

   Example
   -------

   input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
   min / max / range: 0 / 9 / 9

   histo with 3 bins: [4 7 3]

   cdf : [4 11 14]

   Your task is to calculate this cumulative distribution by following these
   steps.
*/

#include "reference_calc.cpp"
#include "utils.h"

#define NUM_THREADS 1024

__global__ void reduce_min_kernel(const float* g_in, float* g_out, long int size)
{
  extern __shared__ float sdata_min[];

  int tid = threadIdx.x;
  int i = blockDim.x * blockIdx.x + tid;
  if (i >= size) return;

  sdata_min[tid] = g_in[i];
  __syncthreads();

  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
      sdata_min[tid] = min(sdata_min[tid], sdata_min[tid + s]);
    }
    __syncthreads();
  }

  if (tid == 0)
    g_out[blockIdx.x] = sdata_min[0];
}

__global__ void reduce_max_kernel(const float* g_in, float* g_out, long int size)
{
  extern __shared__ float sdata_min[];

  int tid = threadIdx.x;
  int i = blockDim.x * blockIdx.x + tid;
  if (i >= size) return;

  sdata_min[tid] = g_in[i];
  __syncthreads();

  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) {
      sdata_min[tid] = max(sdata_min[tid], sdata_min[tid + s]);
    }
    __syncthreads();
  }

  if (tid == 0)
    g_out[blockIdx.x] = sdata_min[0];
}

__global__ void histogram_kernel(const float* const d_logLuminance,
                                 unsigned int* d_histogram, float lumRange,
                                 float lumMin, int numBins)
{
  int myId = threadIdx.x + blockIdx.x * blockDim.x;
  int bin = (d_logLuminance[myId] - lumMin) / lumRange * numBins;
  if (bin == numBins) {
    bin = bin - 1;
  }
  atomicAdd(&(d_histogram[bin]), 1);
}

__global__ void scan_kernel(unsigned int* out, unsigned int* in, const int n)
{
  extern __shared__ float temp[];

  int tid = threadIdx.x;
  temp[tid] = (tid > 0) ? in[tid - 1] : 0;
  __syncthreads();

  for (int offset = 1; offset < n; offset *= 2) {
    if (tid >= offset)
      temp[tid] += temp[tid - offset];
    else
      temp[tid] = temp[tid];
  }
  __syncthreads();

  out[tid] = temp[tid];
}

void your_histogram_and_prefixsum(const float* const d_logLuminance,
                                  unsigned int* const d_cdf,
                                  float &min_logLum, float &max_logLum,
                                  const size_t numRows, const size_t numCols,
                                  const size_t numBins)
{
  //TODO
  /*Here are the steps you need to implement
    1) find the minimum and maximum value in the input logLuminance channel
       store in min_logLum and max_logLum*/
  float* d_inter_min;
  float* d_out_min;
  float* d_inter_max;
  float* d_out_max;

  const int maxThreads = 1024;
  int numBlocks = ((numCols * numRows) + maxThreads - 1) / maxThreads;

  hipMalloc((void**) &d_inter_min, numBlocks * sizeof(float));
  hipMalloc((void**) &d_out_min, sizeof(float));
  hipMalloc((void**) &d_inter_max, numBlocks * sizeof(float));
  hipMalloc((void**) &d_out_max, sizeof(float));

  const dim3 gridSize = numBlocks;
  const dim3 blockSize = maxThreads;

  long int size1 = numRows * numCols;
  long int size2 = numBlocks;

  hipLaunchKernelGGL(( reduce_min_kernel), dim3(gridSize), dim3(blockSize), maxThreads*sizeof(float), 0, d_logLuminance, d_inter_min, size1);
  hipLaunchKernelGGL(( reduce_min_kernel), dim3(1), dim3(gridSize), numBlocks*sizeof(float), 0, d_inter_min, d_out_min, size2);
  hipMemcpy(&min_logLum, d_out_min, sizeof(float), hipMemcpyDeviceToHost);

  hipLaunchKernelGGL(( reduce_max_kernel), dim3(gridSize), dim3(blockSize), maxThreads*sizeof(float), 0, d_logLuminance, d_inter_max, size1);
  hipLaunchKernelGGL(( reduce_max_kernel), dim3(1), dim3(gridSize), numBlocks*sizeof(float), 0, d_inter_max, d_out_max, size2);
  hipMemcpy(&max_logLum, d_out_max, sizeof(float), hipMemcpyDeviceToHost);

  float lumRange;
  lumRange = max_logLum - min_logLum;

  /* 2) subtract them to find the range*/
  //float lumRange = max_logLum-min_logLum;

  std::cout << "Max, Min" << std::endl;
  std::cout << max_logLum << ", " << min_logLum << std::endl;
  std::cout << "The range" << std::endl;
  std::cout << lumRange << std::endl;

  /* 3) generate a histogram of all the values in the logLuminance channel using
        the formula: bin = (lum[i] - lumMin) / lumRange * numBins*/
  const dim3 blockSizeHistogram(NUM_THREADS, 1, 1);
  const dim3 gridSizeHistogram(
    (numCols*numRows + blockSizeHistogram.x - 1) / blockSizeHistogram.x, 1, 1);

  unsigned int *d_histogram;

  /* Move lumRange over to GPU
  checkCudaErrors(hipMalloc((void**) &d_lumRange, sizeof(float)));
  checkCudaErrors(hipMemcpy(d_lumRange, h_lumRange,sizeof(float), hipMemcpyHostToDevice));*/

  // Allocate memory for d_histogram and initialize to 0.
  checkCudaErrors(hipMalloc((void**)&d_histogram, sizeof(unsigned int)*numBins));
  checkCudaErrors(hipMemset(d_histogram, 0, sizeof(unsigned int)*numBins));

  // Launch histogram_kernel on the Device
  hipLaunchKernelGGL(( histogram_kernel), dim3(gridSizeHistogram),dim3(blockSizeHistogram), 0, 0, d_logLuminance, d_histogram, lumRange, min_logLum, numBins);

  /* 4) Perform an exclusive scan (prefix sum) on the histogram to get
        the cumulative distribution of luminance values (this should go in the
        incoming d_cdf pointer which already has been allocated for you) */
  const dim3 blockSizeScan(numBins, 1, 1);
  const dim3 gridSizeScan(1, 1, 1);

  hipLaunchKernelGGL(( scan_kernel), dim3(gridSizeScan),dim3(blockSizeScan),sizeof(float)*blockSizeScan.x, 0, d_cdf, d_histogram, numBins);

  checkCudaErrors(hipDeviceSynchronize());
  checkCudaErrors(hipGetLastError());

  /*
  checkCudaErrors(hipFree(d_histogram));
  checkCudaErrors(hipFree(d_max_inter));
  checkCudaErrors(hipFree(d_max));
  checkCudaErrors(hipFree(d_min_inter));
  checkCudaErrors(hipFree(d_min));*/
}
c2efadd2acad18b834bd18bbaf1961fa5aa5d367.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Definition Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "reference_calc.cpp" #include "utils.h" #define NUM_THREADS 1024 __global__ void reduce_min_kernel(const float* g_in, float* g_out, long int size) { extern __shared__ float sdata_min[]; int tid = threadIdx.x; int i = blockDim.x*blockIdx.x + tid; if(i>=size) return; sdata_min[tid] = g_in[i]; __syncthreads(); for(int s = blockDim.x/2; s > 0; s>>=1){ if(tid < s){ sdata_min[tid] = min(sdata_min[tid], sdata_min[tid+s]); } __syncthreads(); } if(tid == 0) g_out[blockIdx.x] = sdata_min[0]; } __global__ void reduce_max_kernel(const float* g_in, float* g_out, long int size) { extern __shared__ float sdata_min[]; int tid = threadIdx.x; int i = blockDim.x*blockIdx.x + tid; if(i>=size) return; sdata_min[tid] = g_in[i]; __syncthreads(); for(int s = blockDim.x/2; s > 0; s>>=1){ if(tid < s){ sdata_min[tid] = max(sdata_min[tid], sdata_min[tid+s]); } __syncthreads(); } if(tid == 0) g_out[blockIdx.x] = sdata_min[0]; } __global__ void histogram_kernel(const float* const d_logLuminance, unsigned int* d_histogram, float lumRange, float lumMin, int numBins) { int myId = threadIdx.x + blockIdx.x*blockDim.x; int bin = (d_logLuminance[myId] - lumMin)/lumRange * numBins; if (bin == numBins){ bin = bin-1; } atomicAdd(&(d_histogram[bin]),1); } __global__ void scan_kernel(unsigned int* out, unsigned int* in, const int n) { extern __shared__ float temp[]; int tid = threadIdx.x; temp[tid] = (tid>0) ? in[tid-1] : 0; __syncthreads(); for(int offset = 1; offset < n; offset *=2) { if(tid>=offset) temp[tid] += temp[tid - offset]; else temp[tid] = temp[tid]; } __syncthreads(); out[tid] = temp[tid]; } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum*/ float* d_inter_min; float* d_out_min; float* d_inter_max; float* d_out_max; const int maxThreads = 1024; int numBlocks = ((numCols*numRows)+maxThreads-1)/maxThreads; cudaMalloc((void**) &d_inter_min, numBlocks*sizeof(float)); cudaMalloc((void**) &d_out_min, sizeof(float)); cudaMalloc((void**) &d_inter_max, numBlocks*sizeof(float)); cudaMalloc((void**) &d_out_max, sizeof(float)); const dim3 gridSize = numBlocks; const dim3 blockSize = maxThreads; long int size1 = numRows*numCols; long int size2 = numBlocks; reduce_min_kernel<<<gridSize, blockSize, maxThreads*sizeof(float)>>>(d_logLuminance, d_inter_min, size1); reduce_min_kernel<<<1, gridSize, numBlocks*sizeof(float)>>>(d_inter_min, d_out_min, size2); cudaMemcpy(&min_logLum, d_out_min, sizeof(float), cudaMemcpyDeviceToHost); reduce_max_kernel<<<gridSize, blockSize, maxThreads*sizeof(float)>>>(d_logLuminance, d_inter_max, size1); reduce_max_kernel<<<1, gridSize, numBlocks*sizeof(float)>>>(d_inter_max, d_out_max, size2); cudaMemcpy(&max_logLum, d_out_max, sizeof(float), cudaMemcpyDeviceToHost); float lumRange; lumRange = max_logLum - min_logLum; /* 2) subtract them to find the range*/ //float lumRange = max_logLum-min_logLum; std::cout << "Max, Min" << std::endl; std::cout << max_logLum << ", " << min_logLum << std::endl; std::cout << "The range" << std::endl; std::cout << lumRange << std::endl; /* 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins*/ const dim3 blockSizeHistogram(NUM_THREADS,1,1); const dim3 gridSizeHistogram( 
(numCols*numRows + blockSizeHistogram.x -1)/blockSizeHistogram.x,1,1); unsigned int *d_histogram; /* Move lumRange over to GPU checkCudaErrors(cudaMalloc((void**) &d_lumRange, sizeof(float))); checkCudaErrors(cudaMemcpy(d_lumRange, h_lumRange,sizeof(float), cudaMemcpyHostToDevice));*/ // Allocate memory for d_histogram and initialize to 0. checkCudaErrors(cudaMalloc((void**)&d_histogram,sizeof(unsigned int)*numBins)); checkCudaErrors(cudaMemset(d_histogram,0,sizeof(unsigned int)*numBins)); // Launch histogram_kernel on the Device histogram_kernel<<<gridSizeHistogram,blockSizeHistogram>>>(d_logLuminance, d_histogram, lumRange, min_logLum, numBins); /* 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ const dim3 blockSizeScan(numBins,1,1); const dim3 gridSizeScan(1,1,1); scan_kernel<<<gridSizeScan,blockSizeScan,sizeof(float)*blockSizeScan.x>>>(d_cdf, d_histogram, numBins); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); /* checkCudaErrors(cudaFree(d_histogram)); checkCudaErrors(cudaFree(d_max_inter)); checkCudaErrors(cudaFree(d_max)); checkCudaErrors(cudaFree(d_min_inter)); checkCudaErrors(cudaFree(d_min));*/ }
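/*
 * Editor's sketch (not the course's reference_calc.cpp): a serial reference for the
 * four steps the kernels above implement -- min/max of the log-luminance, the range,
 * a histogram using bin = (lum - lumMin) / lumRange * numBins, and an exclusive prefix
 * sum for the CDF, matching scan_kernel's convention that cdf[0] = 0. The names below
 * are illustrative; lumRange is assumed to be non-zero.
 */
#include <algorithm>
#include <cstddef>
#include <vector>

void cdf_reference(const std::vector<float>& logLum, size_t numBins,
                   float& minLogLum, float& maxLogLum,
                   std::vector<unsigned int>& cdf) {
  minLogLum = *std::min_element(logLum.begin(), logLum.end());
  maxLogLum = *std::max_element(logLum.begin(), logLum.end());
  float lumRange = maxLogLum - minLogLum;

  std::vector<unsigned int> histo(numBins, 0);
  for (float v : logLum) {
    size_t bin = (size_t)((v - minLogLum) / lumRange * numBins);
    if (bin == numBins) bin = numBins - 1;  // clamp the maximum value, as the kernel does
    ++histo[bin];
  }

  // Exclusive scan: cdf[i] is the sum of histo[0..i-1].
  cdf.assign(numBins, 0);
  for (size_t i = 1; i < numBins; ++i) cdf[i] = cdf[i - 1] + histo[i - 1];
}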
bb7674ecd4b6f0ab52fed40f1bd47fb928a05452.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>

// function for checking the CUDA runtime API results.
inline void checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != hipSuccess) {
    printf_s("Error: %s : %d", __FILE__, __LINE__);
    printf_s("CUDA Runtime Error: %d: %s\n", result, hipGetErrorString(result));
    exit(1);
  }
#endif
}

/**
 * Utility function for printing the contents of an array.
 **/
static void print_read_results(int *h_arr, int *d_arr, int N, const char *label)
{
  int i;
  int maxNumToPrint = 10;
  int nToPrint = N > maxNumToPrint ? maxNumToPrint : N;

  checkCuda(hipMemcpy(h_arr, d_arr, nToPrint * sizeof(int), hipMemcpyDeviceToHost));
  printf_s("Threads performing %s operations read values", label);

  for (i = 0; i < nToPrint; i++) {
    printf_s(" %d", h_arr[i]);
  }
  printf_s("\n");
}

/**
 * This version of the kernel uses atomic operations to safely increment a
 * shared variable from multiple threads.
 **/
__global__ void atomics(int *shared_var, int *values_read, int N, int iters)
{
  int i;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N) return;

  values_read[tid] = atomicAdd(shared_var, 1);

  for (i = 0; i < iters; i++) {
    atomicAdd(shared_var, 1);
  }
}

/**
 * This version of the kernel performs the same increments as atomics() but in
 * an unsafe manner.
 **/
__global__ void unsafe(int *shared_var, int *values_read, int N, int iters)
{
  int i;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N) return;

  int old = *shared_var;
  *shared_var = old + 1;
  values_read[tid] = old;

  for (i = 0; i < iters; i++) {
    int old = *shared_var;
    *shared_var = old + 1;
  }
}

int main(int argc, char **argv)
{
  int N = 64;
  int block = 32;
  int runs = 30;
  int iters = 100000;
  int r;

  int *d_shared_var;
  int h_shared_var_atomic, h_shared_var_unsafe;
  int *d_values_read_atomic;
  int *d_values_read_unsafe;
  int *h_values_read;

  checkCuda(hipMalloc((void **)&d_shared_var, sizeof(int)));
  checkCuda(hipMalloc((void **)&d_values_read_atomic, N * sizeof(int)));
  checkCuda(hipMalloc((void **)&d_values_read_unsafe, N * sizeof(int)));
  h_values_read = (int *)malloc(N * sizeof(int));

  for (r = 0; r < runs; r++) {
    checkCuda(hipMemset(d_shared_var, 0x00, sizeof(int)));
    hipLaunchKernelGGL(( atomics) , dim3(N / block), dim3(block) , 0, 0, d_shared_var, d_values_read_atomic, N, iters);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipMemcpy(&h_shared_var_atomic, d_shared_var, sizeof(int), hipMemcpyDeviceToHost));

    checkCuda(hipMemset(d_shared_var, 0x00, sizeof(int)));
    hipLaunchKernelGGL(( unsafe) , dim3(N / block), dim3(block) , 0, 0, d_shared_var, d_values_read_unsafe, N, iters);
    checkCuda(hipDeviceSynchronize());
    checkCuda(hipMemcpy(&h_shared_var_unsafe, d_shared_var, sizeof(int), hipMemcpyDeviceToHost));
  }

  printf_s("In total, %d runs using atomic operations\n", runs);
  printf_s(" Using atomic operations also produced an output of %d\n", h_shared_var_atomic);
  printf_s("In total, %d runs using unsafe operations\n", runs);
  printf_s(" Using unsafe operations also produced an output of %d\n", h_shared_var_unsafe);

  print_read_results(h_values_read, d_values_read_atomic, N, "atomic");
  print_read_results(h_values_read, d_values_read_unsafe, N, "unsafe");

  return 0;
}
bb7674ecd4b6f0ab52fed40f1bd47fb928a05452.cu
#include <stdio.h>
#include <stdlib.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>

// function for checking the CUDA runtime API results.
inline void checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    printf_s("Error: %s : %d", __FILE__, __LINE__);
    printf_s("CUDA Runtime Error: %d: %s\n", result, cudaGetErrorString(result));
    exit(1);
  }
#endif
}

/**
 * Utility function for printing the contents of an array.
 **/
static void print_read_results(int *h_arr, int *d_arr, int N, const char *label)
{
  int i;
  int maxNumToPrint = 10;
  int nToPrint = N > maxNumToPrint ? maxNumToPrint : N;

  checkCuda(cudaMemcpy(h_arr, d_arr, nToPrint * sizeof(int), cudaMemcpyDeviceToHost));
  printf_s("Threads performing %s operations read values", label);

  for (i = 0; i < nToPrint; i++) {
    printf_s(" %d", h_arr[i]);
  }
  printf_s("\n");
}

/**
 * This version of the kernel uses atomic operations to safely increment a
 * shared variable from multiple threads.
 **/
__global__ void atomics(int *shared_var, int *values_read, int N, int iters)
{
  int i;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N) return;

  values_read[tid] = atomicAdd(shared_var, 1);

  for (i = 0; i < iters; i++) {
    atomicAdd(shared_var, 1);
  }
}

/**
 * This version of the kernel performs the same increments as atomics() but in
 * an unsafe manner.
 **/
__global__ void unsafe(int *shared_var, int *values_read, int N, int iters)
{
  int i;
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= N) return;

  int old = *shared_var;
  *shared_var = old + 1;
  values_read[tid] = old;

  for (i = 0; i < iters; i++) {
    int old = *shared_var;
    *shared_var = old + 1;
  }
}

int main(int argc, char **argv)
{
  int N = 64;
  int block = 32;
  int runs = 30;
  int iters = 100000;
  int r;

  int *d_shared_var;
  int h_shared_var_atomic, h_shared_var_unsafe;
  int *d_values_read_atomic;
  int *d_values_read_unsafe;
  int *h_values_read;

  checkCuda(cudaMalloc((void **)&d_shared_var, sizeof(int)));
  checkCuda(cudaMalloc((void **)&d_values_read_atomic, N * sizeof(int)));
  checkCuda(cudaMalloc((void **)&d_values_read_unsafe, N * sizeof(int)));
  h_values_read = (int *)malloc(N * sizeof(int));

  for (r = 0; r < runs; r++) {
    checkCuda(cudaMemset(d_shared_var, 0x00, sizeof(int)));
    atomics <<<N / block, block >>>(d_shared_var, d_values_read_atomic, N, iters);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaMemcpy(&h_shared_var_atomic, d_shared_var, sizeof(int), cudaMemcpyDeviceToHost));

    checkCuda(cudaMemset(d_shared_var, 0x00, sizeof(int)));
    unsafe <<<N / block, block >>>(d_shared_var, d_values_read_unsafe, N, iters);
    checkCuda(cudaDeviceSynchronize());
    checkCuda(cudaMemcpy(&h_shared_var_unsafe, d_shared_var, sizeof(int), cudaMemcpyDeviceToHost));
  }

  printf_s("In total, %d runs using atomic operations\n", runs);
  printf_s(" Using atomic operations also produced an output of %d\n", h_shared_var_atomic);
  printf_s("In total, %d runs using unsafe operations\n", runs);
  printf_s(" Using unsafe operations also produced an output of %d\n", h_shared_var_unsafe);

  print_read_results(h_values_read, d_values_read_atomic, N, "atomic");
  print_read_results(h_values_read, d_values_read_unsafe, N, "unsafe");

  return 0;
}
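/*
 * Editor's note on the expected numbers from the program above: each of the N = 64
 * threads performs (1 + iters) atomicAdds on shared_var, so after every run the atomic
 * counter is exactly N * (iters + 1) = 6,400,064. The unsafe kernel's plain
 * read-modify-write loses an update whenever two threads read the same old value, so
 * its final count is typically far smaller and varies from run to run.
 */
#include <cstdio>

int main() {
  const long long N = 64, iters = 100000;
  std::printf("expected atomic total per run = %lld\n", N * (iters + 1));  // 6400064
  return 0;
}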
3afaf1bf27abaf77fdbd8287bf8c846e73c6a50e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> #include <iostream> #include <fstream> #include <sstream> #include <float.h> #include <math.h> #ifndef OUT_OF_BOUNDS_LABEL #define OUT_OF_BOUNDS_LABEL -1 #endif #ifndef BAD_TOPOLOGY_LABEL #define BAD_TOPOLOGY_LABEL -2 #endif #ifndef NUM_OF_CHANNELS #define NUM_OF_CHANNELS 3 #endif #ifndef USE_COUNTS #define USE_COUNTS 1 #endif #ifndef OUT_OF_BOUNDS_LABEL #define OUT_OF_BOUNDS_LABEL -1 #endif #define THREADS_PER_BLOCK 512 #include "s_m.h" #include "sp.h" #include <stdio.h> #ifndef WIN32 #include <unistd.h> #endif int tresh = -2; __device__ volatile int sem = 0; __device__ void acquire_semaphore(volatile int *lock){ while (atomicCAS((int *)lock, 0, 1) != 0); } __device__ void release_semaphore(volatile int *lock){ *lock = 0; __threadfence(); } __device__ __forceinline__ float atomicMaxFloat (float * addr, float value) { float old; old = (value >= 0) ? __int_as_float(atomicMax((int *)addr, __float_as_int(value))) : __uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value))); return old; } __device__ int mLock=0; __host__ void CudaCalcMergeCandidate(const float* image_gpu_double, int* split_merge_pairs, int* seg, bool* border, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim, const int nSPs_buffer, const int change, float i_std, float alpha){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); int num_block2 = ceil( double(nSPs_buffer) / double(THREADS_PER_BLOCK) ); dim3 BlockPerGrid2(num_block2,1); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); float a0 = 10000; float b0 = i_std * (a0) ; //b0 = 0.05*0.05*a0; int* mutex ; float alpha_hasting_ratio = alpha; hipMalloc((void **)&mutex, sizeof(int)); hipMemset(mutex, 0, sizeof(int)); hipLaunchKernelGGL(( init_sm), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, image_gpu_double,seg,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim,split_merge_pairs); hipLaunchKernelGGL(( calc_merge_candidate), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg,border,split_merge_pairs,nPixels, xdim, ydim, change); hipLaunchKernelGGL(( sum_by_label_sm), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, image_gpu_double,seg,sp_params,sp_gpu_helper_sm, nPixels, xdim); hipLaunchKernelGGL(( calc_bn), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, seg, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, b0); hipLaunchKernelGGL(( calc_marginal_liklelyhoood_of_sp), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer , a0, b0); hipLaunchKernelGGL(( calc_hasting_ratio), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, a0, b0, alpha_hasting_ratio, mutex); hipLaunchKernelGGL(( calc_hasting_ratio2), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, a0, b0, alpha_hasting_ratio, mutex); hipLaunchKernelGGL(( remove_sp), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, split_merge_pairs,sp_params,sp_gpu_helper_sm,nSPs_buffer); hipLaunchKernelGGL(( merge_sp), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg,border, split_merge_pairs, sp_params, sp_gpu_helper_sm, 
nPixels, xdim, ydim); } __host__ int CudaCalcSplitCandidate(const float* image_gpu_double, int* split_merge_pairs, int* seg, bool* border, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim, const int nSPs_buffer, int* seg_split1 ,int* seg_split2, int* seg_split3, int max_SP, int count, float i_std, float alpha){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); int num_block2 = ceil( double(nSPs_buffer) / double(THREADS_PER_BLOCK) ); dim3 BlockPerGrid2(num_block2,1); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); float a0 = 10000; float b0 = i_std * (a0) ; float alpha_hasting_ratio = alpha; int* mutex_2; int done = 1; int* max_sp; hipMalloc((void **)&max_sp, sizeof(int)); hipMalloc((void **)&mutex_2, sizeof(int)); // malloc of single value is also important int distance = 1; int offset = count%2+1; hipMemset(seg_split1, 0, nPixels*sizeof(int)); hipMemset(seg_split2, 0, nPixels*sizeof(int)); hipLaunchKernelGGL(( init_sm), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, image_gpu_double,seg,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim,split_merge_pairs); hipLaunchKernelGGL(( init_split), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, border,seg_split1,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim, ydim, offset , seg, max_sp, max_SP); hipLaunchKernelGGL(( init_split), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, border,seg_split2,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim,ydim, -offset, seg,max_sp, max_SP); hipLaunchKernelGGL(( split_sp), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg,seg_split1, split_merge_pairs, sp_params, sp_gpu_helper_sm, nPixels, xdim, ydim, max_SP); while(done) { hipMemset(mutex_2, 0, sizeof(int)); hipMemcpy(&done, mutex_2, sizeof(int), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( calc_split_candidate), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg_split1,border,distance, mutex_2, nPixels, xdim, ydim); distance++; hipMemcpy(&done, mutex_2, sizeof(int), hipMemcpyDeviceToHost); } done =1; distance = 1; while(done) { hipMemset(mutex_2, 0, sizeof(int)); hipMemcpy(&done, mutex_2, sizeof(int), hipMemcpyDeviceToHost); hipLaunchKernelGGL(( calc_split_candidate), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg_split2 ,border,distance, mutex_2, nPixels, xdim, ydim); distance++; hipMemcpy(&done, mutex_2, sizeof(int), hipMemcpyDeviceToHost); } hipLaunchKernelGGL(( calc_seg_split), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, seg_split1,seg_split2, seg, seg_split3, nPixels, max_SP); hipLaunchKernelGGL(( sum_by_label_split), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, image_gpu_double,seg_split1,sp_params,sp_gpu_helper_sm, nPixels, xdim,max_SP); hipLaunchKernelGGL(( calc_bn_split), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, seg_split3, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, b0, max_SP); hipLaunchKernelGGL(( calc_marginal_liklelyhoood_of_sp_split), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer , a0, b0, max_SP); hipLaunchKernelGGL(( calc_hasting_ratio_split), dim3(BlockPerGrid2),dim3(ThreadPerBlock), 0, 0, image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, a0, b0, alpha_hasting_ratio, 0,max_SP, max_sp); hipLaunchKernelGGL(( split_sp), dim3(BlockPerGrid),dim3(ThreadPerBlock), 0, 0, 
seg,seg_split1, split_merge_pairs, sp_params, sp_gpu_helper_sm, nPixels, xdim, ydim, max_SP); hipMemcpy(&max_SP, max_sp, sizeof(int), hipMemcpyDeviceToHost); hipFree(max_sp); hipFree(mutex_2); return max_SP; } __global__ void init_sm(const float* image_gpu_double, const int* seg_gpu, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nsuperpixel_buffer, const int xdim,int* split_merge_pairs) { int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; //if (sp_params[k].valid == 0) return; sp_gpu_helper_sm[k].b_n.x = 0; sp_gpu_helper_sm[k].b_n.y = 0; sp_gpu_helper_sm[k].b_n.z = 0; sp_gpu_helper_sm[k].squares_i.x = 0; sp_gpu_helper_sm[k].squares_i.y = 0; sp_gpu_helper_sm[k].squares_i.z = 0; sp_gpu_helper_sm[k].mu_i_sum.x = 0; sp_gpu_helper_sm[k].mu_i_sum.y = 0; sp_gpu_helper_sm[k].mu_i_sum.z = 0; sp_gpu_helper_sm[k].count_f = 0; sp_gpu_helper_sm[k].count = 0; sp_gpu_helper_sm[k].hasting = -999999; //sp_params[k].count = 0; sp_gpu_helper_sm[k].merge = false; sp_gpu_helper_sm[k].remove = false; split_merge_pairs[k*2+1] = 0; split_merge_pairs[k*2] = 0; } __global__ void calc_merge_candidate(int* seg, bool* border, int* split_merge_pairs, const int nPixels, const int xdim, const int ydim, const int change){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; if(!border[idx]) return; int x = idx % xdim; int y = idx / xdim; int C = seg[idx]; // center int W; // north, south, east,west W = OUT_OF_BOUNDS_LABEL; // init if(change==1) { if ((y>1) && (y< ydim-2)) { W = __ldg(&seg[idx+ydim]); // left } } else { if ((x>1) && (x< xdim-2)) { W = __ldg(&seg[idx-1]); // left } } // If the nbr is different from the central pixel and is not out-of-bounds, // then it is a border pixel. 
if (W>0 && C!=W) { atomicMax(&split_merge_pairs[C*2+1],W); } return; } __global__ void calc_split_candidate(int* seg, bool* border,int distance, int* mutex, const int nPixels, const int xdim, const int ydim){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; if(border[idx]) return; int x = idx % xdim; int y = idx / xdim; int C = seg[idx]; // center if(C!=distance) return; if ((y>0)&&(idx-xdim>=0)){ if(!seg[idx-xdim]) { seg[idx-xdim] = distance +1 ; mutex[0] = 1; } } if ((x>0)&&(idx-1>=0)){ if(!seg[idx-1]) { seg[idx-1] = distance +1 ; mutex[0] = 1; } } if ((y<ydim-1)&&(idx+xdim<nPixels)){ if(!seg[idx+xdim]) { seg[idx+xdim] = distance +1 ; mutex[0] = 1; } } if ((x<xdim-1)&&(idx+1<nPixels)){ if(!seg[idx+1]) { seg[idx+1] = distance +1 ; mutex[0] = 1; } } return; } __global__ void init_split(const bool* border, int* seg_gpu, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nsuperpixel_buffer, const int xdim, const int ydim, const int offset, const int* seg, int* max_sp, int max_SP) { int k = threadIdx.x + blockIdx.x * blockDim.x; // the label *max_sp = max_SP+1; if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int x; int y; if((offset==1)||(offset==-1)) { x = int(sp_params[k].mu_s.x)+offset; y = int(sp_params[k].mu_s.y); } else { x = int(sp_params[k].mu_s.x); y = int(sp_params[k].mu_s.y)+offset; } int ind = y*xdim+x; if((ind<0)||(ind>xdim*ydim-1)) return; if(border[ind]) return; if (seg[ind]!=k) return; seg_gpu[ind] = 1; } __global__ void calc_seg_split(int* seg_split1, int* seg_split2,int* seg, int* seg_split3, const int nPixels, int max_SP) { int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; int seg_val = __ldg(&seg[t]); if(seg_split1[t]>__ldg(&seg_split2[t])) seg_val += max_SP; seg_split1[t] = seg_val; return; } __global__ void sum_by_label_sm(const float* image_gpu_double, const int* seg_gpu, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim) { // getting the index of the pixel int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; //get the label int k = __ldg(&seg_gpu[t]); float l = __ldg(& image_gpu_double[3*t]); float a = __ldg(& image_gpu_double[3*t+1]); float b = __ldg(& image_gpu_double[3*t+2]); //atomicAdd(&sp_params[k].count, 1); //TODO: Time it atomicAdd(&sp_gpu_helper_sm[k].squares_i.x, l*l); atomicAdd(&sp_gpu_helper_sm[k].squares_i.y, a*a); atomicAdd(&sp_gpu_helper_sm[k].squares_i.z,b*b); } __global__ void sum_by_label_split(const float* image_gpu_double, const int* seg, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, int max_SP) { // getting the index of the pixel int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; //get the label int k = __ldg(&seg[t]); float l = __ldg(& image_gpu_double[3*t]); float a = __ldg(& image_gpu_double[3*t+1]); float b = __ldg(& image_gpu_double[3*t+2]); atomicAdd(&sp_gpu_helper_sm[k].count, 1); //TODO: Time it atomicAdd(&sp_gpu_helper_sm[k].squares_i.x, l*l); atomicAdd(&sp_gpu_helper_sm[k].squares_i.y, a*a); atomicAdd(&sp_gpu_helper_sm[k].squares_i.z,b*b); atomicAdd(&sp_gpu_helper_sm[k].mu_i_sum.x, l); atomicAdd(&sp_gpu_helper_sm[k].mu_i_sum.y, a); atomicAdd(&sp_gpu_helper_sm[k].mu_i_sum.z, b); return; } __global__ void calc_bn(int* seg, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const 
int xdim, const int nsuperpixel_buffer, float b_0) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; // TODO: check if there is no neigh //get the label of neigh int f = split_merge_pairs[2*k+1]; //if (sp_params[f].valid == 0) return; //if (f<=0) return; float count_f = __ldg(&sp_params[f].count); float count_k = __ldg(&sp_params[k].count); float squares_f_x = __ldg(&sp_gpu_helper_sm[f].squares_i.x); float squares_f_y = __ldg(&sp_gpu_helper_sm[f].squares_i.y); float squares_f_z = __ldg(&sp_gpu_helper_sm[f].squares_i.z); float squares_k_x = __ldg(&sp_gpu_helper_sm[k].squares_i.x); float squares_k_y = __ldg(&sp_gpu_helper_sm[k].squares_i.y); float squares_k_z = __ldg(&sp_gpu_helper_sm[k].squares_i.z); float mu_f_x = __ldg(&sp_gpu_helper[f].mu_i_sum.x); float mu_f_y = __ldg(&sp_gpu_helper[f].mu_i_sum.y); float mu_f_z = __ldg(&sp_gpu_helper[f].mu_i_sum.z); float mu_k_x = __ldg(&sp_gpu_helper[k].mu_i_sum.x); float mu_k_y = __ldg(&sp_gpu_helper[k].mu_i_sum.y); float mu_k_z = __ldg(&sp_gpu_helper[k].mu_i_sum.z); //if ((k==105)||(k==42)) printf("Merger: %d, %d ,sq_x: %f , sq_y: %f , sq_z: %f\n", k, f,squares_k_x, squares_k_y, squares_k_z) ; int count_fk = count_f + count_k; sp_gpu_helper_sm[k].count_f = count_fk; //sp_gpu_helper_sm[k].count_f = sp_params[k].count + sp_params[f].count; sp_gpu_helper_sm[k].b_n.x = b_0 + 0.5 * ((squares_k_x) - ( mu_k_x*mu_k_x/ count_k)); sp_gpu_helper_sm[k].b_n_f.x = b_0 + 0.5 *( (squares_k_x+squares_f_x) - ( (mu_f_x + mu_k_x ) * (mu_f_x + mu_k_x ) / (count_fk))); sp_gpu_helper_sm[k].b_n.y = b_0 + 0.5 * ((squares_k_y) - ( mu_k_y*mu_k_y/ count_k)); sp_gpu_helper_sm[k].b_n_f.y = b_0 + 0.5 *( (squares_k_y+squares_f_y) - ( (mu_f_y + mu_k_y ) * (mu_f_y + mu_k_y ) / (count_fk))); sp_gpu_helper_sm[k].b_n.z = b_0 + 0.5 * ((squares_k_z) - ( mu_k_z*mu_k_z/ count_k)); sp_gpu_helper_sm[k].b_n_f.z = b_0 + 0.5 *( (squares_k_z+squares_f_z) - ( (mu_f_z + mu_k_z ) * (mu_f_z + mu_k_z ) / (count_fk))); if( sp_gpu_helper_sm[k].b_n.x<0) sp_gpu_helper_sm[k].b_n.x = 0.1; if( sp_gpu_helper_sm[k].b_n.y<0) sp_gpu_helper_sm[k].b_n.y = 0.1; if( sp_gpu_helper_sm[k].b_n.z<0) sp_gpu_helper_sm[k].b_n.z = 0.1; if( sp_gpu_helper_sm[k].b_n_f.x<0) sp_gpu_helper_sm[k].b_n_f.x = 0.1; if( sp_gpu_helper_sm[k].b_n_f.y<0) sp_gpu_helper_sm[k].b_n_f.y = 0.1; if( sp_gpu_helper_sm[k].b_n_f.z<0) sp_gpu_helper_sm[k].b_n_f.z = 0.1; } __global__ void calc_bn_split(int* seg, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float b_0, int max_SP) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; // TODO: check if there is no neigh //get the label of neigh int s = k + max_SP; if (s>=nsuperpixel_buffer) return; float count_f = __ldg(&sp_params[k].count); float count_k= __ldg(&sp_gpu_helper_sm[k].count); float count_s = __ldg(&sp_gpu_helper_sm[s].count); if((count_f<1)||( count_k<1)||(count_s<1)) return; float squares_s_x = __ldg(&sp_gpu_helper_sm[s].squares_i.x); float squares_s_y = __ldg(&sp_gpu_helper_sm[s].squares_i.y); float squares_s_z = __ldg(&sp_gpu_helper_sm[s].squares_i.z); float squares_k_x = __ldg(&sp_gpu_helper_sm[k].squares_i.x); float squares_k_y = __ldg(&sp_gpu_helper_sm[k].squares_i.y); float squares_k_z = 
__ldg(&sp_gpu_helper_sm[k].squares_i.z); float mu_s_x = __ldg(&sp_gpu_helper_sm[s].mu_i_sum.x); float mu_s_y = __ldg(&sp_gpu_helper_sm[s].mu_i_sum.y); float mu_s_z = __ldg(&sp_gpu_helper_sm[s].mu_i_sum.z); float mu_k_x = __ldg(&sp_gpu_helper_sm[k].mu_i_sum.x); float mu_k_y = __ldg(&sp_gpu_helper_sm[k].mu_i_sum.y); float mu_k_z = __ldg(&sp_gpu_helper_sm[k].mu_i_sum.z); float mu_f_x =__ldg(&sp_gpu_helper[k].mu_i_sum.x); float mu_f_y = __ldg(&sp_gpu_helper[k].mu_i_sum.y); float mu_f_z = __ldg(&sp_gpu_helper[k].mu_i_sum.z); sp_gpu_helper_sm[k].b_n.x = b_0 + 0.5 * ((squares_k_x) - ( (mu_k_x*mu_k_x)/ (count_k))); //sp_gpu_helper_sm[k].b_n.x = b_0 + (squares_k_x)+(mu_k_x*mu_k_x)/(count_k*count_k)-2*(mu_k_x*mu_k_x)/(count_k)+(mu_k_x*mu_k_x)/(count_k*count_k); sp_gpu_helper_sm[k].b_n.y = b_0 + 0.5 * ((squares_k_y) - ( mu_k_y*mu_k_y/ count_k)); //sp_gpu_helper_sm[k].b_n.y = b_0 + (squares_k_y)+(mu_k_y*mu_k_y)/(count_k*count_k)-2*(mu_k_y*mu_k_y)/(count_k)+(mu_k_y*mu_k_y)/(count_k*count_k); sp_gpu_helper_sm[k].b_n.z = b_0 + 0.5 * ((squares_k_z) - ( mu_k_z*mu_k_z/ count_k)); // sp_gpu_helper_sm[k].b_n.z = b_0 + (squares_k_z)+(mu_k_z*mu_k_z)/(count_k*count_k)-2*(mu_k_z*mu_k_z)/(count_k)+(mu_k_z*mu_k_z)/(count_k*count_k); sp_gpu_helper_sm[s].b_n.x = b_0 + 0.5 * ((squares_s_x) - ( mu_s_x*mu_s_x/ count_s)); sp_gpu_helper_sm[s].b_n.y = b_0 + 0.5 * ((squares_s_y) - ( mu_s_y*mu_s_y/ count_s)); sp_gpu_helper_sm[s].b_n.z = b_0 + 0.5 * ((squares_s_z) - ( mu_s_z*mu_s_z/ count_s)); /* sp_gpu_helper_sm[s].b_n.x = b_0 + (squares_s_x)+(mu_s_x*mu_s_x)/(count_s*count_s)-2*(mu_s_x*mu_s_x)/(count_s)+(mu_s_x*mu_s_x)/(count_k*count_k); sp_gpu_helper_sm[s].b_n.y = b_0 + (squares_s_y)+(mu_s_y*mu_s_y)/(count_s*count_s)-2*(mu_s_y*mu_s_y)/(count_s)+(mu_s_y*mu_s_y)/(count_k*count_k); sp_gpu_helper_sm[s].b_n.z = b_0 + (squares_s_z)+(mu_s_z*mu_s_z)/(count_s*count_s)-2*(mu_s_z*mu_s_z)/(count_s)+(mu_s_z*mu_s_z)/(count_k*count_k); */ sp_gpu_helper_sm[k].b_n_f.x = b_0 + 0.5 * ((squares_k_x+squares_s_x) - ( mu_f_x*mu_f_x/ count_f)); sp_gpu_helper_sm[k].b_n_f.y = b_0 + 0.5 * ((squares_k_y+squares_s_y) - ( mu_f_y*mu_f_y/ count_f)); sp_gpu_helper_sm[k].b_n_f.z = b_0 + 0.5 * ((squares_k_z+squares_s_z) - ( mu_f_z*mu_f_z/ count_f)); } __global__ void calc_marginal_liklelyhoood_of_sp_split(const float* image_gpu_double, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a_0, float b_0, int max_SP) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int s = k + max_SP; if (s>=nsuperpixel_buffer) return; float count_f = __ldg(&sp_params[k].count); float count_k= __ldg(&sp_gpu_helper_sm[k].count); float count_s = __ldg(&sp_gpu_helper_sm[s].count); if((count_f<1)||( count_k<1)||(count_s<1)) return; if (count_f!=count_k+count_s) return; // TODO: check if there is no neigh // TODO: check if num is the same //get the label //a_0 = 1100*(count_f); float a_n_k = a_0 + float(count_k)/2; float a_n_s = a_0+ float(count_s)/2; float a_n_f = a_0+ float(count_f)/2; float v_n_k = 1/float(count_k); float v_n_s = 1/float(count_s); float v_n_f = 1/float(count_f); /* v_n_k = 1; v_n_f =1; v_n_s=1;*/ float b_n_k_x = __ldg(&sp_gpu_helper_sm[k].b_n.x); float b_n_k_y = __ldg(&sp_gpu_helper_sm[k].b_n.y); float b_n_k_z = __ldg(&sp_gpu_helper_sm[k].b_n.z); float b_n_s_x = 
__ldg(&sp_gpu_helper_sm[s].b_n.x); float b_n_s_y = __ldg(&sp_gpu_helper_sm[s].b_n.y); float b_n_s_z = __ldg(&sp_gpu_helper_sm[s].b_n.z); float b_n_f_x = __ldg(&sp_gpu_helper_sm[k].b_n_f.x); float b_n_f_y = __ldg(&sp_gpu_helper_sm[k].b_n_f.y); float b_n_f_z = __ldg(&sp_gpu_helper_sm[k].b_n_f.z); a_0 =a_n_k; sp_gpu_helper_sm[k].numerator.x = a_0 * __logf(b_0) + lgammaf(a_n_k)+ 0.5*__logf(v_n_k); sp_gpu_helper_sm[k].denominator.x = a_n_k * __logf (b_n_k_x) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator.y = a_n_k * __logf (b_n_k_y) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator.z = a_n_k * __logf (b_n_k_z) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); a_0 =a_n_s; sp_gpu_helper_sm[s].numerator.x = a_0 * __logf(b_0) + lgammaf(a_n_s)+0.5*__logf(v_n_s); sp_gpu_helper_sm[s].denominator.x = a_n_s * __logf (b_n_s_x) + 0.5 * count_s * __logf (M_PI) + \ count_s * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[s].denominator.y = a_n_s * __logf (b_n_s_y) + 0.5 * count_s * __logf (M_PI) + \ count_s * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[s].denominator.z = a_n_s * __logf (b_n_s_z) + 0.5 * count_s * __logf (M_PI) + \ count_s * __logf (2) + lgammaf(a_0); a_0 =a_n_f; sp_gpu_helper_sm[k].numerator_f.x = a_0 * __logf(b_0) + lgammaf(a_n_f)+0.5*__logf(v_n_f); sp_gpu_helper_sm[k].denominator_f.x = a_n_f * __logf (b_n_f_x) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator_f.y = a_n_f * __logf (b_n_f_y) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator_f.z = a_n_f * __logf (b_n_f_z) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); } __global__ void calc_marginal_liklelyhoood_of_sp(const float* image_gpu_double, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a_0, float b_0) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; // TODO: check if there is no neigh // TODO: check if num is the same //get the label float count_k = __ldg(&sp_params[k].count); float count_f = __ldg(&sp_gpu_helper_sm[k].count_f); //if ((count_k==0)||(count_f==0)) return; //if (sp_params[k].valid == 0) return; //if (f==-1) return; float a_n = a_0 + float(count_k) / 2; float a_n_f = a_0+ float(count_f) / 2; // float v_n = 1 / float(num_pixels_in_sp); float v_n = 1/float(count_k); float v_n_f = 1/float(count_f); //printf("Merge: %f,%d, \n", sp_gpu_helper_sm[k].b_n.x, count_k); a_0 = a_n; sp_gpu_helper_sm[k].numerator.x = a_0 * __logf(b_0) + lgammaf(a_n)+0.5*__logf(v_n); sp_gpu_helper_sm[k].denominator.x = a_n* __logf ( __ldg(&sp_gpu_helper_sm[k].b_n.x)) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); //sp_gpu_helper_sm[k].numerator.y = a_0 * __logf (b_0) + lgammaf(a_0)+0.5*v_n; sp_gpu_helper_sm[k].denominator.y = a_n* __logf ( __ldg(&sp_gpu_helper_sm[k].b_n.y)) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgamma(a_0); //sp_gpu_helper_sm[k].numerator.z = a_0 * __logf(b_0) + lgammaf(a_0)+0.5*v_n; sp_gpu_helper_sm[k].denominator.z = a_n* __logf(__ldg(&sp_gpu_helper_sm[k].b_n.z)) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); a_0 = a_n_f; 
sp_gpu_helper_sm[k].numerator_f.x = a_0 * __logf (b_0) + lgammaf(a_n_f)+0.5*__logf(v_n_f); sp_gpu_helper_sm[k].denominator_f.x = a_n_f* __logf (__ldg(&sp_gpu_helper_sm[k].b_n_f.x)) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); //sp_gpu_helper_sm[k].numerator_f.y = a_0 * __logf (b_0) + lgammaf(a_0)+0.5*v_n_f; sp_gpu_helper_sm[k].denominator_f.y = a_n_f* __logf (__ldg(&sp_gpu_helper_sm[k].b_n_f.y)) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); //sp_gpu_helper_sm[k].numerator_f.z = a_0 * __logf (b_0) + lgammaf(a_0)+0.5*v_n_f; sp_gpu_helper_sm[k].denominator_f.z = a_n_f* __logf (__ldg(&sp_gpu_helper_sm[k].b_n_f.z)) + 0.5 * count_f* __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); } __global__ void calc_hasting_ratio(const float* image_gpu_double,int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a0, float b0, float alpha_hasting_ratio, int* mutex ) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int f = split_merge_pairs[2*k+1]; if (sp_params[f].valid == 0) return; if(f<=0) return; float count_k = __ldg(&sp_params[k].count); float count_f = __ldg(&sp_gpu_helper_sm[k].count_f); if ((count_k<1)||(count_f<1)) return; sp_gpu_helper_sm[k].merge = false; float num_k = __ldg(&sp_gpu_helper_sm[k].numerator.x); float total_marginal_1 = (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.x)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.y)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.z)); float num_f = __ldg(&sp_gpu_helper_sm[f].numerator.x); float total_marginal_2 = (num_f - __ldg(&sp_gpu_helper_sm[f].denominator.x)) + (num_f - __ldg(&sp_gpu_helper_sm[f].denominator.y)) + (num_f - __ldg(&sp_gpu_helper_sm[f].denominator.z)); float num_kf = __ldg(&sp_gpu_helper_sm[k].numerator_f.x); float total_marginal_f = (num_kf - __ldg(&sp_gpu_helper_sm[k].denominator_f.x)) + (num_kf - __ldg(&sp_gpu_helper_sm[k].denominator_f.y)) + (num_kf - __ldg(&sp_gpu_helper_sm[k].denominator_f.z)); float log_nominator = lgammaf(count_f) + total_marginal_f + lgammaf(alpha_hasting_ratio) + lgammaf(alpha_hasting_ratio / 2 + count_k) + lgammaf(alpha_hasting_ratio / 2 + count_f - count_k); float log_denominator = __logf(alpha_hasting_ratio) + lgammaf(count_k) + lgammaf(count_f - count_k) + total_marginal_1 + total_marginal_2 + lgammaf(alpha_hasting_ratio + count_f) + lgammaf(alpha_hasting_ratio / 2) + lgammaf(alpha_hasting_ratio / 2); log_denominator = __logf(alpha_hasting_ratio) + total_marginal_1 + total_marginal_2; log_nominator = total_marginal_f ; sp_gpu_helper_sm[k].hasting = log_nominator - log_denominator; return; } __global__ void calc_hasting_ratio2(const float* image_gpu_double,int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a0, float b0, float alpha_hasting_ratio, int* mutex ) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int f = split_merge_pairs[2*k+1]; if (sp_params[f].valid == 0) return; if(f<=0) return; if((sp_gpu_helper_sm[k].hasting ) > -2) { //printf("Want to merge k: %d, f: %d, splitmerge k %d, 
splitmerge f %d, %d\n", k, f, split_merge_pairs[2*k], split_merge_pairs[2*f], split_merge_pairs[2*f+1] ); if( k > atomicMax(&split_merge_pairs[2*f],k)) { //printf("Merge: %f \n",sp_gpu_helper_sm[k].hasting ); sp_gpu_helper_sm[k].merge = true; } } return; } __global__ void calc_hasting_ratio_split(const float* image_gpu_double,int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a0, float b0, float alpha_hasting_ratio, int* mutex, int max_SP, int* max_sp ) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int s = k + max_SP; if(s>=nsuperpixel_buffer) return; float count_f = __ldg(&sp_params[k].count); float count_k= __ldg(&sp_gpu_helper_sm[k].count); float count_s = __ldg(&sp_gpu_helper_sm[s].count); if((count_f<1)||( count_k<1)||(count_s<1)) return; float num_k = __ldg(&sp_gpu_helper_sm[k].numerator.x); float num_s = __ldg(&sp_gpu_helper_sm[s].numerator.x); float num_f = __ldg(&sp_gpu_helper_sm[k].numerator_f.x); float total_marginal_k = (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.x)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.y)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.z)); float total_marginal_s = (num_s - __ldg(&sp_gpu_helper_sm[s].denominator.x)) + (num_s - __ldg(&sp_gpu_helper_sm[s].denominator.y)) + (num_s - __ldg(&sp_gpu_helper_sm[s].denominator.z)); float total_marginal_f = (num_f - __ldg(&sp_gpu_helper_sm[k].denominator_f.x)) + (num_f - __ldg(&sp_gpu_helper_sm[k].denominator_f.y)) + (num_f - __ldg(&sp_gpu_helper_sm[k].denominator_f.z)); //printf("hasating:x k: %d, count: %f, den: %f, %f, %f, b_n: %f, %f, %f, num: %f \n",k, count_k, sp_gpu_helper_sm[k].denominator.x, sp_gpu_helper_sm[k].denominator.y, sp_gpu_helper_sm[k].denominator.z, __logf (sp_gpu_helper_sm[k].b_n.x) , __logf (sp_gpu_helper_sm[k].b_n.y), __logf (sp_gpu_helper_sm[k].b_n.z), sp_gpu_helper_sm[k].numerator.x); float log_nominator = __logf(alpha_hasting_ratio)+ lgammaf(count_k)+ total_marginal_k + lgammaf(count_s) + total_marginal_s ; log_nominator = total_marginal_k + total_marginal_s ; float log_denominator = lgammaf(count_f) + total_marginal_f; log_denominator =total_marginal_f; sp_gpu_helper_sm[k].hasting = log_nominator - log_denominator; sp_gpu_helper_sm[k].merge = (sp_gpu_helper_sm[k].hasting > -2); sp_gpu_helper_sm[s].merge = (sp_gpu_helper_sm[k].hasting > -2); if((sp_gpu_helper_sm[k].merge)) { s = atomicAdd(max_sp,1) +1; split_merge_pairs[2*k] = s; //atomicMax(max_sp,s); sp_params[k].prior_count/=2; sp_params[s].prior_count= sp_params[k].prior_count; //#check why. 
} } __global__ void merge_sp(int* seg, bool* border, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; int k = seg[idx]; // center //if (sp_params[k].valid == 0) return; int f = split_merge_pairs[2*k+1]; if(sp_gpu_helper_sm[k].remove) seg[idx] = f; return; } __global__ void split_sp(int* seg, int* seg_split1, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim,int max_SP){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; int k = seg[idx]; // center int k2 = k + max_SP; if ((sp_gpu_helper_sm[k].merge == false)||sp_gpu_helper_sm[k2].merge == false ) return; if(seg_split1[idx]==k2) seg[idx] = split_merge_pairs[2*k]; //seg[idx] = seg_split1[idx]; //printf("Add the following: %d - %d'\n", k,split_merge_pairs[2*k]); sp_params[split_merge_pairs[2*k]].valid = 1; return; } __global__ void remove_sp(int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nsuperpixel_buffer) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; int f = split_merge_pairs[2*k+1]; if ((sp_params[k].valid == 0)||(sp_params[f].valid == 0)) return; if(f<=0) return; if ((sp_gpu_helper_sm[k].merge == true) && (sp_gpu_helper_sm[f].merge == false) && (split_merge_pairs[2*f]==k) ) { sp_gpu_helper_sm[k].remove=true; sp_params[k].valid =0; sp_params[f].prior_count =sp_params[k].prior_count+sp_params[f].prior_count; } else { sp_gpu_helper_sm[k].remove=false; } return; }
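A note on how the two host entry points above fit together: the code that owns the buffers is expected to alternate merge and split proposals across iterations, feeding the return value of CudaCalcSplitCandidate back in as the new max_SP. The wrapper below is only an illustrative sketch of that calling pattern; the function name, the even/odd schedule and the reuse of the iteration counter as `count` are assumptions, and every pointer is a device buffer assumed to be allocated elsewhere with the types declared in sp.h / s_m.h.

// Hypothetical driver sketch: alternate merge and split proposals, threading max_SP through.
int run_split_merge_step(int iter, const float* image_gpu_double,
                         int* split_merge_pairs, int* seg, bool* border,
                         superpixel_params* sp_params,
                         superpixel_GPU_helper* sp_gpu_helper,
                         superpixel_GPU_helper_sm* sp_gpu_helper_sm,
                         int nPixels, int xdim, int ydim, int nSPs_buffer,
                         int* seg_split1, int* seg_split2, int* seg_split3,
                         int max_SP, float i_std, float alpha)
{
    if (iter % 2 == 0) {
        // 'change' toggles which neighbour direction merge candidates are read from.
        const int change = (iter / 2) % 2;
        CudaCalcMergeCandidate(image_gpu_double, split_merge_pairs, seg, border,
                               sp_params, sp_gpu_helper, sp_gpu_helper_sm,
                               nPixels, xdim, ydim, nSPs_buffer, change, i_std, alpha);
        return max_SP;   // merging never creates new labels
    }
    // Splitting can mint new labels; the updated maximum label is returned.
    return CudaCalcSplitCandidate(image_gpu_double, split_merge_pairs, seg, border,
                                  sp_params, sp_gpu_helper, sp_gpu_helper_sm,
                                  nPixels, xdim, ydim, nSPs_buffer,
                                  seg_split1, seg_split2, seg_split3,
                                  max_SP, iter, i_std, alpha);
}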
3afaf1bf27abaf77fdbd8287bf8c846e73c6a50e.cu
#include <string> #include <iostream> #include <fstream> #include <sstream> #include <float.h> #include <math.h> #ifndef OUT_OF_BOUNDS_LABEL #define OUT_OF_BOUNDS_LABEL -1 #endif #ifndef BAD_TOPOLOGY_LABEL #define BAD_TOPOLOGY_LABEL -2 #endif #ifndef NUM_OF_CHANNELS #define NUM_OF_CHANNELS 3 #endif #ifndef USE_COUNTS #define USE_COUNTS 1 #endif #ifndef OUT_OF_BOUNDS_LABEL #define OUT_OF_BOUNDS_LABEL -1 #endif #define THREADS_PER_BLOCK 512 #include "s_m.h" #include "sp.h" #include <stdio.h> #ifndef WIN32 #include <unistd.h> #endif int tresh = -2; __device__ volatile int sem = 0; __device__ void acquire_semaphore(volatile int *lock){ while (atomicCAS((int *)lock, 0, 1) != 0); } __device__ void release_semaphore(volatile int *lock){ *lock = 0; __threadfence(); } __device__ __forceinline__ float atomicMaxFloat (float * addr, float value) { float old; old = (value >= 0) ? __int_as_float(atomicMax((int *)addr, __float_as_int(value))) : __uint_as_float(atomicMin((unsigned int *)addr, __float_as_uint(value))); return old; } __device__ int mLock=0; __host__ void CudaCalcMergeCandidate(const float* image_gpu_double, int* split_merge_pairs, int* seg, bool* border, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim, const int nSPs_buffer, const int change, float i_std, float alpha){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); int num_block2 = ceil( double(nSPs_buffer) / double(THREADS_PER_BLOCK) ); dim3 BlockPerGrid2(num_block2,1); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); float a0 = 10000; float b0 = i_std * (a0) ; //b0 = 0.05*0.05*a0; int* mutex ; float alpha_hasting_ratio = alpha; cudaMalloc((void **)&mutex, sizeof(int)); cudaMemset(mutex, 0, sizeof(int)); init_sm<<<BlockPerGrid2,ThreadPerBlock>>>(image_gpu_double,seg,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim,split_merge_pairs); calc_merge_candidate<<<BlockPerGrid,ThreadPerBlock>>>(seg,border,split_merge_pairs,nPixels, xdim, ydim, change); sum_by_label_sm<<<BlockPerGrid,ThreadPerBlock>>>(image_gpu_double,seg,sp_params,sp_gpu_helper_sm, nPixels, xdim); calc_bn<<<BlockPerGrid2,ThreadPerBlock>>>(seg, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, b0); calc_marginal_liklelyhoood_of_sp<<<BlockPerGrid2,ThreadPerBlock>>>(image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer , a0, b0); calc_hasting_ratio<<<BlockPerGrid2,ThreadPerBlock>>>(image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, a0, b0, alpha_hasting_ratio, mutex); calc_hasting_ratio2<<<BlockPerGrid2,ThreadPerBlock>>>(image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, a0, b0, alpha_hasting_ratio, mutex); remove_sp<<<BlockPerGrid2,ThreadPerBlock>>>(split_merge_pairs,sp_params,sp_gpu_helper_sm,nSPs_buffer); merge_sp<<<BlockPerGrid,ThreadPerBlock>>>(seg,border, split_merge_pairs, sp_params, sp_gpu_helper_sm, nPixels, xdim, ydim); } __host__ int CudaCalcSplitCandidate(const float* image_gpu_double, int* split_merge_pairs, int* seg, bool* border, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim, const int nSPs_buffer, int* seg_split1 ,int* seg_split2, int* seg_split3, int max_SP, int count, float i_std, float 
alpha){ int num_block = ceil( double(nPixels) / double(THREADS_PER_BLOCK) ); int num_block2 = ceil( double(nSPs_buffer) / double(THREADS_PER_BLOCK) ); dim3 BlockPerGrid2(num_block2,1); dim3 ThreadPerBlock(THREADS_PER_BLOCK,1); dim3 BlockPerGrid(num_block,1); float a0 = 10000; float b0 = i_std * (a0) ; float alpha_hasting_ratio = alpha; int* mutex_2; int done = 1; int* max_sp; cudaMalloc((void **)&max_sp, sizeof(int)); cudaMalloc((void **)&mutex_2, sizeof(int)); // malloc of single value is also important int distance = 1; int offset = count%2+1; cudaMemset(seg_split1, 0, nPixels*sizeof(int)); cudaMemset(seg_split2, 0, nPixels*sizeof(int)); init_sm<<<BlockPerGrid2,ThreadPerBlock>>>(image_gpu_double,seg,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim,split_merge_pairs); init_split<<<BlockPerGrid2,ThreadPerBlock>>>(border,seg_split1,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim, ydim, offset , seg, max_sp, max_SP); init_split<<<BlockPerGrid2,ThreadPerBlock>>>(border,seg_split2,sp_params,sp_gpu_helper_sm, nSPs_buffer, xdim,ydim, -offset, seg,max_sp, max_SP); split_sp<<<BlockPerGrid,ThreadPerBlock>>>(seg,seg_split1, split_merge_pairs, sp_params, sp_gpu_helper_sm, nPixels, xdim, ydim, max_SP); while(done) { cudaMemset(mutex_2, 0, sizeof(int)); cudaMemcpy(&done, mutex_2, sizeof(int), cudaMemcpyDeviceToHost); calc_split_candidate<<<BlockPerGrid,ThreadPerBlock>>>(seg_split1,border,distance, mutex_2, nPixels, xdim, ydim); distance++; cudaMemcpy(&done, mutex_2, sizeof(int), cudaMemcpyDeviceToHost); } done =1; distance = 1; while(done) { cudaMemset(mutex_2, 0, sizeof(int)); cudaMemcpy(&done, mutex_2, sizeof(int), cudaMemcpyDeviceToHost); calc_split_candidate<<<BlockPerGrid,ThreadPerBlock>>>(seg_split2 ,border,distance, mutex_2, nPixels, xdim, ydim); distance++; cudaMemcpy(&done, mutex_2, sizeof(int), cudaMemcpyDeviceToHost); } calc_seg_split<<<BlockPerGrid,ThreadPerBlock>>>(seg_split1,seg_split2, seg, seg_split3, nPixels, max_SP); sum_by_label_split<<<BlockPerGrid,ThreadPerBlock>>>(image_gpu_double,seg_split1,sp_params,sp_gpu_helper_sm, nPixels, xdim,max_SP); calc_bn_split<<<BlockPerGrid2,ThreadPerBlock>>>(seg_split3, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, b0, max_SP); calc_marginal_liklelyhoood_of_sp_split<<<BlockPerGrid2,ThreadPerBlock>>>(image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer , a0, b0, max_SP); calc_hasting_ratio_split<<<BlockPerGrid2,ThreadPerBlock>>>(image_gpu_double, split_merge_pairs, sp_params, sp_gpu_helper, sp_gpu_helper_sm, nPixels, xdim, nSPs_buffer, a0, b0, alpha_hasting_ratio, 0,max_SP, max_sp); split_sp<<<BlockPerGrid,ThreadPerBlock>>>(seg,seg_split1, split_merge_pairs, sp_params, sp_gpu_helper_sm, nPixels, xdim, ydim, max_SP); cudaMemcpy(&max_SP, max_sp, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(max_sp); cudaFree(mutex_2); return max_SP; } __global__ void init_sm(const float* image_gpu_double, const int* seg_gpu, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nsuperpixel_buffer, const int xdim,int* split_merge_pairs) { int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; //if (sp_params[k].valid == 0) return; sp_gpu_helper_sm[k].b_n.x = 0; sp_gpu_helper_sm[k].b_n.y = 0; sp_gpu_helper_sm[k].b_n.z = 0; sp_gpu_helper_sm[k].squares_i.x = 0; sp_gpu_helper_sm[k].squares_i.y = 0; sp_gpu_helper_sm[k].squares_i.z = 0; sp_gpu_helper_sm[k].mu_i_sum.x = 0; sp_gpu_helper_sm[k].mu_i_sum.y = 0; 
sp_gpu_helper_sm[k].mu_i_sum.z = 0; sp_gpu_helper_sm[k].count_f = 0; sp_gpu_helper_sm[k].count = 0; sp_gpu_helper_sm[k].hasting = -999999; //sp_params[k].count = 0; sp_gpu_helper_sm[k].merge = false; sp_gpu_helper_sm[k].remove = false; split_merge_pairs[k*2+1] = 0; split_merge_pairs[k*2] = 0; } __global__ void calc_merge_candidate(int* seg, bool* border, int* split_merge_pairs, const int nPixels, const int xdim, const int ydim, const int change){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; if(!border[idx]) return; int x = idx % xdim; int y = idx / xdim; int C = seg[idx]; // center int W; // north, south, east,west W = OUT_OF_BOUNDS_LABEL; // init if(change==1) { if ((y>1) && (y< ydim-2)) { W = __ldg(&seg[idx+ydim]); // left } } else { if ((x>1) && (x< xdim-2)) { W = __ldg(&seg[idx-1]); // left } } // If the nbr is different from the central pixel and is not out-of-bounds, // then it is a border pixel. if (W>0 && C!=W) { atomicMax(&split_merge_pairs[C*2+1],W); } return; } __global__ void calc_split_candidate(int* seg, bool* border,int distance, int* mutex, const int nPixels, const int xdim, const int ydim){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; if(border[idx]) return; int x = idx % xdim; int y = idx / xdim; int C = seg[idx]; // center if(C!=distance) return; if ((y>0)&&(idx-xdim>=0)){ if(!seg[idx-xdim]) { seg[idx-xdim] = distance +1 ; mutex[0] = 1; } } if ((x>0)&&(idx-1>=0)){ if(!seg[idx-1]) { seg[idx-1] = distance +1 ; mutex[0] = 1; } } if ((y<ydim-1)&&(idx+xdim<nPixels)){ if(!seg[idx+xdim]) { seg[idx+xdim] = distance +1 ; mutex[0] = 1; } } if ((x<xdim-1)&&(idx+1<nPixels)){ if(!seg[idx+1]) { seg[idx+1] = distance +1 ; mutex[0] = 1; } } return; } __global__ void init_split(const bool* border, int* seg_gpu, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nsuperpixel_buffer, const int xdim, const int ydim, const int offset, const int* seg, int* max_sp, int max_SP) { int k = threadIdx.x + blockIdx.x * blockDim.x; // the label *max_sp = max_SP+1; if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int x; int y; if((offset==1)||(offset==-1)) { x = int(sp_params[k].mu_s.x)+offset; y = int(sp_params[k].mu_s.y); } else { x = int(sp_params[k].mu_s.x); y = int(sp_params[k].mu_s.y)+offset; } int ind = y*xdim+x; if((ind<0)||(ind>xdim*ydim-1)) return; if(border[ind]) return; if (seg[ind]!=k) return; seg_gpu[ind] = 1; } __global__ void calc_seg_split(int* seg_split1, int* seg_split2,int* seg, int* seg_split3, const int nPixels, int max_SP) { int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; int seg_val = __ldg(&seg[t]); if(seg_split1[t]>__ldg(&seg_split2[t])) seg_val += max_SP; seg_split1[t] = seg_val; return; } __global__ void sum_by_label_sm(const float* image_gpu_double, const int* seg_gpu, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim) { // getting the index of the pixel int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; //get the label int k = __ldg(&seg_gpu[t]); float l = __ldg(& image_gpu_double[3*t]); float a = __ldg(& image_gpu_double[3*t+1]); float b = __ldg(& image_gpu_double[3*t+2]); //atomicAdd(&sp_params[k].count, 1); //TODO: Time it atomicAdd(&sp_gpu_helper_sm[k].squares_i.x, l*l); atomicAdd(&sp_gpu_helper_sm[k].squares_i.y, a*a); atomicAdd(&sp_gpu_helper_sm[k].squares_i.z,b*b); } __global__ void sum_by_label_split(const float* image_gpu_double, const int* seg, 
superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, int max_SP) { // getting the index of the pixel int t = threadIdx.x + blockIdx.x * blockDim.x; if (t>=nPixels) return; //get the label int k = __ldg(&seg[t]); float l = __ldg(& image_gpu_double[3*t]); float a = __ldg(& image_gpu_double[3*t+1]); float b = __ldg(& image_gpu_double[3*t+2]); atomicAdd(&sp_gpu_helper_sm[k].count, 1); //TODO: Time it atomicAdd(&sp_gpu_helper_sm[k].squares_i.x, l*l); atomicAdd(&sp_gpu_helper_sm[k].squares_i.y, a*a); atomicAdd(&sp_gpu_helper_sm[k].squares_i.z,b*b); atomicAdd(&sp_gpu_helper_sm[k].mu_i_sum.x, l); atomicAdd(&sp_gpu_helper_sm[k].mu_i_sum.y, a); atomicAdd(&sp_gpu_helper_sm[k].mu_i_sum.z, b); return; } __global__ void calc_bn(int* seg, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float b_0) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; // TODO: check if there is no neigh //get the label of neigh int f = split_merge_pairs[2*k+1]; //if (sp_params[f].valid == 0) return; //if (f<=0) return; float count_f = __ldg(&sp_params[f].count); float count_k = __ldg(&sp_params[k].count); float squares_f_x = __ldg(&sp_gpu_helper_sm[f].squares_i.x); float squares_f_y = __ldg(&sp_gpu_helper_sm[f].squares_i.y); float squares_f_z = __ldg(&sp_gpu_helper_sm[f].squares_i.z); float squares_k_x = __ldg(&sp_gpu_helper_sm[k].squares_i.x); float squares_k_y = __ldg(&sp_gpu_helper_sm[k].squares_i.y); float squares_k_z = __ldg(&sp_gpu_helper_sm[k].squares_i.z); float mu_f_x = __ldg(&sp_gpu_helper[f].mu_i_sum.x); float mu_f_y = __ldg(&sp_gpu_helper[f].mu_i_sum.y); float mu_f_z = __ldg(&sp_gpu_helper[f].mu_i_sum.z); float mu_k_x = __ldg(&sp_gpu_helper[k].mu_i_sum.x); float mu_k_y = __ldg(&sp_gpu_helper[k].mu_i_sum.y); float mu_k_z = __ldg(&sp_gpu_helper[k].mu_i_sum.z); //if ((k==105)||(k==42)) printf("Merger: %d, %d ,sq_x: %f , sq_y: %f , sq_z: %f\n", k, f,squares_k_x, squares_k_y, squares_k_z) ; int count_fk = count_f + count_k; sp_gpu_helper_sm[k].count_f = count_fk; //sp_gpu_helper_sm[k].count_f = sp_params[k].count + sp_params[f].count; sp_gpu_helper_sm[k].b_n.x = b_0 + 0.5 * ((squares_k_x) - ( mu_k_x*mu_k_x/ count_k)); sp_gpu_helper_sm[k].b_n_f.x = b_0 + 0.5 *( (squares_k_x+squares_f_x) - ( (mu_f_x + mu_k_x ) * (mu_f_x + mu_k_x ) / (count_fk))); sp_gpu_helper_sm[k].b_n.y = b_0 + 0.5 * ((squares_k_y) - ( mu_k_y*mu_k_y/ count_k)); sp_gpu_helper_sm[k].b_n_f.y = b_0 + 0.5 *( (squares_k_y+squares_f_y) - ( (mu_f_y + mu_k_y ) * (mu_f_y + mu_k_y ) / (count_fk))); sp_gpu_helper_sm[k].b_n.z = b_0 + 0.5 * ((squares_k_z) - ( mu_k_z*mu_k_z/ count_k)); sp_gpu_helper_sm[k].b_n_f.z = b_0 + 0.5 *( (squares_k_z+squares_f_z) - ( (mu_f_z + mu_k_z ) * (mu_f_z + mu_k_z ) / (count_fk))); if( sp_gpu_helper_sm[k].b_n.x<0) sp_gpu_helper_sm[k].b_n.x = 0.1; if( sp_gpu_helper_sm[k].b_n.y<0) sp_gpu_helper_sm[k].b_n.y = 0.1; if( sp_gpu_helper_sm[k].b_n.z<0) sp_gpu_helper_sm[k].b_n.z = 0.1; if( sp_gpu_helper_sm[k].b_n_f.x<0) sp_gpu_helper_sm[k].b_n_f.x = 0.1; if( sp_gpu_helper_sm[k].b_n_f.y<0) sp_gpu_helper_sm[k].b_n_f.y = 0.1; if( sp_gpu_helper_sm[k].b_n_f.z<0) sp_gpu_helper_sm[k].b_n_f.z = 0.1; } __global__ void calc_bn_split(int* seg, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, 
superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float b_0, int max_SP) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; // TODO: check if there is no neigh //get the label of neigh int s = k + max_SP; if (s>=nsuperpixel_buffer) return; float count_f = __ldg(&sp_params[k].count); float count_k= __ldg(&sp_gpu_helper_sm[k].count); float count_s = __ldg(&sp_gpu_helper_sm[s].count); if((count_f<1)||( count_k<1)||(count_s<1)) return; float squares_s_x = __ldg(&sp_gpu_helper_sm[s].squares_i.x); float squares_s_y = __ldg(&sp_gpu_helper_sm[s].squares_i.y); float squares_s_z = __ldg(&sp_gpu_helper_sm[s].squares_i.z); float squares_k_x = __ldg(&sp_gpu_helper_sm[k].squares_i.x); float squares_k_y = __ldg(&sp_gpu_helper_sm[k].squares_i.y); float squares_k_z = __ldg(&sp_gpu_helper_sm[k].squares_i.z); float mu_s_x = __ldg(&sp_gpu_helper_sm[s].mu_i_sum.x); float mu_s_y = __ldg(&sp_gpu_helper_sm[s].mu_i_sum.y); float mu_s_z = __ldg(&sp_gpu_helper_sm[s].mu_i_sum.z); float mu_k_x = __ldg(&sp_gpu_helper_sm[k].mu_i_sum.x); float mu_k_y = __ldg(&sp_gpu_helper_sm[k].mu_i_sum.y); float mu_k_z = __ldg(&sp_gpu_helper_sm[k].mu_i_sum.z); float mu_f_x =__ldg(&sp_gpu_helper[k].mu_i_sum.x); float mu_f_y = __ldg(&sp_gpu_helper[k].mu_i_sum.y); float mu_f_z = __ldg(&sp_gpu_helper[k].mu_i_sum.z); sp_gpu_helper_sm[k].b_n.x = b_0 + 0.5 * ((squares_k_x) - ( (mu_k_x*mu_k_x)/ (count_k))); //sp_gpu_helper_sm[k].b_n.x = b_0 + (squares_k_x)+(mu_k_x*mu_k_x)/(count_k*count_k)-2*(mu_k_x*mu_k_x)/(count_k)+(mu_k_x*mu_k_x)/(count_k*count_k); sp_gpu_helper_sm[k].b_n.y = b_0 + 0.5 * ((squares_k_y) - ( mu_k_y*mu_k_y/ count_k)); //sp_gpu_helper_sm[k].b_n.y = b_0 + (squares_k_y)+(mu_k_y*mu_k_y)/(count_k*count_k)-2*(mu_k_y*mu_k_y)/(count_k)+(mu_k_y*mu_k_y)/(count_k*count_k); sp_gpu_helper_sm[k].b_n.z = b_0 + 0.5 * ((squares_k_z) - ( mu_k_z*mu_k_z/ count_k)); // sp_gpu_helper_sm[k].b_n.z = b_0 + (squares_k_z)+(mu_k_z*mu_k_z)/(count_k*count_k)-2*(mu_k_z*mu_k_z)/(count_k)+(mu_k_z*mu_k_z)/(count_k*count_k); sp_gpu_helper_sm[s].b_n.x = b_0 + 0.5 * ((squares_s_x) - ( mu_s_x*mu_s_x/ count_s)); sp_gpu_helper_sm[s].b_n.y = b_0 + 0.5 * ((squares_s_y) - ( mu_s_y*mu_s_y/ count_s)); sp_gpu_helper_sm[s].b_n.z = b_0 + 0.5 * ((squares_s_z) - ( mu_s_z*mu_s_z/ count_s)); /* sp_gpu_helper_sm[s].b_n.x = b_0 + (squares_s_x)+(mu_s_x*mu_s_x)/(count_s*count_s)-2*(mu_s_x*mu_s_x)/(count_s)+(mu_s_x*mu_s_x)/(count_k*count_k); sp_gpu_helper_sm[s].b_n.y = b_0 + (squares_s_y)+(mu_s_y*mu_s_y)/(count_s*count_s)-2*(mu_s_y*mu_s_y)/(count_s)+(mu_s_y*mu_s_y)/(count_k*count_k); sp_gpu_helper_sm[s].b_n.z = b_0 + (squares_s_z)+(mu_s_z*mu_s_z)/(count_s*count_s)-2*(mu_s_z*mu_s_z)/(count_s)+(mu_s_z*mu_s_z)/(count_k*count_k); */ sp_gpu_helper_sm[k].b_n_f.x = b_0 + 0.5 * ((squares_k_x+squares_s_x) - ( mu_f_x*mu_f_x/ count_f)); sp_gpu_helper_sm[k].b_n_f.y = b_0 + 0.5 * ((squares_k_y+squares_s_y) - ( mu_f_y*mu_f_y/ count_f)); sp_gpu_helper_sm[k].b_n_f.z = b_0 + 0.5 * ((squares_k_z+squares_s_z) - ( mu_f_z*mu_f_z/ count_f)); } __global__ void calc_marginal_liklelyhoood_of_sp_split(const float* image_gpu_double, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a_0, float b_0, int max_SP) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * 
blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int s = k + max_SP; if (s>=nsuperpixel_buffer) return; float count_f = __ldg(&sp_params[k].count); float count_k= __ldg(&sp_gpu_helper_sm[k].count); float count_s = __ldg(&sp_gpu_helper_sm[s].count); if((count_f<1)||( count_k<1)||(count_s<1)) return; if (count_f!=count_k+count_s) return; // TODO: check if there is no neigh // TODO: check if num is the same //get the label //a_0 = 1100*(count_f); float a_n_k = a_0 + float(count_k)/2; float a_n_s = a_0+ float(count_s)/2; float a_n_f = a_0+ float(count_f)/2; float v_n_k = 1/float(count_k); float v_n_s = 1/float(count_s); float v_n_f = 1/float(count_f); /* v_n_k = 1; v_n_f =1; v_n_s=1;*/ float b_n_k_x = __ldg(&sp_gpu_helper_sm[k].b_n.x); float b_n_k_y = __ldg(&sp_gpu_helper_sm[k].b_n.y); float b_n_k_z = __ldg(&sp_gpu_helper_sm[k].b_n.z); float b_n_s_x = __ldg(&sp_gpu_helper_sm[s].b_n.x); float b_n_s_y = __ldg(&sp_gpu_helper_sm[s].b_n.y); float b_n_s_z = __ldg(&sp_gpu_helper_sm[s].b_n.z); float b_n_f_x = __ldg(&sp_gpu_helper_sm[k].b_n_f.x); float b_n_f_y = __ldg(&sp_gpu_helper_sm[k].b_n_f.y); float b_n_f_z = __ldg(&sp_gpu_helper_sm[k].b_n_f.z); a_0 =a_n_k; sp_gpu_helper_sm[k].numerator.x = a_0 * __logf(b_0) + lgammaf(a_n_k)+ 0.5*__logf(v_n_k); sp_gpu_helper_sm[k].denominator.x = a_n_k * __logf (b_n_k_x) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator.y = a_n_k * __logf (b_n_k_y) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator.z = a_n_k * __logf (b_n_k_z) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); a_0 =a_n_s; sp_gpu_helper_sm[s].numerator.x = a_0 * __logf(b_0) + lgammaf(a_n_s)+0.5*__logf(v_n_s); sp_gpu_helper_sm[s].denominator.x = a_n_s * __logf (b_n_s_x) + 0.5 * count_s * __logf (M_PI) + \ count_s * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[s].denominator.y = a_n_s * __logf (b_n_s_y) + 0.5 * count_s * __logf (M_PI) + \ count_s * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[s].denominator.z = a_n_s * __logf (b_n_s_z) + 0.5 * count_s * __logf (M_PI) + \ count_s * __logf (2) + lgammaf(a_0); a_0 =a_n_f; sp_gpu_helper_sm[k].numerator_f.x = a_0 * __logf(b_0) + lgammaf(a_n_f)+0.5*__logf(v_n_f); sp_gpu_helper_sm[k].denominator_f.x = a_n_f * __logf (b_n_f_x) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator_f.y = a_n_f * __logf (b_n_f_y) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); sp_gpu_helper_sm[k].denominator_f.z = a_n_f * __logf (b_n_f_z) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); } __global__ void calc_marginal_liklelyhoood_of_sp(const float* image_gpu_double, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a_0, float b_0) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; // TODO: check if there is no neigh // TODO: check if num is the same //get the label float count_k = __ldg(&sp_params[k].count); float count_f = __ldg(&sp_gpu_helper_sm[k].count_f); //if ((count_k==0)||(count_f==0)) return; //if (sp_params[k].valid == 0) return; //if (f==-1) return; float a_n = a_0 + float(count_k) / 2; float a_n_f = a_0+ float(count_f) / 2; 
// float v_n = 1 / float(num_pixels_in_sp); float v_n = 1/float(count_k); float v_n_f = 1/float(count_f); //printf("Merge: %f,%d, \n", sp_gpu_helper_sm[k].b_n.x, count_k); a_0 = a_n; sp_gpu_helper_sm[k].numerator.x = a_0 * __logf(b_0) + lgammaf(a_n)+0.5*__logf(v_n); sp_gpu_helper_sm[k].denominator.x = a_n* __logf ( __ldg(&sp_gpu_helper_sm[k].b_n.x)) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); //sp_gpu_helper_sm[k].numerator.y = a_0 * __logf (b_0) + lgammaf(a_0)+0.5*v_n; sp_gpu_helper_sm[k].denominator.y = a_n* __logf ( __ldg(&sp_gpu_helper_sm[k].b_n.y)) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgamma(a_0); //sp_gpu_helper_sm[k].numerator.z = a_0 * __logf(b_0) + lgammaf(a_0)+0.5*v_n; sp_gpu_helper_sm[k].denominator.z = a_n* __logf(__ldg(&sp_gpu_helper_sm[k].b_n.z)) + 0.5 * count_k * __logf (M_PI) + \ count_k * __logf (2) + lgammaf(a_0); a_0 = a_n_f; sp_gpu_helper_sm[k].numerator_f.x = a_0 * __logf (b_0) + lgammaf(a_n_f)+0.5*__logf(v_n_f); sp_gpu_helper_sm[k].denominator_f.x = a_n_f* __logf (__ldg(&sp_gpu_helper_sm[k].b_n_f.x)) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); //sp_gpu_helper_sm[k].numerator_f.y = a_0 * __logf (b_0) + lgammaf(a_0)+0.5*v_n_f; sp_gpu_helper_sm[k].denominator_f.y = a_n_f* __logf (__ldg(&sp_gpu_helper_sm[k].b_n_f.y)) + 0.5 * count_f * __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); //sp_gpu_helper_sm[k].numerator_f.z = a_0 * __logf (b_0) + lgammaf(a_0)+0.5*v_n_f; sp_gpu_helper_sm[k].denominator_f.z = a_n_f* __logf (__ldg(&sp_gpu_helper_sm[k].b_n_f.z)) + 0.5 * count_f* __logf (M_PI) + \ count_f * __logf (2) + lgammaf(a_0); } __global__ void calc_hasting_ratio(const float* image_gpu_double,int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a0, float b0, float alpha_hasting_ratio, int* mutex ) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int f = split_merge_pairs[2*k+1]; if (sp_params[f].valid == 0) return; if(f<=0) return; float count_k = __ldg(&sp_params[k].count); float count_f = __ldg(&sp_gpu_helper_sm[k].count_f); if ((count_k<1)||(count_f<1)) return; sp_gpu_helper_sm[k].merge = false; float num_k = __ldg(&sp_gpu_helper_sm[k].numerator.x); float total_marginal_1 = (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.x)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.y)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.z)); float num_f = __ldg(&sp_gpu_helper_sm[f].numerator.x); float total_marginal_2 = (num_f - __ldg(&sp_gpu_helper_sm[f].denominator.x)) + (num_f - __ldg(&sp_gpu_helper_sm[f].denominator.y)) + (num_f - __ldg(&sp_gpu_helper_sm[f].denominator.z)); float num_kf = __ldg(&sp_gpu_helper_sm[k].numerator_f.x); float total_marginal_f = (num_kf - __ldg(&sp_gpu_helper_sm[k].denominator_f.x)) + (num_kf - __ldg(&sp_gpu_helper_sm[k].denominator_f.y)) + (num_kf - __ldg(&sp_gpu_helper_sm[k].denominator_f.z)); float log_nominator = lgammaf(count_f) + total_marginal_f + lgammaf(alpha_hasting_ratio) + lgammaf(alpha_hasting_ratio / 2 + count_k) + lgammaf(alpha_hasting_ratio / 2 + count_f - count_k); float log_denominator = __logf(alpha_hasting_ratio) + lgammaf(count_k) + lgammaf(count_f - count_k) + total_marginal_1 + total_marginal_2 + lgammaf(alpha_hasting_ratio + count_f) + lgammaf(alpha_hasting_ratio / 2) + 
lgammaf(alpha_hasting_ratio / 2); log_denominator = __logf(alpha_hasting_ratio) + total_marginal_1 + total_marginal_2; log_nominator = total_marginal_f ; sp_gpu_helper_sm[k].hasting = log_nominator - log_denominator; return; } __global__ void calc_hasting_ratio2(const float* image_gpu_double,int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a0, float b0, float alpha_hasting_ratio, int* mutex ) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int f = split_merge_pairs[2*k+1]; if (sp_params[f].valid == 0) return; if(f<=0) return; if((sp_gpu_helper_sm[k].hasting ) > -2) { //printf("Want to merge k: %d, f: %d, splitmerge k %d, splitmerge f %d, %d\n", k, f, split_merge_pairs[2*k], split_merge_pairs[2*f], split_merge_pairs[2*f+1] ); if( k > atomicMax(&split_merge_pairs[2*f],k)) { //printf("Merge: %f \n",sp_gpu_helper_sm[k].hasting ); sp_gpu_helper_sm[k].merge = true; } } return; } __global__ void calc_hasting_ratio_split(const float* image_gpu_double,int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper* sp_gpu_helper, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int nsuperpixel_buffer, float a0, float b0, float alpha_hasting_ratio, int* mutex, int max_SP, int* max_sp ) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; if (sp_params[k].valid == 0) return; int s = k + max_SP; if(s>=nsuperpixel_buffer) return; float count_f = __ldg(&sp_params[k].count); float count_k= __ldg(&sp_gpu_helper_sm[k].count); float count_s = __ldg(&sp_gpu_helper_sm[s].count); if((count_f<1)||( count_k<1)||(count_s<1)) return; float num_k = __ldg(&sp_gpu_helper_sm[k].numerator.x); float num_s = __ldg(&sp_gpu_helper_sm[s].numerator.x); float num_f = __ldg(&sp_gpu_helper_sm[k].numerator_f.x); float total_marginal_k = (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.x)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.y)) + (num_k - __ldg(&sp_gpu_helper_sm[k].denominator.z)); float total_marginal_s = (num_s - __ldg(&sp_gpu_helper_sm[s].denominator.x)) + (num_s - __ldg(&sp_gpu_helper_sm[s].denominator.y)) + (num_s - __ldg(&sp_gpu_helper_sm[s].denominator.z)); float total_marginal_f = (num_f - __ldg(&sp_gpu_helper_sm[k].denominator_f.x)) + (num_f - __ldg(&sp_gpu_helper_sm[k].denominator_f.y)) + (num_f - __ldg(&sp_gpu_helper_sm[k].denominator_f.z)); //printf("hasating:x k: %d, count: %f, den: %f, %f, %f, b_n: %f, %f, %f, num: %f \n",k, count_k, sp_gpu_helper_sm[k].denominator.x, sp_gpu_helper_sm[k].denominator.y, sp_gpu_helper_sm[k].denominator.z, __logf (sp_gpu_helper_sm[k].b_n.x) , __logf (sp_gpu_helper_sm[k].b_n.y), __logf (sp_gpu_helper_sm[k].b_n.z), sp_gpu_helper_sm[k].numerator.x); float log_nominator = __logf(alpha_hasting_ratio)+ lgammaf(count_k)+ total_marginal_k + lgammaf(count_s) + total_marginal_s ; log_nominator = total_marginal_k + total_marginal_s ; float log_denominator = lgammaf(count_f) + total_marginal_f; log_denominator =total_marginal_f; sp_gpu_helper_sm[k].hasting = log_nominator - log_denominator; sp_gpu_helper_sm[k].merge = (sp_gpu_helper_sm[k].hasting > -2); sp_gpu_helper_sm[s].merge = (sp_gpu_helper_sm[k].hasting > -2); if((sp_gpu_helper_sm[k].merge)) { s = atomicAdd(max_sp,1) +1; 
split_merge_pairs[2*k] = s; //atomicMax(max_sp,s); sp_params[k].prior_count/=2; sp_params[s].prior_count= sp_params[k].prior_count; //#check why. } } __global__ void merge_sp(int* seg, bool* border, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; int k = seg[idx]; // center //if (sp_params[k].valid == 0) return; int f = split_merge_pairs[2*k+1]; if(sp_gpu_helper_sm[k].remove) seg[idx] = f; return; } __global__ void split_sp(int* seg, int* seg_split1, int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nPixels, const int xdim, const int ydim,int max_SP){ int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx>=nPixels) return; int k = seg[idx]; // center int k2 = k + max_SP; if ((sp_gpu_helper_sm[k].merge == false)||sp_gpu_helper_sm[k2].merge == false ) return; if(seg_split1[idx]==k2) seg[idx] = split_merge_pairs[2*k]; //seg[idx] = seg_split1[idx]; //printf("Add the following: %d - %d'\n", k,split_merge_pairs[2*k]); sp_params[split_merge_pairs[2*k]].valid = 1; return; } __global__ void remove_sp(int* split_merge_pairs, superpixel_params* sp_params, superpixel_GPU_helper_sm* sp_gpu_helper_sm, const int nsuperpixel_buffer) { // getting the index of the pixel int k = threadIdx.x + blockIdx.x * blockDim.x; // the label if (k>=nsuperpixel_buffer) return; int f = split_merge_pairs[2*k+1]; if ((sp_params[k].valid == 0)||(sp_params[f].valid == 0)) return; if(f<=0) return; if ((sp_gpu_helper_sm[k].merge == true) && (sp_gpu_helper_sm[f].merge == false) && (split_merge_pairs[2*f]==k) ) { sp_gpu_helper_sm[k].remove=true; sp_params[k].valid =0; sp_params[f].prior_count =sp_params[k].prior_count+sp_params[f].prior_count; } else { sp_gpu_helper_sm[k].remove=false; } return; }
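For readers auditing the acceptance tests above, it helps to write out what the numerator/denominator fields actually encode. Because a_0 is overwritten with a_n immediately before the numerator is formed, the log-Gamma terms cancel and, per colour channel c, the stored quantities reduce to a log marginal likelihood of the channel values under a Normal-Gamma model with the prior normalisation dropped:

\[ \log m_c = a_n \log\frac{b_0}{b_{n,c}} + \tfrac{1}{2}\log v_n - \tfrac{n}{2}\log\pi - n\log 2, \qquad a_n = a_0 + \tfrac{n}{2},\quad v_n = \tfrac{1}{n},\quad b_{n,c} = b_0 + \tfrac{1}{2}\Big(\sum_i x_{i,c}^2 - \tfrac{1}{n}\big(\sum_i x_{i,c}\big)^2\Big), \]

where n is the pixel count of the region. Writing \(m(\cdot)\) for the sum of \(\log m_c\) over the three channels, the simplified scores at the end of the two ratio kernels are

\[ \text{merge: } H_k = m(k\cup f) - m(k) - m(f) - \log\alpha, \qquad \text{split: } H_k = m(k_1) + m(k_2) - m(k_1\cup k_2), \]

and a proposal is accepted when \(H_k\) exceeds the hard-coded threshold of \(-2\) (cf. the global tresh = -2). The longer expressions involving \(\log\Gamma\) of the counts are computed first and then discarded by the subsequent reassignment of log_nominator / log_denominator.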
9d9c72e7975c25aa0642da638b2001146587ee41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudakernel/memory/pad.h" #include "cudakernel/common/divmod_fast.h" #include "cudakernel/common/memory_utils.h" #include "ppl/nn/common/tensor_shape.h" #include "ppl/common/retcode.h" #include <hip/hip_fp16.h> template <typename T> __global__ void ppl_cukernel_pad( int64_t num_elems, int num_dims, PadKernelParam param, GArray<int64_t> input_dims, GArray<int64_t> input_strides, const T* input, const int64_t* pads, GArray<DivModFast> output_strides_fast, T* output) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_elems) return; bool use_pad_value = false; int64_t input_offset = 0; int out_idx, remain = index; for (int it = 0; (it < num_dims) && !use_pad_value; ++it) { output_strides_fast[it].divmod(remain, out_idx, remain); int64_t start_pad_val = pads[it]; int in_idx = 0; if (out_idx < start_pad_val) { switch (param.mode) { case 0: // constant use_pad_value = true; break; case 1: // reflect in_idx = start_pad_val - out_idx; break; case 2: // edge in_idx = 0; break; } } else if (out_idx >= start_pad_val + input_dims[it]) { switch (param.mode) { case 0: // constant use_pad_value = true; break; case 1: // reflect in_idx = input_dims[it] - 2 - (out_idx - (start_pad_val + input_dims[it])); break; case 2: // edge in_idx = input_dims[it] - 1; break; } } else { in_idx = out_idx - start_pad_val; } input_offset += in_idx * input_strides[it]; } output[index] = use_pad_value ? (T)param.constant_value : input[input_offset]; } ppl::common::RetCode PPLCUDAPadForwardImp( hipStream_t stream, PadKernelParam param, ppl::nn::TensorShape* input_shape, const void* input, ppl::nn::TensorShape* pads_shape, const int64_t* pads, ppl::nn::TensorShape* output_shape, void* output) { int block_size = 256; uint64_t num_elems = output_shape->GetElementsIncludingPadding(); int grid_size = (num_elems + block_size - 1) / block_size; int num_dims = output_shape->GetDimCount(); GArray<int64_t> input_dims(num_dims); GArray<int64_t> input_strides(num_dims); GArray<DivModFast> output_strides_fast(num_dims); int64_t acc_output_stride = 1; int64_t acc_input_stride = 1; for (int it = num_dims - 1; it >= 0; --it) { input_dims[it] = input_shape->GetDim(it); input_strides[it] = acc_input_stride; output_strides_fast[it] = DivModFast(acc_output_stride); acc_input_stride *= input_shape->GetDim(it); acc_output_stride *= output_shape->GetDim(it); } switch (input_shape->GetDataType()) { case ppl::common::DATATYPE_FLOAT16: { hipLaunchKernelGGL(( ppl_cukernel_pad), dim3(grid_size), dim3(block_size), 0, stream, num_elems, num_dims, param, input_dims, input_strides, (const half*)input, pads, output_strides_fast, (half*)output); return ppl::common::RC_SUCCESS; } case ppl::common::DATATYPE_FLOAT32: { hipLaunchKernelGGL(( ppl_cukernel_pad), dim3(grid_size), dim3(block_size), 0, stream, num_elems, num_dims, param, input_dims, input_strides, (const float*)input, pads, output_strides_fast, (float*)output); return ppl::common::RC_SUCCESS; } default: return ppl::common::RC_UNSUPPORTED; } }
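The per-dimension index mapping in ppl_cukernel_pad is the whole trick: each output coordinate is either redirected to the constant pad value, mirrored back into the input (reflect), or clamped to the nearest edge. The standalone helper below reproduces that switch on a single axis for illustration only; the function name and the little demo around it are assumptions, not part of the library.

#include <cstdio>
#include <cstdint>

// Map one output coordinate 'out_idx' on an axis of input length 'dim',
// padded by 'start_pad' at the front, mirroring the switch in ppl_cukernel_pad.
// Returns the input coordinate, or -1 when the constant pad value should be used.
static int pad_map_axis(int out_idx, int64_t dim, int64_t start_pad, int mode) {
    if (out_idx < start_pad) {                        // leading pad region
        if (mode == 0) return -1;                     // constant
        if (mode == 1) return (int)(start_pad - out_idx);                        // reflect
        return 0;                                     // edge
    }
    if (out_idx >= start_pad + dim) {                 // trailing pad region
        if (mode == 0) return -1;                     // constant
        if (mode == 1) return (int)(dim - 2 - (out_idx - (start_pad + dim)));    // reflect
        return (int)(dim - 1);                        // edge
    }
    return (int)(out_idx - start_pad);                // interior: plain shift
}

int main() {
    // Axis of length 4, padded by 2 on each side; print all 8 output positions per mode.
    for (int mode = 0; mode < 3; ++mode) {
        printf("mode %d:", mode);
        for (int o = 0; o < 8; ++o) printf(" %d", pad_map_axis(o, 4, 2, mode));
        printf("\n");
    }
    return 0;
}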
9d9c72e7975c25aa0642da638b2001146587ee41.cu
#include "cudakernel/memory/pad.h"
#include "cudakernel/common/divmod_fast.h"
#include "cudakernel/common/memory_utils.h"
#include "ppl/nn/common/tensor_shape.h"
#include "ppl/common/retcode.h"
#include <cuda_fp16.h>

template <typename T>
__global__ void ppl_cukernel_pad(
    int64_t num_elems,
    int num_dims,
    PadKernelParam param,
    GArray<int64_t> input_dims,
    GArray<int64_t> input_strides,
    const T* input,
    const int64_t* pads,
    GArray<DivModFast> output_strides_fast,
    T* output)
{
    int64_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= num_elems)
        return;
    bool use_pad_value = false;
    int64_t input_offset = 0;
    int out_idx, remain = index;
    for (int it = 0; (it < num_dims) && !use_pad_value; ++it) {
        output_strides_fast[it].divmod(remain, out_idx, remain);
        int64_t start_pad_val = pads[it];
        int in_idx = 0;
        if (out_idx < start_pad_val) {
            switch (param.mode) {
                case 0: // constant
                    use_pad_value = true;
                    break;
                case 1: // reflect
                    in_idx = start_pad_val - out_idx;
                    break;
                case 2: // edge
                    in_idx = 0;
                    break;
            }
        } else if (out_idx >= start_pad_val + input_dims[it]) {
            switch (param.mode) {
                case 0: // constant
                    use_pad_value = true;
                    break;
                case 1: // reflect
                    in_idx = input_dims[it] - 2 - (out_idx - (start_pad_val + input_dims[it]));
                    break;
                case 2: // edge
                    in_idx = input_dims[it] - 1;
                    break;
            }
        } else {
            in_idx = out_idx - start_pad_val;
        }
        input_offset += in_idx * input_strides[it];
    }
    output[index] = use_pad_value ? (T)param.constant_value : input[input_offset];
}

ppl::common::RetCode PPLCUDAPadForwardImp(
    cudaStream_t stream,
    PadKernelParam param,
    ppl::nn::TensorShape* input_shape,
    const void* input,
    ppl::nn::TensorShape* pads_shape,
    const int64_t* pads,
    ppl::nn::TensorShape* output_shape,
    void* output)
{
    int block_size = 256;
    uint64_t num_elems = output_shape->GetElementsIncludingPadding();
    int grid_size = (num_elems + block_size - 1) / block_size;
    int num_dims = output_shape->GetDimCount();
    GArray<int64_t> input_dims(num_dims);
    GArray<int64_t> input_strides(num_dims);
    GArray<DivModFast> output_strides_fast(num_dims);
    int64_t acc_output_stride = 1;
    int64_t acc_input_stride = 1;
    for (int it = num_dims - 1; it >= 0; --it) {
        input_dims[it] = input_shape->GetDim(it);
        input_strides[it] = acc_input_stride;
        output_strides_fast[it] = DivModFast(acc_output_stride);
        acc_input_stride *= input_shape->GetDim(it);
        acc_output_stride *= output_shape->GetDim(it);
    }
    switch (input_shape->GetDataType()) {
        case ppl::common::DATATYPE_FLOAT16: {
            ppl_cukernel_pad<<<grid_size, block_size, 0, stream>>>(
                num_elems, num_dims, param, input_dims, input_strides,
                (const half*)input, pads, output_strides_fast, (half*)output);
            return ppl::common::RC_SUCCESS;
        }
        case ppl::common::DATATYPE_FLOAT32: {
            ppl_cukernel_pad<<<grid_size, block_size, 0, stream>>>(
                num_elems, num_dims, param, input_dims, input_strides,
                (const float*)input, pads, output_strides_fast, (float*)output);
            return ppl::common::RC_SUCCESS;
        }
        default:
            return ppl::common::RC_UNSUPPORTED;
    }
}
9038377c629036eeadba06211d70ac14482338d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef EVOLUTION_AUX_H #define EVOLUTION_AUX_H #include "evolutionUtils.h" #include "cudaMath.h" #include "coriolisUtils.h" #ifdef printf #undef printf #endif static __global__ void _print_constant_memory_() { printf(" %f %f %f %d\n", r1_dev.left, r1_dev.dr, r1_dev.mass, r1_dev.n); printf(" %f %f %f %d\n", r2_dev.left, r2_dev.dr, r2_dev.mass, r2_dev.n); for(int i = 0; i < 500; i+=10) printf("%d %18.15f %18.15f\n", i+1, r1_dev.dump[i], r2_dev.dump[i]); } static __global__ void _print_gradient_coeffients_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15e\n", i, gradient_coeffients_dev[i]); } static __global__ void _print_energies_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15f\n", i, energies_dev[i]); } static __global__ void _psi_times_kinetic_energy_(Complex *psi_out, const Complex *psi_in, const int n1, const int n2, const int n_theta) { extern __shared__ double kinetic_data[]; double *kin1 = (double *) kinetic_data; double *kin2 = &kin1[n1/2+1]; cudaMath::setup_kinetic_energy_for_fft_nonnegative(kin1, r1_dev.n, r1_dev.n*r1_dev.dr, r1_dev.mass); cudaMath::setup_kinetic_energy_for_fft(kin2, r2_dev.n, r2_dev.n*r2_dev.dr, r2_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1/2+1, n2, n_theta, i, j, k); #if 0 const double e = kin1[i] + kin2[j]; if(e <= kinetic_cutoff) { psi_out[index] = e*psi_in[index]; } else { psi_out[index].zero(); } #endif psi_out[index] = (kin1[i] + kin2[j])*psi_in[index]; } } static __global__ void _add_T_radial_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *TRadPsi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*2*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, (n1/2+1)*2, n2, n_theta, i, j, k); if(i < n1) { const int index2 = cudaUtils::ijk_2_index(n1, n2, n_theta, i, j, k); HPsi[index2] += TRadPsi[index]/(n1*n2); } } } static __global__ void _add_potential_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *psi, const double *pot, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n && pot[index] <= potential_cutoff) HPsi[index] += pot[index]*psi[index]; } static __global__ void _add_T_bend_T_sym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, const int omega, const int a, const int b) { extern __shared__ double rotational_moments[]; double *I1 = rotational_moments; double *I2 = &I1[n1]; double &Tsym = I2[n2]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); cudaMath::setup_moments_of_inertia(I2, r2_dev.n, r2_dev.left, r2_dev.dr, r2_dev.mass); if(threadIdx.x == 0) Tsym = double(J*(J+1) - 2*omega*omega); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, i, j, l); // l += omega; l = a*l + b; const double e = (I1[i]+I2[j])*l*(l+1) + I1[i]*Tsym; if(e <= potential_cutoff) TangPsi[index] += e*psi[index]; //TangPsi[index] += ((I1[i]+I2[j])*l*(l+1) + I1[i]*Tsym)*psi[index]; } } static __global__ void _add_T_asym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, 
const int Omega, const int Omega1, const int OmegaMax, const int a, const int b) { extern __shared__ double I1[]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, i, j, l); //l += OmegaMax; l = a*l + b; const double c = coriolisUtils::coriolis(J, l, Omega, Omega1); const double e = I1[i]*c; if(e <= potential_cutoff) TangPsi[index] += e*psi[index]; //TangPsi[index] += I1[i]*c*psi[index]; } } static __global__ void _dump_wavepacket_(double *psi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1, n2, n_theta, i, j, k); psi[index] *= r1_dev.dump[i]*r2_dev.dump[j]; } } static __global__ void _daxpy_(double *y, const double *x, const double alpha, const double beta, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) y[index] = alpha*x[index] + beta*y[index]; } #if 0 static __global__ void _setup_potential_scale_(int *scale, const double *pot_dev, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) scale[index] = pot_dev[index] < cutoff ? 1 : 0; } static __global__ void _scale_wavepacket_with_potential_cutoff_(double *psi, const double *potential, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n && potential[index] > cutoff) psi[index] = 0.0; } #endif static __global__ void _psi_time_to_fai_energy_on_dividing_surface_ (const int n, const int n_energies, const double t, const double dt, const double *psi_real_dev, const double *psi_imag_dev, const double *d_psi_real_dev, const double *d_psi_imag_dev, Complex *fai_dev, Complex *d_fai_dev) { extern __shared__ Complex expIEtDt[]; for(int i = threadIdx.x; i < n_energies; i += blockDim.x) expIEtDt[i] = exp(Complex(0.0, t*energies_dev[i]))*dt; __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n*n_energies) { int i = -1; int iE = -1; cudaUtils::index_2_ij(index, n, n_energies, i, iE); fai_dev[index] += expIEtDt[iE]*Complex(psi_real_dev[i], psi_imag_dev[i]); d_fai_dev[index] += expIEtDt[iE]*Complex(d_psi_real_dev[i], d_psi_imag_dev[i]); } } #endif /* EVOLUTION_AUX_H */
9038377c629036eeadba06211d70ac14482338d2.cu
#ifndef EVOLUTION_AUX_H #define EVOLUTION_AUX_H #include "evolutionUtils.h" #include "cudaMath.h" #include "coriolisUtils.h" #ifdef printf #undef printf #endif static __global__ void _print_constant_memory_() { printf(" %f %f %f %d\n", r1_dev.left, r1_dev.dr, r1_dev.mass, r1_dev.n); printf(" %f %f %f %d\n", r2_dev.left, r2_dev.dr, r2_dev.mass, r2_dev.n); for(int i = 0; i < 500; i+=10) printf("%d %18.15f %18.15f\n", i+1, r1_dev.dump[i], r2_dev.dump[i]); } static __global__ void _print_gradient_coeffients_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15e\n", i, gradient_coeffients_dev[i]); } static __global__ void _print_energies_(const int n) { for(int i = 0; i < n; i++) printf(" %d %18.15f\n", i, energies_dev[i]); } static __global__ void _psi_times_kinetic_energy_(Complex *psi_out, const Complex *psi_in, const int n1, const int n2, const int n_theta) { extern __shared__ double kinetic_data[]; double *kin1 = (double *) kinetic_data; double *kin2 = &kin1[n1/2+1]; cudaMath::setup_kinetic_energy_for_fft_nonnegative(kin1, r1_dev.n, r1_dev.n*r1_dev.dr, r1_dev.mass); cudaMath::setup_kinetic_energy_for_fft(kin2, r2_dev.n, r2_dev.n*r2_dev.dr, r2_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1/2+1, n2, n_theta, i, j, k); #if 0 const double e = kin1[i] + kin2[j]; if(e <= kinetic_cutoff) { psi_out[index] = e*psi_in[index]; } else { psi_out[index].zero(); } #endif psi_out[index] = (kin1[i] + kin2[j])*psi_in[index]; } } static __global__ void _add_T_radial_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *TRadPsi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < (n1/2+1)*2*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, (n1/2+1)*2, n2, n_theta, i, j, k); if(i < n1) { const int index2 = cudaUtils::ijk_2_index(n1, n2, n_theta, i, j, k); HPsi[index2] += TRadPsi[index]/(n1*n2); } } } static __global__ void _add_potential_weighted_psi_to_H_weighted_psi_(double *HPsi, const double *psi, const double *pot, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n && pot[index] <= potential_cutoff) HPsi[index] += pot[index]*psi[index]; } static __global__ void _add_T_bend_T_sym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, const int omega, const int a, const int b) { extern __shared__ double rotational_moments[]; double *I1 = rotational_moments; double *I2 = &I1[n1]; double &Tsym = I2[n2]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); cudaMath::setup_moments_of_inertia(I2, r2_dev.n, r2_dev.left, r2_dev.dr, r2_dev.mass); if(threadIdx.x == 0) Tsym = double(J*(J+1) - 2*omega*omega); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, i, j, l); // l += omega; l = a*l + b; const double e = (I1[i]+I2[j])*l*(l+1) + I1[i]*Tsym; if(e <= potential_cutoff) TangPsi[index] += e*psi[index]; //TangPsi[index] += ((I1[i]+I2[j])*l*(l+1) + I1[i]*Tsym)*psi[index]; } } static __global__ void _add_T_asym_to_T_angle_legendre_psi_dev_(double *TangPsi, const double *psi, const int n1, const int n2, const int nLegs, const int J, const int Omega, const int Omega1, const int OmegaMax, const int a, const int b) { extern 
__shared__ double I1[]; cudaMath::setup_moments_of_inertia(I1, r1_dev.n, r1_dev.left, r1_dev.dr, r1_dev.mass); __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*nLegs) { int i = -1; int j = -1; int l = -1; cudaUtils::index_2_ijk(index, n1, n2, nLegs, i, j, l); //l += OmegaMax; l = a*l + b; const double c = coriolisUtils::coriolis(J, l, Omega, Omega1); const double e = I1[i]*c; if(e <= potential_cutoff) TangPsi[index] += e*psi[index]; //TangPsi[index] += I1[i]*c*psi[index]; } } static __global__ void _dump_wavepacket_(double *psi, const int n1, const int n2, const int n_theta) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n1*n2*n_theta) { int i = -1; int j = -1; int k = -1; cudaUtils::index_2_ijk(index, n1, n2, n_theta, i, j, k); psi[index] *= r1_dev.dump[i]*r2_dev.dump[j]; } } static __global__ void _daxpy_(double *y, const double *x, const double alpha, const double beta, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) y[index] = alpha*x[index] + beta*y[index]; } #if 0 static __global__ void _setup_potential_scale_(int *scale, const double *pot_dev, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n) scale[index] = pot_dev[index] < cutoff ? 1 : 0; } static __global__ void _scale_wavepacket_with_potential_cutoff_(double *psi, const double *potential, const double cutoff, const int n) { const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n && potential[index] > cutoff) psi[index] = 0.0; } #endif static __global__ void _psi_time_to_fai_energy_on_dividing_surface_ (const int n, const int n_energies, const double t, const double dt, const double *psi_real_dev, const double *psi_imag_dev, const double *d_psi_real_dev, const double *d_psi_imag_dev, Complex *fai_dev, Complex *d_fai_dev) { extern __shared__ Complex expIEtDt[]; for(int i = threadIdx.x; i < n_energies; i += blockDim.x) expIEtDt[i] = exp(Complex(0.0, t*energies_dev[i]))*dt; __syncthreads(); const int index = threadIdx.x + blockDim.x*blockIdx.x; if(index < n*n_energies) { int i = -1; int iE = -1; cudaUtils::index_2_ij(index, n, n_energies, i, iE); fai_dev[index] += expIEtDt[iE]*Complex(psi_real_dev[i], psi_imag_dev[i]); d_fai_dev[index] += expIEtDt[iE]*Complex(d_psi_real_dev[i], d_psi_imag_dev[i]); } } #endif /* EVOLUTION_AUX_H */
493b6fc5d8603637e68c69a9f12d91afa7ece82a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <assert.h> #define threshold 5 //(50% probability) #define block_size 256 __global__ void calculation( char* dev_a, char* dev_b, char* dev_c, int num_matrices, int matrix_size ) { // Each thread handles a matrix int k = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (k >= num_matrices) return; // If first element is different than 0 do the computation if (dev_a[k*matrix_size*matrix_size] != 0){ for (int j = 0; j < matrix_size; j++){ //If first value in the row of the matrix, do addition if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] + dev_b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] - dev_b[index]; } } } } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments (we only receive command + vector size) if (argc != 3) { // Tell the user how to run the program printf ("Usage:\n%s <number of matrices> <matrix_size>\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return -1; } srand ( time(NULL) ); // Set variables with input arguments int num_matrices = atoi(argv[1]); int matrix_size = atoi(argv[2]); // Set device that we will use for our cuda code hipSetDevice(0); // Time Variables hipEvent_t stp_start, stp_stop; hipEvent_t cpu_start, cpu_stop; hipEvent_t gpu_start, gpu_stop; hipEvent_t ker_start, ker_stop; hipEventCreate (&stp_start); hipEventCreate (&stp_stop); hipEventCreate (&cpu_start); hipEventCreate (&cpu_stop); hipEventCreate (&gpu_start); hipEventCreate (&gpu_stop); hipEventCreate (&ker_start); hipEventCreate (&ker_stop); float time, ker_time; // Input Arrays and variables char *a ;// = new char [num_matrices*matrix_size*matrix_size]; //checkCuda( hipHostMalloc((void**)&a, num_matrices*matrix_size*matrix_size);// ); // host pinned char *b;// = new char [num_matrices*matrix_size*matrix_size]; //checkCuda( hipHostMalloc((void**)&b, num_matrices*matrix_size*matrix_size);//); char *c_cpu = new char [num_matrices*matrix_size*matrix_size]; char *c_gpu = new char [num_matrices*matrix_size*matrix_size]; // Pointers in GPU memory char *dev_a; char *dev_b; char *dev_c; // // Fill arrays ////////////////// hipEventRecord(stp_start,0); #if defined(_OPENMP) printf("Setting up input arrays in parallel.\n"); omp_set_num_threads(8); #else printf("Setting up input arrays.\n"); #endif #pragma omp parallel for for (int k = 0; k < num_matrices; k++) { #if defined(_OPENMP) if (k == 0) printf ("Using %d threads.\n", omp_get_num_threads()); #endif for (int j = 0; j < matrix_size*matrix_size; j++){ a[k*matrix_size*matrix_size + j] = j%9+1; b[k*matrix_size*matrix_size + j] = j%10; c_cpu[k*matrix_size*matrix_size + j] = 0; c_gpu[k*matrix_size*matrix_size + j] = 0; } } hipEventRecord(stp_stop,0); hipEventSynchronize(stp_stop); hipEventElapsedTime(&time, stp_start, stp_stop); printf("\tSetup Time: %.2f ms\n", time); // // CPU Calculation ////////////////// printf("Running sequential job.\n"); hipEventRecord(cpu_start,0); // Calculate C in the CPU for (int k = 0; k < num_matrices; k++) { // If first element is different than 0 do the 
computation if (a[k*matrix_size*matrix_size] != 0){ for (int j = 0; j < matrix_size; j++){ //If first value in the row of the matrix, do addition if (a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] + b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] - b[index]; } } } } } hipEventRecord(cpu_stop,0); hipEventSynchronize(cpu_stop); hipEventElapsedTime(&time, cpu_start, cpu_stop); printf("\tSequential Job Time: %.2f ms\n", time); // // GPU Calculation ////////////////// printf("Running parallel job.\n"); int grid_size = ((num_matrices-1)/block_size) + 1; //checkCuda( hipHostMalloc((void**)&h_aPinned, bytes) ); // host pinned //checkCuda( hipHostMalloc((void**)&h_bPinned, bytes) ); // host pinned hipEventRecord(gpu_start,0); // allocate the memory on the GPU hipMalloc( (void**)&dev_a, num_matrices * matrix_size * matrix_size * sizeof(char) ); hipMalloc( (void**)&dev_b, num_matrices * matrix_size * matrix_size * sizeof(char) ); hipMalloc( (void**)&dev_c, num_matrices * matrix_size * matrix_size * sizeof(char) ); // set arrays to 0 hipMemset(dev_a, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); hipMemset(dev_b, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); hipMemset(dev_c, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); // copy the 'data' to the GPU hipMemcpy( dev_a, a, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyHostToDevice ); hipMemcpy( dev_b, b, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyHostToDevice ); // run kernel hipEventRecord(ker_start,0); hipLaunchKernelGGL(( calculation), dim3(grid_size),dim3(block_size), 0, 0, dev_a, dev_b, dev_c, num_matrices, matrix_size ); hipEventRecord(ker_stop,0); // copy the array 'c' back from the GPU to the CPU hipMemcpy( c_gpu, dev_c, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyDeviceToHost ); hipEventRecord(gpu_stop,0); hipEventSynchronize(gpu_stop); hipEventElapsedTime(&time , gpu_start, gpu_stop); hipEventElapsedTime(&ker_time, ker_start, ker_stop); printf("\tParallel Job Time: %.2f ms\n", time); printf("\tKernel Exec. Time: %.2f ms\n", ker_time); // // Compare Results ////////////////// int error = 0; for (int i = 0; i < num_matrices * matrix_size * matrix_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // // Free resources ////////////////// // free the memory allocated on the GPU hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_c ); // free cuda events hipEventDestroy (cpu_start); hipEventDestroy (gpu_start); hipEventDestroy (ker_start); hipEventDestroy (cpu_stop); hipEventDestroy (gpu_stop); hipEventDestroy (ker_stop); // free CPU memory //free(a); //free(b); hipHostFree(a); hipHostFree(b); free(c_cpu); free(c_gpu); return 0; }
493b6fc5d8603637e68c69a9f12d91afa7ece82a.cu
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include <assert.h> #define threshold 5 //(50% probability) #define block_size 256 __global__ void calculation( char* dev_a, char* dev_b, char* dev_c, int num_matrices, int matrix_size ) { // Each thread handles a matrix int k = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (k >= num_matrices) return; // If first element is different than 0 do the computation if (dev_a[k*matrix_size*matrix_size] != 0){ for (int j = 0; j < matrix_size; j++){ //If first value in the row of the matrix, do addition if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] + dev_b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] - dev_b[index]; } } } } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments (we only receive command + vector size) if (argc != 3) { // Tell the user how to run the program printf ("Usage:\n%s <number of matrices> <matrix_size>\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return -1; } srand ( time(NULL) ); // Set variables with input arguments int num_matrices = atoi(argv[1]); int matrix_size = atoi(argv[2]); // Set device that we will use for our cuda code cudaSetDevice(0); // Time Variables cudaEvent_t stp_start, stp_stop; cudaEvent_t cpu_start, cpu_stop; cudaEvent_t gpu_start, gpu_stop; cudaEvent_t ker_start, ker_stop; cudaEventCreate (&stp_start); cudaEventCreate (&stp_stop); cudaEventCreate (&cpu_start); cudaEventCreate (&cpu_stop); cudaEventCreate (&gpu_start); cudaEventCreate (&gpu_stop); cudaEventCreate (&ker_start); cudaEventCreate (&ker_stop); float time, ker_time; // Input Arrays and variables char *a ;// = new char [num_matrices*matrix_size*matrix_size]; //checkCuda( cudaMallocHost((void**)&a, num_matrices*matrix_size*matrix_size);// ); // host pinned char *b;// = new char [num_matrices*matrix_size*matrix_size]; //checkCuda( cudaMallocHost((void**)&b, num_matrices*matrix_size*matrix_size);//); char *c_cpu = new char [num_matrices*matrix_size*matrix_size]; char *c_gpu = new char [num_matrices*matrix_size*matrix_size]; // Pointers in GPU memory char *dev_a; char *dev_b; char *dev_c; // // Fill arrays ////////////////// cudaEventRecord(stp_start,0); #if defined(_OPENMP) printf("Setting up input arrays in parallel.\n"); omp_set_num_threads(8); #else printf("Setting up input arrays.\n"); #endif #pragma omp parallel for for (int k = 0; k < num_matrices; k++) { #if defined(_OPENMP) if (k == 0) printf ("Using %d threads.\n", omp_get_num_threads()); #endif for (int j = 0; j < matrix_size*matrix_size; j++){ a[k*matrix_size*matrix_size + j] = j%9+1; b[k*matrix_size*matrix_size + j] = j%10; c_cpu[k*matrix_size*matrix_size + j] = 0; c_gpu[k*matrix_size*matrix_size + j] = 0; } } cudaEventRecord(stp_stop,0); cudaEventSynchronize(stp_stop); cudaEventElapsedTime(&time, stp_start, stp_stop); printf("\tSetup Time: %.2f ms\n", time); // // CPU Calculation ////////////////// printf("Running sequential job.\n"); cudaEventRecord(cpu_start,0); // Calculate C in the CPU for (int k = 0; k < num_matrices; k++) { // If first element is different than 0 do the computation if (a[k*matrix_size*matrix_size] != 0){ for (int j = 0; j < 
matrix_size; j++){ //If first value in the row of the matrix, do addition if (a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] + b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] - b[index]; } } } } } cudaEventRecord(cpu_stop,0); cudaEventSynchronize(cpu_stop); cudaEventElapsedTime(&time, cpu_start, cpu_stop); printf("\tSequential Job Time: %.2f ms\n", time); // // GPU Calculation ////////////////// printf("Running parallel job.\n"); int grid_size = ((num_matrices-1)/block_size) + 1; //checkCuda( cudaMallocHost((void**)&h_aPinned, bytes) ); // host pinned //checkCuda( cudaMallocHost((void**)&h_bPinned, bytes) ); // host pinned cudaEventRecord(gpu_start,0); // allocate the memory on the GPU cudaMalloc( (void**)&dev_a, num_matrices * matrix_size * matrix_size * sizeof(char) ); cudaMalloc( (void**)&dev_b, num_matrices * matrix_size * matrix_size * sizeof(char) ); cudaMalloc( (void**)&dev_c, num_matrices * matrix_size * matrix_size * sizeof(char) ); // set arrays to 0 cudaMemset(dev_a, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); cudaMemset(dev_b, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); cudaMemset(dev_c, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); // copy the 'data' to the GPU cudaMemcpy( dev_a, a, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyHostToDevice ); cudaMemcpy( dev_b, b, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyHostToDevice ); // run kernel cudaEventRecord(ker_start,0); calculation<<<grid_size,block_size>>>( dev_a, dev_b, dev_c, num_matrices, matrix_size ); cudaEventRecord(ker_stop,0); // copy the array 'c' back from the GPU to the CPU cudaMemcpy( c_gpu, dev_c, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyDeviceToHost ); cudaEventRecord(gpu_stop,0); cudaEventSynchronize(gpu_stop); cudaEventElapsedTime(&time , gpu_start, gpu_stop); cudaEventElapsedTime(&ker_time, ker_start, ker_stop); printf("\tParallel Job Time: %.2f ms\n", time); printf("\tKernel Exec. Time: %.2f ms\n", ker_time); // // Compare Results ////////////////// int error = 0; for (int i = 0; i < num_matrices * matrix_size * matrix_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // // Free resources ////////////////// // free the memory allocated on the GPU cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); // free cuda events cudaEventDestroy (cpu_start); cudaEventDestroy (gpu_start); cudaEventDestroy (ker_start); cudaEventDestroy (cpu_stop); cudaEventDestroy (gpu_stop); cudaEventDestroy (ker_stop); // free CPU memory //free(a); //free(b); cudaFreeHost(a); cudaFreeHost(b); free(c_cpu); free(c_gpu); return 0; }
b0b92ede1d3dbd4569f1fbdf66ed72178f5ddaef.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <add_cuda.h>

namespace zjx{

__global__ void addwithcuda(int* x, int* y,int *z){
    *z = *x + *y;
}

int add_ceshi(int m, int n){
    int *x=0;
    int *y=0;
    int *z=0;
    hipMalloc((void**)&x,sizeof(int));
    hipMalloc((void**)&y,sizeof(int));
    hipMalloc((void**)&z,sizeof(int));
    hipMemcpy(x, &m, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(y, &n, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( addwithcuda), dim3(1), dim3(1), 0, 0, x,y,z);
    int *result=(int* )malloc(sizeof(int));
    hipMemcpy(result, z, sizeof(int), hipMemcpyDeviceToHost);
    hipFree(x);
    hipFree(y);
    hipFree(z);
    return *result;
}

}
b0b92ede1d3dbd4569f1fbdf66ed72178f5ddaef.cu
#include <add_cuda.h>

namespace zjx{

__global__ void addwithcuda(int* x, int* y,int *z){
    *z = *x + *y;
}

int add_ceshi(int m, int n){
    int *x=0;
    int *y=0;
    int *z=0;
    cudaMalloc((void**)&x,sizeof(int));
    cudaMalloc((void**)&y,sizeof(int));
    cudaMalloc((void**)&z,sizeof(int));
    cudaMemcpy(x, &m, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(y, &n, sizeof(int), cudaMemcpyHostToDevice);
    addwithcuda<<<1, 1>>>(x,y,z);
    int *result=(int* )malloc(sizeof(int));
    cudaMemcpy(result, z, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    return *result;
}

}
c4a88ba5108cb97facef58e08570a812424c23ee.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/case.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <strings/char_types/is_flags.h>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>

namespace cudf {
namespace strings {
namespace detail {
namespace { // anonym.

//execute string wrap:
//
struct execute_wrap {
  execute_wrap(column_device_view const d_column,
               int32_t const* d_offsets,
               char* d_chars,
               size_type width):
    d_column_(d_column),
    d_offsets_(d_offsets),
    d_chars_(d_chars),
    width_(width)
  {
  }

  __device__ int32_t operator()(size_type idx) {
    if( d_column_.is_null(idx) )
      return 0; // null string

    string_view d_str = d_column_.template element<string_view>(idx);
    char* d_buffer = d_chars_ + d_offsets_[idx];

    int charOffsetToLastSpace = -1;
    int byteOffsetToLastSpace = -1;
    int spos=0;
    int bidx=0;

    for( auto itr = d_str.begin(); itr != d_str.end(); ++itr ) {
      auto the_chr = *itr;
      auto pos = itr.position();

      //execute conditions:
      //
      if( the_chr <= ' ' ) { // convert all whitespace to space
        d_buffer[bidx] = ' ';
        byteOffsetToLastSpace = bidx;
        charOffsetToLastSpace = pos;
      }
      if( (pos - spos) >= width_ ) {
        if( byteOffsetToLastSpace >=0 ) {
          d_buffer[byteOffsetToLastSpace] = '\n';
          spos = charOffsetToLastSpace;
          byteOffsetToLastSpace = charOffsetToLastSpace = -1;
        }
      }
      bidx += detail::bytes_in_char_utf8(the_chr);
    }
    return 0;
  }

private:
  column_device_view const d_column_;
  int32_t const* d_offsets_;
  char* d_chars_;
  size_type width_;
};

}//anonym.

template<typename device_execute_functor>
std::unique_ptr<column> wrap( strings_column_view const& strings,
                              size_type width,
                              rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
                              hipStream_t stream = 0)
{
  CUDF_EXPECTS(width > 0, "Positive wrap width required");

  auto strings_count = strings.size();
  if( strings_count == 0 )
    return detail::make_empty_strings_column(mr,stream);

  auto execpol = rmm::exec_policy(stream);

  auto strings_column = column_device_view::create(strings.parent(),stream);
  auto d_column = *strings_column;

  // copy null mask
  rmm::device_buffer null_mask = copy_bitmask(strings.parent(),stream,mr);

  // build offsets column
  auto offsets_column = std::make_unique<column>( strings.offsets(), stream, mr ); // makes a copy
  auto d_new_offsets = offsets_column->view().template data<int32_t>();

  auto chars_column = std::make_unique<column>( strings.chars(), stream, mr ); // makes a copy
  auto d_chars = chars_column->mutable_view().data<char>();

  device_execute_functor d_execute_fctr{d_column, d_new_offsets, d_chars, width};

  thrust::for_each_n(execpol->on(stream),
                     thrust::make_counting_iterator<size_type>(0), strings_count, d_execute_fctr);

  return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
                             d_column.null_count(), std::move(null_mask), stream, mr);
}

}//namespace detail

std::unique_ptr<column> wrap( strings_column_view const& strings,
                              size_type width,
                              rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::wrap<detail::execute_wrap>(strings, width, mr);
}

}//namespace strings
}//namespace cudf
c4a88ba5108cb97facef58e08570a812424c23ee.cu
/*
 * Copyright (c) 2020, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cudf/column/column.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/case.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <strings/char_types/is_flags.h>
#include <strings/utilities.hpp>
#include <strings/utilities.cuh>

namespace cudf {
namespace strings {
namespace detail {
namespace { // anonym.

//execute string wrap:
//
struct execute_wrap {
  execute_wrap(column_device_view const d_column,
               int32_t const* d_offsets,
               char* d_chars,
               size_type width):
    d_column_(d_column),
    d_offsets_(d_offsets),
    d_chars_(d_chars),
    width_(width)
  {
  }

  __device__ int32_t operator()(size_type idx) {
    if( d_column_.is_null(idx) )
      return 0; // null string

    string_view d_str = d_column_.template element<string_view>(idx);
    char* d_buffer = d_chars_ + d_offsets_[idx];

    int charOffsetToLastSpace = -1;
    int byteOffsetToLastSpace = -1;
    int spos=0;
    int bidx=0;

    for( auto itr = d_str.begin(); itr != d_str.end(); ++itr ) {
      auto the_chr = *itr;
      auto pos = itr.position();

      //execute conditions:
      //
      if( the_chr <= ' ' ) { // convert all whitespace to space
        d_buffer[bidx] = ' ';
        byteOffsetToLastSpace = bidx;
        charOffsetToLastSpace = pos;
      }
      if( (pos - spos) >= width_ ) {
        if( byteOffsetToLastSpace >=0 ) {
          d_buffer[byteOffsetToLastSpace] = '\n';
          spos = charOffsetToLastSpace;
          byteOffsetToLastSpace = charOffsetToLastSpace = -1;
        }
      }
      bidx += detail::bytes_in_char_utf8(the_chr);
    }
    return 0;
  }

private:
  column_device_view const d_column_;
  int32_t const* d_offsets_;
  char* d_chars_;
  size_type width_;
};

}//anonym.

template<typename device_execute_functor>
std::unique_ptr<column> wrap( strings_column_view const& strings,
                              size_type width,
                              rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
                              cudaStream_t stream = 0)
{
  CUDF_EXPECTS(width > 0, "Positive wrap width required");

  auto strings_count = strings.size();
  if( strings_count == 0 )
    return detail::make_empty_strings_column(mr,stream);

  auto execpol = rmm::exec_policy(stream);

  auto strings_column = column_device_view::create(strings.parent(),stream);
  auto d_column = *strings_column;

  // copy null mask
  rmm::device_buffer null_mask = copy_bitmask(strings.parent(),stream,mr);

  // build offsets column
  auto offsets_column = std::make_unique<column>( strings.offsets(), stream, mr ); // makes a copy
  auto d_new_offsets = offsets_column->view().template data<int32_t>();

  auto chars_column = std::make_unique<column>( strings.chars(), stream, mr ); // makes a copy
  auto d_chars = chars_column->mutable_view().data<char>();

  device_execute_functor d_execute_fctr{d_column, d_new_offsets, d_chars, width};

  thrust::for_each_n(execpol->on(stream),
                     thrust::make_counting_iterator<size_type>(0), strings_count, d_execute_fctr);

  return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column),
                             d_column.null_count(), std::move(null_mask), stream, mr);
}

}//namespace detail

std::unique_ptr<column> wrap( strings_column_view const& strings,
                              size_type width,
                              rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::wrap<detail::execute_wrap>(strings, width, mr);
}

}//namespace strings
}//namespace cudf
fc569f4d1f75fcbd8af63b41eb00d1f6833dfc08.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zlaswp.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "common_magma.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } dlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswp_kernel( int n, double *dAT, int ldda, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /** Purpose: ============= DLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,N) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. \param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1 <= n.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2 <= n.) \param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ // It is used in dgessm, dgetrf_incpiv. 
extern "C" void magmablas_dlaswp_q( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 1 || k1 > n ) info = -4; else if ( k2 < 1 || k2 > n ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 blocks( (n + NTHREADS - 1) / NTHREADS ); dim3 threads( NTHREADS ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( dlaswp_kernel), dim3(blocks), dim3(threads), 0, queue , n, dAT(k,0), ldda, params ); } #undef dAT } /** @see magmablas_dlaswp_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswp( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci ) { magmablas_dlaswp_q( n, dAT, ldda, k1, k2, ipiv, inci, magma_stream ); } // ------------------------------------------------------------ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswpx_kernel( int n, double *dA, int ldx, int ldy, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; double *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dA + i2*ldx; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /** Purpose: ============= DLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dA DOUBLE PRECISION array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. \param[in] ldx INTEGER Stride between elements in same column. \param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) \param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswpx_q( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 blocks( (n + NTHREADS - 1) / NTHREADS ); dim3 threads( NTHREADS ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( dlaswpx_kernel), dim3(blocks), dim3(threads), 0, queue , n, dA(k,0), ldx, ldy, params ); } #undef dA } /** @see magmablas_dlaswpx_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswpx( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci ) { return magmablas_dlaswpx_q( n, dA, ldx, ldy, k1, k2, ipiv, inci, magma_stream ); } // ------------------------------------------------------------ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_dlaswp // (including copying pivots to the GPU). __global__ void dlaswp2_kernel( int n, double *dAT, int ldda, int npivots, const magma_int_t* d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /** Purpose: ============= DLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. \param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) \param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. 
@param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswp2_q( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 blocks( (n + NTHREADS - 1) / NTHREADS ); dim3 threads( NTHREADS ); hipLaunchKernelGGL(( dlaswp2_kernel), dim3(blocks), dim3(threads), 0, queue , n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); } /** @see magmablas_dlaswp2_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswp2( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci ) { magmablas_dlaswp2_q( n, dAT, ldda, k1, k2, d_ipiv, inci, magma_stream ); }
fc569f4d1f75fcbd8af63b41eb00d1f6833dfc08.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zlaswp.cu normal z -> d, Sat Nov 15 19:53:59 2014 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "common_magma.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } dlaswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswp_kernel( int n, double *dAT, int ldda, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /** Purpose: ============= DLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,N) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. \param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1 <= n.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2 <= n.) \param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_daux2 ********************************************************************/ // It is used in dgessm, dgetrf_incpiv. 
extern "C" void magmablas_dlaswp_q( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 1 || k1 > n ) info = -4; else if ( k2 < 1 || k2 > n ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 blocks( (n + NTHREADS - 1) / NTHREADS ); dim3 threads( NTHREADS ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } dlaswp_kernel<<< blocks, threads, 0, queue >>>( n, dAT(k,0), ldda, params ); } #undef dAT } /** @see magmablas_dlaswp_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswp( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci ) { magmablas_dlaswp_q( n, dAT, ldda, k1, k2, ipiv, inci, magma_stream ); } // ------------------------------------------------------------ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void dlaswpx_kernel( int n, double *dA, int ldx, int ldy, dlaswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; double *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; double *A2 = dA + i2*ldx; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /** Purpose: ============= DLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dA DOUBLE PRECISION array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. \param[in] ldx INTEGER Stride between elements in same column. \param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) \param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswpx_q( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 blocks( (n + NTHREADS - 1) / NTHREADS ); dim3 threads( NTHREADS ); dlaswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } dlaswpx_kernel<<< blocks, threads, 0, queue >>>( n, dA(k,0), ldx, ldy, params ); } #undef dA } /** @see magmablas_dlaswpx_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswpx( magma_int_t n, magmaDouble_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci ) { return magmablas_dlaswpx_q( n, dA, ldx, ldy, k1, k2, ipiv, inci, magma_stream ); } // ------------------------------------------------------------ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_dlaswp // (including copying pivots to the GPU). __global__ void dlaswp2_kernel( int n, double *dAT, int ldda, int npivots, const magma_int_t* d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; double *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index double *A2 = dAT + i2*ldda; double temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /** Purpose: ============= DLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== \param[in] n INTEGER The number of columns of the matrix A. \param[in,out] dAT DOUBLE PRECISION array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. \param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) \param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) \param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) \param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. \param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswp2_q( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 blocks( (n + NTHREADS - 1) / NTHREADS ); dim3 threads( NTHREADS ); dlaswp2_kernel<<< blocks, threads, 0, queue >>> ( n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); } /** @see magmablas_dlaswp2_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dlaswp2( magma_int_t n, magmaDouble_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci ) { magmablas_dlaswp2_q( n, dAT, ldda, k1, k2, d_ipiv, inci, magma_stream ); }
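The LAPACK-style pivot semantics documented above (one-based indices, IPIV(K) = L meaning rows K and L are interchanged, applied for K = k1..k2 with stride inci) are easiest to see in a plain CPU loop. The sketch below is illustrative only: it is not MAGMA code, the helper name laswp_rowwise_reference is made up, and it assumes the matrix is stored row-wise with leading dimension ldda, as dAT is in the kernels above (element (i,j) at AT[i*ldda + j]).

#include <utility>   // std::swap

// Reference for the row interchanges the dlaswp kernels perform on a
// row-wise stored n-column matrix AT.
static void laswp_rowwise_reference(int n, double *AT, int ldda,
                                    int k1, int k2,
                                    const int *ipiv, int inci)
{
    for (int k = k1 - 1; k < k2; ++k) {          // k is zero-based row K-1
        int l = ipiv[k * inci] - 1;              // IPIV(K) is one-based
        if (l != k) {
            for (int j = 0; j < n; ++j)
                std::swap(AT[k * ldda + j], AT[l * ldda + j]);
        }
    }
}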
fe2fc01e16dc0050bc058546dd3832030c4edec1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_flux_calc_kernelz; int xdim0_flux_calc_kernelz_h = -1; __constant__ int ydim0_flux_calc_kernelz; int ydim0_flux_calc_kernelz_h = -1; __constant__ int xdim1_flux_calc_kernelz; int xdim1_flux_calc_kernelz_h = -1; __constant__ int ydim1_flux_calc_kernelz; int ydim1_flux_calc_kernelz_h = -1; __constant__ int xdim2_flux_calc_kernelz; int xdim2_flux_calc_kernelz_h = -1; __constant__ int ydim2_flux_calc_kernelz; int ydim2_flux_calc_kernelz_h = -1; __constant__ int xdim3_flux_calc_kernelz; int xdim3_flux_calc_kernelz_h = -1; __constant__ int ydim3_flux_calc_kernelz; int ydim3_flux_calc_kernelz_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_flux_calc_kernelz*(y)+xdim0_flux_calc_kernelz*ydim0_flux_calc_kernelz*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_flux_calc_kernelz*(y)+xdim1_flux_calc_kernelz*ydim1_flux_calc_kernelz*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_flux_calc_kernelz*(y)+xdim2_flux_calc_kernelz*ydim2_flux_calc_kernelz*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_flux_calc_kernelz*(y)+xdim3_flux_calc_kernelz*ydim3_flux_calc_kernelz*(z)) //user function __device__ void flux_calc_kernelz( double *vol_flux_z, const double *zarea, const double *zvel0, const double *zvel1) { vol_flux_z[OPS_ACC0(0,0,0)] = 0.125 * dt * (zarea[OPS_ACC1(0,0,0)]) * ( zvel0[OPS_ACC2(0,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,1,0)] + zvel1[OPS_ACC3(0,0,0)] + zvel1[OPS_ACC3(1,0,0)] + zvel1[OPS_ACC3(0,1,0)] + zvel1[OPS_ACC3(1,1,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_flux_calc_kernelz( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_flux_calc_kernelz + idx_z * 1 * xdim0_flux_calc_kernelz * ydim0_flux_calc_kernelz; arg1 += idx_x * 1 + idx_y * 1 * xdim1_flux_calc_kernelz + idx_z * 1 * xdim1_flux_calc_kernelz * ydim1_flux_calc_kernelz; arg2 += idx_x * 1 + idx_y * 1 * xdim2_flux_calc_kernelz + idx_z * 1 * xdim2_flux_calc_kernelz * ydim2_flux_calc_kernelz; arg3 += idx_x * 1 + idx_y * 1 * xdim3_flux_calc_kernelz + idx_z * 1 * xdim3_flux_calc_kernelz * ydim3_flux_calc_kernelz; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { flux_calc_kernelz(arg0, arg1, arg2, arg3); } } // host stub function void ops_par_loop_flux_calc_kernelz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_arg args[4] = { arg0, arg1, arg2, arg3}; ops_timing_realloc(10,"flux_calc_kernelz"); OPS_kernels[10].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += 
(range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_flux_calc_kernelz_h || ydim0 != ydim0_flux_calc_kernelz_h || xdim1 != xdim1_flux_calc_kernelz_h || ydim1 != ydim1_flux_calc_kernelz_h || xdim2 != xdim2_flux_calc_kernelz_h || ydim2 != ydim2_flux_calc_kernelz_h || xdim3 != xdim3_flux_calc_kernelz_h || ydim3 != ydim3_flux_calc_kernelz_h) { hipMemcpyToSymbol( xdim0_flux_calc_kernelz, &xdim0, sizeof(int) ); xdim0_flux_calc_kernelz_h = xdim0; hipMemcpyToSymbol( ydim0_flux_calc_kernelz, &ydim0, sizeof(int) ); ydim0_flux_calc_kernelz_h = ydim0; hipMemcpyToSymbol( xdim1_flux_calc_kernelz, &xdim1, sizeof(int) ); xdim1_flux_calc_kernelz_h = xdim1; hipMemcpyToSymbol( ydim1_flux_calc_kernelz, &ydim1, sizeof(int) ); ydim1_flux_calc_kernelz_h = ydim1; hipMemcpyToSymbol( xdim2_flux_calc_kernelz, &xdim2, sizeof(int) ); xdim2_flux_calc_kernelz_h = xdim2; hipMemcpyToSymbol( ydim2_flux_calc_kernelz, &ydim2, sizeof(int) ); ydim2_flux_calc_kernelz_h = ydim2; hipMemcpyToSymbol( xdim3_flux_calc_kernelz, &xdim3, sizeof(int) ); xdim3_flux_calc_kernelz_h = xdim3; hipMemcpyToSymbol( ydim3_flux_calc_kernelz, &ydim3, sizeof(int) ); ydim3_flux_calc_kernelz_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; char *p_a[4]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; 
#endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); ops_timers_core(&c1,&t1); OPS_kernels[10].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_flux_calc_kernelz), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[10].time += t2-t1; ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); //Update kernel record OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg3); }
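The OPS_ACC* macros defined at the top of this file flatten a relative 3D offset into a 1D index on a padded block, after each thread has already advanced its argument pointer to its own (idx_x, idx_y, idx_z) element. A tiny stand-alone helper stating the same arithmetic (not part of the generated code):

// Same index expression as OPS_ACCn(x,y,z) = x + xdim*(y) + xdim*ydim*(z)
// for a block padded to xdim columns and ydim rows.
static inline int ops_flat_offset(int x, int y, int z, int xdim, int ydim)
{
    return x + xdim * y + xdim * ydim * z;
}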
fe2fc01e16dc0050bc058546dd3832030c4edec1.cu
// // auto-generated by ops.py // __constant__ int xdim0_flux_calc_kernelz; int xdim0_flux_calc_kernelz_h = -1; __constant__ int ydim0_flux_calc_kernelz; int ydim0_flux_calc_kernelz_h = -1; __constant__ int xdim1_flux_calc_kernelz; int xdim1_flux_calc_kernelz_h = -1; __constant__ int ydim1_flux_calc_kernelz; int ydim1_flux_calc_kernelz_h = -1; __constant__ int xdim2_flux_calc_kernelz; int xdim2_flux_calc_kernelz_h = -1; __constant__ int ydim2_flux_calc_kernelz; int ydim2_flux_calc_kernelz_h = -1; __constant__ int xdim3_flux_calc_kernelz; int xdim3_flux_calc_kernelz_h = -1; __constant__ int ydim3_flux_calc_kernelz; int ydim3_flux_calc_kernelz_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_flux_calc_kernelz*(y)+xdim0_flux_calc_kernelz*ydim0_flux_calc_kernelz*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_flux_calc_kernelz*(y)+xdim1_flux_calc_kernelz*ydim1_flux_calc_kernelz*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_flux_calc_kernelz*(y)+xdim2_flux_calc_kernelz*ydim2_flux_calc_kernelz*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_flux_calc_kernelz*(y)+xdim3_flux_calc_kernelz*ydim3_flux_calc_kernelz*(z)) //user function __device__ void flux_calc_kernelz( double *vol_flux_z, const double *zarea, const double *zvel0, const double *zvel1) { vol_flux_z[OPS_ACC0(0,0,0)] = 0.125 * dt * (zarea[OPS_ACC1(0,0,0)]) * ( zvel0[OPS_ACC2(0,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,1,0)] + zvel1[OPS_ACC3(0,0,0)] + zvel1[OPS_ACC3(1,0,0)] + zvel1[OPS_ACC3(0,1,0)] + zvel1[OPS_ACC3(1,1,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_flux_calc_kernelz( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_flux_calc_kernelz + idx_z * 1 * xdim0_flux_calc_kernelz * ydim0_flux_calc_kernelz; arg1 += idx_x * 1 + idx_y * 1 * xdim1_flux_calc_kernelz + idx_z * 1 * xdim1_flux_calc_kernelz * ydim1_flux_calc_kernelz; arg2 += idx_x * 1 + idx_y * 1 * xdim2_flux_calc_kernelz + idx_z * 1 * xdim2_flux_calc_kernelz * ydim2_flux_calc_kernelz; arg3 += idx_x * 1 + idx_y * 1 * xdim3_flux_calc_kernelz + idx_z * 1 * xdim3_flux_calc_kernelz * ydim3_flux_calc_kernelz; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { flux_calc_kernelz(arg0, arg1, arg2, arg3); } } // host stub function void ops_par_loop_flux_calc_kernelz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_arg args[4] = { arg0, arg1, arg2, arg3}; ops_timing_realloc(10,"flux_calc_kernelz"); OPS_kernels[10].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = 
range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_flux_calc_kernelz_h || ydim0 != ydim0_flux_calc_kernelz_h || xdim1 != xdim1_flux_calc_kernelz_h || ydim1 != ydim1_flux_calc_kernelz_h || xdim2 != xdim2_flux_calc_kernelz_h || ydim2 != ydim2_flux_calc_kernelz_h || xdim3 != xdim3_flux_calc_kernelz_h || ydim3 != ydim3_flux_calc_kernelz_h) { cudaMemcpyToSymbol( xdim0_flux_calc_kernelz, &xdim0, sizeof(int) ); xdim0_flux_calc_kernelz_h = xdim0; cudaMemcpyToSymbol( ydim0_flux_calc_kernelz, &ydim0, sizeof(int) ); ydim0_flux_calc_kernelz_h = ydim0; cudaMemcpyToSymbol( xdim1_flux_calc_kernelz, &xdim1, sizeof(int) ); xdim1_flux_calc_kernelz_h = xdim1; cudaMemcpyToSymbol( ydim1_flux_calc_kernelz, &ydim1, sizeof(int) ); ydim1_flux_calc_kernelz_h = ydim1; cudaMemcpyToSymbol( xdim2_flux_calc_kernelz, &xdim2, sizeof(int) ); xdim2_flux_calc_kernelz_h = xdim2; cudaMemcpyToSymbol( ydim2_flux_calc_kernelz, &ydim2, sizeof(int) ); ydim2_flux_calc_kernelz_h = ydim2; cudaMemcpyToSymbol( xdim3_flux_calc_kernelz, &xdim3, sizeof(int) ); xdim3_flux_calc_kernelz_h = xdim3; cudaMemcpyToSymbol( ydim3_flux_calc_kernelz, &ydim3, sizeof(int) ); ydim3_flux_calc_kernelz_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; char *p_a[4]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] 
- d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); ops_timers_core(&c1,&t1); OPS_kernels[10].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_flux_calc_kernelz<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[10].time += t2-t1; ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); //Update kernel record OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[10].transfer += ops_compute_transfer(dim, range, &arg3); }
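The .hip and .cu listings of this record differ only in the mechanical substitutions hipify performs: header and runtime-API renames (cudaMemcpyToSymbol becomes hipMemcpyToSymbol, cudaDeviceSynchronize becomes hipDeviceSynchronize, and so on) and the rewrite of the <<<grid, block>>> launch into hipLaunchKernelGGL. A minimal self-contained CUDA sketch of the same pattern, with the hipified form noted in comments (the kernel name scale and the sizes are made up):

#include <cstdio>
#include <cuda_runtime.h>              // hipify: #include "hip/hip_runtime.h"

__global__ void scale(double *x, double a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 1 << 10;
    double *d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(double));            // hipify: hipMalloc
    cudaMemset(d_x, 0, n * sizeof(double));          // hipify: hipMemset
    // CUDA launch syntax:
    scale<<<(n + 255) / 256, 256>>>(d_x, 2.0, n);
    // hipify rewrites the launch to:
    //   hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0,
    //                      d_x, 2.0, n);
    // where the extra 0, 0 are dynamic shared-memory bytes and the stream.
    cudaDeviceSynchronize();                         // hipify: hipDeviceSynchronize
    cudaFree(d_x);                                   // hipify: hipFree
    return 0;
}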
ff86553c188ee0651dd2f0556ed7ffd592ee2861.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel2_x [4][2]; static int dims_advec_mom_kernel2_x_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel2_x_gpu(ACC<double> &vel1, const ACC<double> &node_mass_post, const ACC<double> &node_mass_pre, const ACC<double> &mom_flux) { vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) + mom_flux(-1,0,0) - mom_flux(0,0,0) ) / node_mass_post(0,0,0); } __global__ void ops_advec_mom_kernel2_x( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[0][0] * dims_advec_mom_kernel2_x[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[1][0] * dims_advec_mom_kernel2_x[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[2][0] * dims_advec_mom_kernel2_x[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[3][0] * dims_advec_mom_kernel2_x[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel2_x[0][0], dims_advec_mom_kernel2_x[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel2_x[1][0], dims_advec_mom_kernel2_x[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel2_x[2][0], dims_advec_mom_kernel2_x[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel2_x[3][0], dims_advec_mom_kernel2_x[3][1], arg3); advec_mom_kernel2_x_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel2_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel2_x_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,129)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(129,"advec_mom_kernel2_x"); OPS_kernels[129].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel2_x_h[0][0] || ydim0 != dims_advec_mom_kernel2_x_h[0][1] || xdim1 != dims_advec_mom_kernel2_x_h[1][0] || ydim1 != 
dims_advec_mom_kernel2_x_h[1][1] || xdim2 != dims_advec_mom_kernel2_x_h[2][0] || ydim2 != dims_advec_mom_kernel2_x_h[2][1] || xdim3 != dims_advec_mom_kernel2_x_h[3][0] || ydim3 != dims_advec_mom_kernel2_x_h[3][1]) { dims_advec_mom_kernel2_x_h[0][0] = xdim0; dims_advec_mom_kernel2_x_h[0][1] = ydim0; dims_advec_mom_kernel2_x_h[1][0] = xdim1; dims_advec_mom_kernel2_x_h[1][1] = ydim1; dims_advec_mom_kernel2_x_h[2][0] = xdim2; dims_advec_mom_kernel2_x_h[2][1] = ydim2; dims_advec_mom_kernel2_x_h[3][0] = xdim3; dims_advec_mom_kernel2_x_h[3][1] = ydim3; cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel2_x, dims_advec_mom_kernel2_x_h, sizeof(dims_advec_mom_kernel2_x))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[129].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel2_x), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[129].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[129].mpi_time += t2-t1; OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, 
&arg1); OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel2_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 129; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 129; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel2_x_execute; if (OPS_diags > 1) { ops_timing_realloc(129,"advec_mom_kernel2_x"); } ops_enqueue_kernel(desc); } #endif
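The generated kernel above indexes its arguments through an ACC<double> accessor instead of the raw OPS_ACC macros used in the earlier record. OPS ships its own ACC implementation; purely to illustrate the call pattern vel1(0,0,0), a minimal accessor with the same shape could look like the sketch below. The name acc3d and the layout are assumptions consistent with the OPS_ACC index arithmetic, not the OPS source.

template <typename T>
struct acc3d {
    int xdim, ydim;   // padded x and y sizes of the block
    T  *ptr;          // already offset to this thread's (idx_x, idx_y, idx_z)
    __host__ __device__ acc3d(int xd, int yd, T *p) : xdim(xd), ydim(yd), ptr(p) {}
    __host__ __device__ T &operator()(int x, int y, int z) const {
        return ptr[x + xdim * y + xdim * ydim * z];   // relative offset
    }
};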
ff86553c188ee0651dd2f0556ed7ffd592ee2861.cu
// // auto-generated by ops.py // __constant__ int dims_advec_mom_kernel2_x [4][2]; static int dims_advec_mom_kernel2_x_h [4][2] = {0}; //user function __device__ inline void advec_mom_kernel2_x_gpu(ACC<double> &vel1, const ACC<double> &node_mass_post, const ACC<double> &node_mass_pre, const ACC<double> &mom_flux) { vel1(0,0,0) = ( vel1(0,0,0) * node_mass_pre(0,0,0) + mom_flux(-1,0,0) - mom_flux(0,0,0) ) / node_mass_post(0,0,0); } __global__ void ops_advec_mom_kernel2_x( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[0][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[0][0] * dims_advec_mom_kernel2_x[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[1][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[1][0] * dims_advec_mom_kernel2_x[1][1]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[2][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[2][0] * dims_advec_mom_kernel2_x[2][1]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel2_x[3][0] + idx_z * 1*1 * dims_advec_mom_kernel2_x[3][0] * dims_advec_mom_kernel2_x[3][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_advec_mom_kernel2_x[0][0], dims_advec_mom_kernel2_x[0][1], arg0); const ACC<double> argp1(dims_advec_mom_kernel2_x[1][0], dims_advec_mom_kernel2_x[1][1], arg1); const ACC<double> argp2(dims_advec_mom_kernel2_x[2][0], dims_advec_mom_kernel2_x[2][1], arg2); const ACC<double> argp3(dims_advec_mom_kernel2_x[3][0], dims_advec_mom_kernel2_x[3][1], arg3); advec_mom_kernel2_x_gpu(argp0, argp1, argp2, argp3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel2_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel2_x_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,129)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(129,"advec_mom_kernel2_x"); OPS_kernels[129].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 4,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != dims_advec_mom_kernel2_x_h[0][0] || ydim0 != dims_advec_mom_kernel2_x_h[0][1] || xdim1 != dims_advec_mom_kernel2_x_h[1][0] || ydim1 != dims_advec_mom_kernel2_x_h[1][1] || xdim2 != dims_advec_mom_kernel2_x_h[2][0] || ydim2 != 
dims_advec_mom_kernel2_x_h[2][1] || xdim3 != dims_advec_mom_kernel2_x_h[3][0] || ydim3 != dims_advec_mom_kernel2_x_h[3][1]) { dims_advec_mom_kernel2_x_h[0][0] = xdim0; dims_advec_mom_kernel2_x_h[0][1] = ydim0; dims_advec_mom_kernel2_x_h[1][0] = xdim1; dims_advec_mom_kernel2_x_h[1][1] = ydim1; dims_advec_mom_kernel2_x_h[2][0] = xdim2; dims_advec_mom_kernel2_x_h[2][1] = ydim2; dims_advec_mom_kernel2_x_h[3][0] = xdim3; dims_advec_mom_kernel2_x_h[3][1] = ydim3; cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel2_x, dims_advec_mom_kernel2_x_h, sizeof(dims_advec_mom_kernel2_x))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[129].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel2_x<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[129].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[129].mpi_time += t2-t1; OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[129].transfer += 
ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel2_x(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 129; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 129; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_advec_mom_kernel2_x_execute; if (OPS_diags > 1) { ops_timing_realloc(129,"advec_mom_kernel2_x"); } ops_enqueue_kernel(desc); } #endif
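In the OPS_LAZY path above, the kernel-descriptor hash is accumulated as ((hash << 5) + hash) + value starting from the seed 5381, i.e. hash = hash * 33 + value, which is the classic djb2 scheme. The one-line helper below just restates that step explicitly (illustrative, not part of OPS):

// djb2 accumulation step, as used for desc->hash above.
static inline unsigned long long djb2_step(unsigned long long h, unsigned long long v)
{
    return ((h << 5) + h) + v;   // h * 33 + v
}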
87b20766ba959aa97a5c935e778a86ee7d395f2c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // OpenGL Graphics includes #include <glew.h> #include <freeglut.h> #include <cudaDefs.h> #include <imageManager.h> // includes, cuda #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <helper_gl.h> // helper functions for CUDA/GL interop #include "imageKernels.cuh" #define BLOCK_DIM 8 hipError_t error = hipSuccess; hipDeviceProp_t deviceProp = hipDeviceProp_t(); //CUDA variables unsigned int imageWidth; unsigned int imageHeight; unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit unsigned int imagePitch; cudaGraphicsResource_t cudaPBOResource; cudaGraphicsResource_t cudaTexResource; texture<uchar4, 2, hipReadModeElementType> cudaTexRef; hipChannelFormatDesc cudaTexChannelDesc; KernelSetting ks; unsigned char someValue = 0; //OpenGL unsigned int pboID; unsigned int textureID; unsigned int viewportWidth = 1024; unsigned int viewportHeight = 1024; #pragma region CUDA Routines __global__ void applyFilter(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo) { unsigned int col = (threadIdx.x + blockIdx.x * blockDim.x); unsigned int row = (threadIdx.y + blockIdx.y * blockDim.y); unsigned int offset = col + row * pboWidth; uchar4 texel = tex2D(cudaTexRef, col, row); texel.x = someValue; uchar4* uchar4pbo = (uchar4*)pbo; uchar4pbo[offset] = texel; } void cudaWorker() { hipArray* array; //TODO 3: Map cudaTexResource hipGraphicsMapResources(1, &cudaTexResource, 0); //TODO 4: Get Mapped Array of cudaTexResource hipGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0); //TODO 5: Get cudaTexChannelDesc from previously obtained array hipGetChannelDesc(&cudaTexChannelDesc, array); //TODO 6: Binf cudaTexRef to array hipBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc); checkError(); unsigned char *pboData; size_t pboSize; //TODO 7: Map cudaPBOResource hipGraphicsMapResources(1, &cudaPBOResource, 0); //TODO 7: Map Mapped pointer to cudaPBOResource data hipGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource); checkError(); //TODO 8: Set KernelSetting variable ks (dimBlock, dimGrid, etc.) such that block will have BLOCK_DIM x BLOCK_DIM threads ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1); ks.blockSize = BLOCK_DIM * BLOCK_DIM; ks.dimGrid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1); //Calling applyFileter kernel someValue++; if (someValue>255) someValue = 0; hipLaunchKernelGGL(( applyFilter), dim3(ks.dimGrid), dim3(ks.dimBlock), 0, 0, someValue, imageWidth,imageHeight, pboData); //Following code release mapped resources, unbinds texture and ensures that PBO data will be coppied into OpenGL texture. Do not modify following code! 
hipUnbindTexture(&cudaTexRef); hipGraphicsUnmapResources(1, &cudaPBOResource, 0); hipGraphicsUnmapResources(1, &cudaTexResource, 0); glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pboID); glBindTexture( GL_TEXTURE_2D, textureID); glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory } void initCUDAtex() { hipGLSetGLDevice(0); checkError(); //CUDA Texture settings cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates cudaTexRef.filterMode = hipFilterModePoint; //Otherwise texRef.filterMode = hipFilterModeLinear; for Linear interpolation of texels cudaTexRef.addressMode[0] = hipAddressModeClamp; //No repeat texture pattern cudaTexRef.addressMode[1] = hipAddressModeClamp; //No repeat texture pattern //TODO 1: Register OpenGL texture to CUDA resource hipGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, hipGraphicsMapFlagsReadOnly); checkError(); //TODO 2: Register PBO to CUDA resource hipGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, hipGraphicsRegisterFlagsWriteDiscard); checkError(); } void releaseCUDA() { hipGraphicsUnregisterResource(cudaPBOResource); hipGraphicsUnregisterResource(cudaTexResource); } #pragma endregion #pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!! void loadTexture(const char* imageFileName) { FreeImage_Initialise(); FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0); imageWidth = FreeImage_GetWidth(tmp); imageHeight = FreeImage_GetHeight(tmp); imageBPP = FreeImage_GetBPP(tmp); imagePitch = FreeImage_GetPitch(tmp); //OpenGL Texture glEnable(GL_TEXTURE_2D); glGenTextures(1,&textureID); glBindTexture( GL_TEXTURE_2D, textureID); //WARNING: Just some of inner format are supported by CUDA!!! glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp)); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); FreeImage_Unload(tmp); } void preparePBO() { glGenBuffers(1, &pboID); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID); // Make this the current UNPACK buffer (OpenGL is state-based) glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, NULL,GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image } void my_display() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, textureID); //I know this is a very old OpenGL, but we want to practice CUDA :-) //Now it will be a wasted time to learn you current features of OpenGL. Sorry for that however, you can visit my second seminar dealing with Computer Graphics (CG2). 
glBegin(GL_QUADS); glTexCoord2d(0,0); glVertex2d(0,0); glTexCoord2d(1,0); glVertex2d(viewportWidth, 0); glTexCoord2d(1,1); glVertex2d(viewportWidth, viewportHeight); glTexCoord2d(0,1); glVertex2d(0, viewportHeight); glEnd(); glDisable(GL_TEXTURE_2D); glFlush(); glutSwapBuffers(); } void my_resize(GLsizei w, GLsizei h) { viewportWidth=w; viewportHeight=h; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glViewport(0,0,viewportWidth,viewportHeight); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0,viewportWidth, 0,viewportHeight); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glutPostRedisplay(); } void my_idle() { cudaWorker(); glutPostRedisplay(); } void initGL(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(viewportWidth,viewportHeight); glutInitWindowPosition(0,0); glutCreateWindow(":-)"); glutDisplayFunc(my_display); glutReshapeFunc(my_resize); glutIdleFunc(my_idle); glutSetCursor(GLUT_CURSOR_CROSSHAIR); // initialize necessary OpenGL extensions glewInit(); glClearColor(0.0, 0.0, 0.0, 1.0); glShadeModel(GL_SMOOTH); glViewport(0,0,viewportWidth,viewportHeight); glFlush(); } void releaseOpenGL() { if (textureID > 0) glDeleteTextures(1, &textureID); if (pboID > 0) glDeleteBuffers(1, &pboID); } #pragma endregion void releaseResources() { releaseCUDA(); releaseOpenGL(); } int main(int argc, char *argv[]) { initializeCUDA(deviceProp); initGL(argc, argv); loadTexture("C:/Users/kne0035/dev/parallelAlgorithm2/parallelAlgorithm2/images/lena.png"); preparePBO(); initCUDAtex(); //start rendering mainloop glutMainLoop(); atexit(releaseResources); }
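The cudaWorker routine above follows the usual CUDA/OpenGL interop cycle: register the GL texture and PBO once (initCUDAtex), then each frame map the resources, obtain the backing array and device pointer, launch the kernel, and unmap so OpenGL can consume the PBO. A condensed sketch of that per-frame sequence, using only the runtime calls already present in the file and omitting error handling:

#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

// Per-frame skeleton of the interop cycle used in cudaWorker() above.
void interop_frame(cudaGraphicsResource_t texRes, cudaGraphicsResource_t pboRes)
{
    cudaArray_t array = nullptr;
    unsigned char *pboData = nullptr;
    size_t pboSize = 0;

    cudaGraphicsMapResources(1, &texRes, 0);                     // 1. map GL texture
    cudaGraphicsSubResourceGetMappedArray(&array, texRes, 0, 0); // 2. its backing array
    cudaGraphicsMapResources(1, &pboRes, 0);                     // 3. map the PBO
    cudaGraphicsResourceGetMappedPointer((void **)&pboData, &pboSize, pboRes);

    // 4. launch the kernel that reads the texture and writes the PBO
    //    (applyFilter in the file above).

    cudaGraphicsUnmapResources(1, &pboRes, 0);                   // 5. hand back to GL
    cudaGraphicsUnmapResources(1, &texRes, 0);
}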
87b20766ba959aa97a5c935e778a86ee7d395f2c.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // OpenGL Graphics includes #include <glew.h> #include <freeglut.h> #include <cudaDefs.h> #include <imageManager.h> // includes, cuda #include <cuda_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <helper_gl.h> // helper functions for CUDA/GL interop #include "imageKernels.cuh" #define BLOCK_DIM 8 cudaError_t error = cudaSuccess; cudaDeviceProp deviceProp = cudaDeviceProp(); //CUDA variables unsigned int imageWidth; unsigned int imageHeight; unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit unsigned int imagePitch; cudaGraphicsResource_t cudaPBOResource; cudaGraphicsResource_t cudaTexResource; texture<uchar4, 2, cudaReadModeElementType> cudaTexRef; cudaChannelFormatDesc cudaTexChannelDesc; KernelSetting ks; unsigned char someValue = 0; //OpenGL unsigned int pboID; unsigned int textureID; unsigned int viewportWidth = 1024; unsigned int viewportHeight = 1024; #pragma region CUDA Routines __global__ void applyFilter(const unsigned char someValue, const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo) { unsigned int col = (threadIdx.x + blockIdx.x * blockDim.x); unsigned int row = (threadIdx.y + blockIdx.y * blockDim.y); unsigned int offset = col + row * pboWidth; uchar4 texel = tex2D(cudaTexRef, col, row); texel.x = someValue; uchar4* uchar4pbo = (uchar4*)pbo; uchar4pbo[offset] = texel; } void cudaWorker() { cudaArray* array; //TODO 3: Map cudaTexResource cudaGraphicsMapResources(1, &cudaTexResource, 0); //TODO 4: Get Mapped Array of cudaTexResource cudaGraphicsSubResourceGetMappedArray(&array, cudaTexResource, 0, 0); //TODO 5: Get cudaTexChannelDesc from previously obtained array cudaGetChannelDesc(&cudaTexChannelDesc, array); //TODO 6: Binf cudaTexRef to array cudaBindTextureToArray(&cudaTexRef, array, &cudaTexChannelDesc); checkError(); unsigned char *pboData; size_t pboSize; //TODO 7: Map cudaPBOResource cudaGraphicsMapResources(1, &cudaPBOResource, 0); //TODO 7: Map Mapped pointer to cudaPBOResource data cudaGraphicsResourceGetMappedPointer((void**)&pboData, &pboSize, cudaPBOResource); checkError(); //TODO 8: Set KernelSetting variable ks (dimBlock, dimGrid, etc.) such that block will have BLOCK_DIM x BLOCK_DIM threads ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1); ks.blockSize = BLOCK_DIM * BLOCK_DIM; ks.dimGrid = dim3((imageWidth + BLOCK_DIM - 1) / BLOCK_DIM, (imageHeight + BLOCK_DIM - 1) / BLOCK_DIM, 1); //Calling applyFileter kernel someValue++; if (someValue>255) someValue = 0; applyFilter<<<ks.dimGrid, ks.dimBlock>>>(someValue, imageWidth,imageHeight, pboData); //Following code release mapped resources, unbinds texture and ensures that PBO data will be coppied into OpenGL texture. Do not modify following code! 
cudaUnbindTexture(&cudaTexRef); cudaGraphicsUnmapResources(1, &cudaPBOResource, 0); cudaGraphicsUnmapResources(1, &cudaTexResource, 0); glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pboID); glBindTexture( GL_TEXTURE_2D, textureID); glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, imageWidth, imageHeight, GL_RGBA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory } void initCUDAtex() { cudaGLSetGLDevice(0); checkError(); //CUDA Texture settings cudaTexRef.normalized = false; //Otherwise TRUE to access with normalized texture coordinates cudaTexRef.filterMode = cudaFilterModePoint; //Otherwise texRef.filterMode = cudaFilterModeLinear; for Linear interpolation of texels cudaTexRef.addressMode[0] = cudaAddressModeClamp; //No repeat texture pattern cudaTexRef.addressMode[1] = cudaAddressModeClamp; //No repeat texture pattern //TODO 1: Register OpenGL texture to CUDA resource cudaGraphicsGLRegisterImage(&cudaTexResource, textureID, GL_TEXTURE_2D, cudaGraphicsMapFlagsReadOnly); checkError(); //TODO 2: Register PBO to CUDA resource cudaGraphicsGLRegisterBuffer(&cudaPBOResource, pboID, cudaGraphicsRegisterFlagsWriteDiscard); checkError(); } void releaseCUDA() { cudaGraphicsUnregisterResource(cudaPBOResource); cudaGraphicsUnregisterResource(cudaTexResource); } #pragma endregion #pragma region OpenGL Routines - DO NOT MODIFY THIS SECTION !!! void loadTexture(const char* imageFileName) { FreeImage_Initialise(); FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0); imageWidth = FreeImage_GetWidth(tmp); imageHeight = FreeImage_GetHeight(tmp); imageBPP = FreeImage_GetBPP(tmp); imagePitch = FreeImage_GetPitch(tmp); //OpenGL Texture glEnable(GL_TEXTURE_2D); glGenTextures(1,&textureID); glBindTexture( GL_TEXTURE_2D, textureID); //WARNING: Just some of inner format are supported by CUDA!!! glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, imageWidth, imageHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, FreeImage_GetBits(tmp)); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP); FreeImage_Unload(tmp); } void preparePBO() { glGenBuffers(1, &pboID); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboID); // Make this the current UNPACK buffer (OpenGL is state-based) glBufferData(GL_PIXEL_UNPACK_BUFFER, imageWidth * imageHeight * 4, NULL,GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image } void my_display() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glEnable(GL_TEXTURE_2D); glBindTexture(GL_TEXTURE_2D, textureID); //I know this is a very old OpenGL, but we want to practice CUDA :-) //Now it will be a wasted time to learn you current features of OpenGL. Sorry for that however, you can visit my second seminar dealing with Computer Graphics (CG2). 
glBegin(GL_QUADS); glTexCoord2d(0,0); glVertex2d(0,0); glTexCoord2d(1,0); glVertex2d(viewportWidth, 0); glTexCoord2d(1,1); glVertex2d(viewportWidth, viewportHeight); glTexCoord2d(0,1); glVertex2d(0, viewportHeight); glEnd(); glDisable(GL_TEXTURE_2D); glFlush(); glutSwapBuffers(); } void my_resize(GLsizei w, GLsizei h) { viewportWidth=w; viewportHeight=h; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glViewport(0,0,viewportWidth,viewportHeight); glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluOrtho2D(0,viewportWidth, 0,viewportHeight); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glutPostRedisplay(); } void my_idle() { cudaWorker(); glutPostRedisplay(); } void initGL(int argc, char **argv) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA); glutInitWindowSize(viewportWidth,viewportHeight); glutInitWindowPosition(0,0); glutCreateWindow(":-)"); glutDisplayFunc(my_display); glutReshapeFunc(my_resize); glutIdleFunc(my_idle); glutSetCursor(GLUT_CURSOR_CROSSHAIR); // initialize necessary OpenGL extensions glewInit(); glClearColor(0.0, 0.0, 0.0, 1.0); glShadeModel(GL_SMOOTH); glViewport(0,0,viewportWidth,viewportHeight); glFlush(); } void releaseOpenGL() { if (textureID > 0) glDeleteTextures(1, &textureID); if (pboID > 0) glDeleteBuffers(1, &pboID); } #pragma endregion void releaseResources() { releaseCUDA(); releaseOpenGL(); } int main(int argc, char *argv[]) { initializeCUDA(deviceProp); initGL(argc, argv); loadTexture("C:/Users/kne0035/dev/parallelAlgorithm2/parallelAlgorithm2/images/lena.png"); preparePBO(); initCUDAtex(); //start rendering mainloop glutMainLoop(); atexit(releaseResources); }
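Both versions of this file use the legacy texture-reference API: a file-scope texture<uchar4, 2, ...> bound with cudaBindTextureToArray and sampled with tex2D on the reference. That API has since been deprecated and dropped from recent CUDA toolkits; the modern equivalent wraps the same cudaArray in a cudaTextureObject_t passed to the kernel as an ordinary argument. The sketch below shows that replacement under the same sampling settings the file configures; it is not part of the listed file.

#include <cuda_runtime.h>

// Build a texture object over an existing cudaArray holding uchar4 texels.
cudaTextureObject_t make_tex_object(cudaArray_t array)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = array;

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode     = cudaFilterModePoint;
    texDesc.readMode       = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, nullptr);
    return tex;
}

// Inside the kernel the object is read with:
//     uchar4 texel = tex2D<uchar4>(tex, col, row);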
998800abb121a5af7855e424faca16733198baf5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void add(int a, int b, int *c) { *c = a + b; } int main(int argc, char *argv[]) { int c; int *dev_c; hipError_t error = hipMalloc((void **)&dev_c, sizeof(int)); if(error != hipSuccess) { printf("Memory could not be allocated on device\n"); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 7, dev_c); error = hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost); if(error != hipSuccess) { printf("Could not copy from device to host\n"); exit(EXIT_FAILURE); } printf("%d = 2 + 7\n", c); hipFree(dev_c); return 0; }
998800abb121a5af7855e424faca16733198baf5.cu
#include <stdio.h> #include <stdlib.h> __global__ void add(int a, int b, int *c) { *c = a + b; } int main(int argc, char *argv[]) { int c; int *dev_c; cudaError_t error = cudaMalloc((void **)&dev_c, sizeof(int)); if(error != cudaSuccess) { printf("Memory could not be allocated on device\n"); exit(EXIT_FAILURE); } add<<<1,1>>>(2, 7, dev_c); error = cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); if(error != cudaSuccess) { printf("Could not copy from device to host\n"); exit(EXIT_FAILURE); } printf("%d = 2 + 7\n", c); cudaFree(dev_c); return 0; }
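The small example above checks the cudaError_t returned by cudaMalloc and cudaMemcpy but not the kernel launch itself, which returns nothing. A common way to also cover the launch is shown below; the macro name CHECK_CUDA is made up, but the calls are standard runtime API.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                             \
    do {                                                             \
        cudaError_t err_ = (call);                                   \
        if (err_ != cudaSuccess) {                                   \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,       \
                    cudaGetErrorString(err_));                       \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)

// After a launch, cudaGetLastError() reports launch-configuration errors
// and cudaDeviceSynchronize() surfaces errors raised during execution:
//     add<<<1,1>>>(2, 7, dev_c);
//     CHECK_CUDA(cudaGetLastError());
//     CHECK_CUDA(cudaDeviceSynchronize());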
36521f24734b7d61118e14683ce1847744163274.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ztranspose.cu normal z -> d, Fri Sep 11 18:29:21 2015 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void dtranspose_device( int m, int n, const double *A, int lda, double *AT, int ldat) { __shared__ double sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void dtranspose_kernel( int m, int n, const double *A, int lda, double *AT, int ldat) { dtranspose_device(m, n, A, lda, AT, ldat); } __global__ void dtranspose_kernel_batched( int m, int n, double **dA_array, int lda, double **dAT_array, int ldat) { int batchid = blockIdx.z; dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- dtranspose_q copies and transposes a matrix dA to matrix dAT. Same as dtranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT DOUBLE_PRECISION array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_q( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( dtranspose_kernel), dim3(grid), dim3(threads), 0, queue , m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_dtranspose_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat ) { magmablas_dtranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- dtranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as dtranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched_q( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); hipLaunchKernelGGL(( dtranspose_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_dtranspose_batched_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_dtranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
36521f24734b7d61118e14683ce1847744163274.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ztranspose.cu normal z -> d, Fri Sep 11 18:29:21 2015 @author Stan Tomov @author Mark Gates */ #include "common_magma.h" #define PRECISION_d #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) static __device__ void dtranspose_device( int m, int n, const double *A, int lda, double *AT, int ldat) { __shared__ double sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; AT += iby + tx + (ibx + ty)*ldat; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA into AT i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { AT[i2 + j2*ldat] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; AT += NX*ldat; } } /* kernel wrapper to call the device function. */ __global__ void dtranspose_kernel( int m, int n, const double *A, int lda, double *AT, int ldat) { dtranspose_device(m, n, A, lda, AT, ldat); } __global__ void dtranspose_kernel_batched( int m, int n, double **dA_array, int lda, double **dAT_array, int ldat) { int batchid = blockIdx.z; dtranspose_device(m, n, dA_array[batchid], lda, dAT_array[batchid], ldat); } /** Purpose ------- dtranspose_q copies and transposes a matrix dA to matrix dAT. Same as dtranspose, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA DOUBLE_PRECISION array, dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT DOUBLE_PRECISION array, dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_q( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); dtranspose_kernel<<< grid, threads, 0, queue >>> ( m, n, dA, ldda, dAT, lddat ); } /** @see magmablas_dtranspose_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose( magma_int_t m, magma_int_t n, magmaDouble_const_ptr dA, magma_int_t ldda, magmaDouble_ptr dAT, magma_int_t lddat ) { magmablas_dtranspose_q( m, n, dA, ldda, dAT, lddat, magma_stream ); } /** Purpose ------- dtranspose_batched_q copies and transposes a matrix dA_array[i] to matrix dAT_array[i]. Same as dtranspose_batched, but adds queue argument. Arguments --------- @param[in] m INTEGER The number of rows of the matrix dA. M >= 0. @param[in] n INTEGER The number of columns of the matrix dA. N >= 0. @param[in] dA_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dA, where each dA is of dimension (LDDA,N) The M-by-N matrix dA. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= M. @param[in] dAT_array DOUBLE_PRECISION* array, dimension (batchCount) array of pointers to the matrices dAT, where each dAT is of dimension (LDDAT,M) The N-by-M matrix dAT. @param[in] lddat INTEGER The leading dimension of the array dAT. LDDAT >= N. @param[in] queue magma_queue_t Queue to execute in. @param[in] batchCount Number of matrices in dA_array and dAT_array @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched_q( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < m ) info = -4; else if ( lddat < n ) info = -6; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ), batchCount ); dtranspose_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dA_array, ldda, dAT_array, lddat ); } /** @see magmablas_dtranspose_batched_q @ingroup magma_daux2 ********************************************************************/ extern "C" void magmablas_dtranspose_batched( magma_int_t m, magma_int_t n, double **dA_array, magma_int_t ldda, double **dAT_array, magma_int_t lddat, magma_int_t batchCount ) { magmablas_dtranspose_batched_q( m, n, dA_array, ldda, dAT_array, lddat, batchCount, magma_stream ); }
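The transpose pair above documents its tiling in the header comment (NB-by-NB tiles, NX-by-NY threads, NB/NX subtiles per tile, loads strided by NY). As a rough, self-contained illustration only and not MAGMA code, the host sketch below replays the same grid and subtile arithmetic; ceildiv and the sizes m = 100, n = 70 are assumptions introduced here.
// Hypothetical host-side check of the tiling arithmetic used by the transpose kernels above.
#include <cstdio>
static int ceildiv(int a, int b) { return (a + b - 1) / b; }   // same rounding as magma_ceildiv
int main() {
    const int NB = 32, NX = 32, NY = 8;   // the [sdc] settings defined in the file above
    const int m = 100, n = 70;            // example matrix sizes, chosen arbitrarily
    printf("kernel grid: %d x %d blocks of %d x %d threads\n",
           ceildiv(m, NB), ceildiv(n, NB), NX, NY);             // 4 x 3 blocks of 32 x 8 threads
    printf("subtiles per tile: %d, NY-strided loads per subtile: %d\n",
           NB / NX, NB / NY);                                   // 1 subtile, 4 loads
    return 0;
}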
98380c70655b01015fa5f5293d1055671d36766c.hip
// !!! This is a file automatically generated by hipify!!! /** * Project TACO: Parallel ACO algorithm for TSP * 15-418 Parallel Algorithms - Final Project * Ivan Wang, Carl Lin */ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <math.h> #include <math_functions.h> #include "CycleTimer.h" #include "ants.h" #define MAX_THREADS 128 __device__ static inline int toIndex(int i, int j) { return i * MAX_CITIES + j; } __device__ static inline float cudaAntProduct(float *edges, float *phero, int city) { // TODO: delete this when we're sure it's fixed /*if (isinf(pow(1.0 / edges[toIndex(from, to)], BETA))) { printf("OH NO INFINITY: dist = %1.15f\n", edges[toIndex(from, to)]); } if (pow(phero[toIndex(from, to)], ALPHA) * pow(1.0 / edges[toIndex(from, to)], BETA) == 0) { printf("I'M ZERO\n"); } if (isnan(powf(1.0 / edges[city], BETA))) { printf("IS NAN: city %d\n", city); return 0; }*/ return (powf(phero[city], ALPHA) * powf(1.0 / edges[city], BETA)); } __global__ void init_rand(hiprandState_t *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; hiprand_init(418, idx, 0, &state[idx]); } __device__ static inline void make_rand(hiprandState_t *state, float *randArray) { int idx = blockIdx.x * blockDim.x + threadIdx.x; randArray[idx] = hiprand_uniform(&state[idx]); } // Randomly select a city based off an array of values (return the index) __device__ int selectCity(hiprandState_t *state, float *randArray, float *start, int length) { float sum = 0; int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < length; i++) { /*if (start[i] > 0) { printf("%1.15f\n", start[i]); }*/ sum += start[i]; } if (sum == 0.0) { return 0; } /*if (isnan(sum)) { printf("error; value is nan!\n"); return 0; }*/ make_rand(state, randArray); float luckyNumber = (float)randArray[idx]; float acc = 0; int lastBestIndex = 0; for (int i = 0; i < length; i++) { float value = start[i] / sum; if (value > 0) { acc += value; lastBestIndex = i; if (acc >= luckyNumber) { /*if (idx == 0) { printf("SUM: %1.15f, ACC: %1.15f, LUCKYNUM: %1.15f, i: %d, length: %d\n", sum, acc, luckyNumber, i, length); }*/ return i; } } } //printf("warning: acc did not reach luckyNumber in selectNextCity\n"); //printf("sum: %1.15f, acc: %1.15f, luckyNumber: %1.15f\n", sum, acc, luckyNumber); return lastBestIndex; } __device__ static inline int calculateFrom(int i) { //find least triangle number less than i int row = (int)(-1 + (sqrt((float)(1 + 8 * i)))) >> 1; int tnum = (row * (row + 1)) >> 1; int remain = i - tnum; return MAX_CITIES - 1 - remain; } __device__ static inline int calculateTo(int i) { //find least triangle number less than i int row = (int)(-1 + (sqrt((float)(1 + 8 * i)))) >> 1; int tnum = (row * (row + 1)) >> 1; int remain = i - tnum; return row - remain; } __global__ void initPhero(float *phero) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= MAX_CITIES * MAX_CITIES) { return; } phero[idx] = INIT_PHER; } __global__ void copyBestPath(int i, int *bestPathResult, int *pathResults) { memcpy(bestPathResult, &pathResults[i * MAX_ANTS], MAX_CITIES * sizeof(int)); } __global__ void constructAntTour(float *edges, float *phero, hiprandState_t *state, float *randArray, float *tourResults, int *pathResults) { __shared__ bool tabu[MAX_CITIES]; //TODO: put in register wtf is that //__shared__ int path[MAX_CITIES]; __shared__ int current_city; __shared__ int bestCities[MAX_THREADS]; __shared__ float cityProb[MAX_THREADS]; __shared__ float 
localEdges[MAX_CITIES]; __shared__ float localPhero[MAX_CITIES]; const int citiesPerThread = (MAX_CITIES + MAX_THREADS - 1) / MAX_THREADS; const int startCityIndex = threadIdx.x * citiesPerThread; const int antId = blockIdx.x; float tour_length = 0.0; if (startCityIndex >= MAX_CITIES) { cityProb[threadIdx.x] = 0; return; } float localCityProb[citiesPerThread]; if (threadIdx.x == 0) { make_rand(state, randArray); current_city = randArray[antId * blockDim.x + threadIdx.x] * MAX_CITIES; tabu[current_city] = true; pathResults[antId * MAX_CITIES] = current_city; } __syncthreads(); //initiailize tabu list to zero for (int i = 0; i < citiesPerThread; i++) { int city = i + startCityIndex; if (city >= MAX_CITIES) { break; } if (city != current_city) { tabu[city] = false; } } __syncthreads(); //check if we have finished the tour for (int num_visited = 1; num_visited < MAX_CITIES; num_visited++) { int tile; if (startCityIndex + citiesPerThread >= MAX_CITIES) { tile = MAX_CITIES - startCityIndex; } else { tile = citiesPerThread; } memcpy(&localEdges[startCityIndex], &edges[current_city * MAX_CITIES + startCityIndex], tile * sizeof(float)); memcpy(&localPhero[startCityIndex], &phero[current_city * MAX_CITIES + startCityIndex], tile * sizeof(float)); __syncthreads(); //pick next (unvisited) city for (int i = 0; i < citiesPerThread; i++) { int city = i + startCityIndex; if (city >= MAX_CITIES || tabu[city]) { localCityProb[i] = 0.0; } else { localCityProb[i] = cudaAntProduct(localEdges, localPhero, city); //printf("city prob: %1.15f\n", localCityProb[i]); } } //if (threadIdx.x == 0) // printf("cuda ant product done\n"); //for each thread, look through cities and stochastically select one int localCity = selectCity(state, randArray, localCityProb, citiesPerThread); cityProb[threadIdx.x] = localCityProb[localCity]; bestCities[threadIdx.x] = localCity + startCityIndex; __syncthreads(); //reduce over bestCities and randomly select city if (threadIdx.x == 0) { int nextIndex = selectCity(state, randArray, cityProb, MAX_THREADS); int next_city = bestCities[nextIndex]; //printf("best cities done in block %d\n", blockIdx.x); /*float best_distance = MAX_DIST * 2; int next_city = -1; for (int i = 0; i < MAX_THREADS && i < MAX_CITIES; i++) { if (cityProb[i] == 0) { continue; } //printf("best city[%d]: %d\n", i, bestCities[i]); float distance = localEdges[bestCities[i]]; if (distance < best_distance) { best_distance = distance; next_city = bestCities[i]; } }*/ /*if (next_city == -1) { printf("OH NO\n"); }*/ /*if (antId == 0) { printf("current: %d, next: %d\n", current_city, next_city); }*/ tour_length += localEdges[next_city]; pathResults[antId * MAX_CITIES + num_visited] = next_city; current_city = next_city; tabu[current_city] = true; } __syncthreads(); //TODO: move this syncthreads? 
} //extract best ant tour length and write the paths out to global memory if (threadIdx.x == 0) { tour_length += edges[toIndex(current_city, pathResults[antId * MAX_CITIES])]; tourResults[antId] = tour_length; } } // Evaporate pheromones along each edge __global__ void evaporatePheromones(float *phero) { int current_phero = blockIdx.x * blockDim.x + threadIdx.x; if (current_phero >= NUM_EDGES) { return; } int from = calculateFrom(current_phero); //triangle number thing int to = calculateTo(current_phero); int idx = toIndex(from, to); phero[idx] *= 1.0 - RHO; if (phero[idx] < 0.0) { phero[idx] = INIT_PHER; } phero[toIndex(to, from)] = phero[idx]; } // Add new pheromone to the trails __global__ void updateTrailsAtomic(float *phero, int *paths, float *tourLengths) { int antId = blockIdx.x; int from, to; for (int i = 0; i < MAX_CITIES; i++) { from = paths[toIndex(antId, i)]; if (i < MAX_CITIES - 1) { to = paths[toIndex(antId, i+1)]; } else { to = paths[toIndex(antId, 0)]; } if (from < to) { int tmp = from; from = to; to = tmp; } atomicAdd(&phero[toIndex(from, to)], QVAL / tourLengths[antId]); } } __global__ void updateSymmetricPhero(float *phero) { for (int i = 0; i < MAX_CITIES; i++) { for (int j = 0; j < i; j++) { phero[toIndex(j, i)] = phero[toIndex(i, j)]; } } } __global__ void updateTrails(float *phero, int *paths, float *tourLengths) { //__shared__ float localPaths[MAX_CITIES]; int numPhero = (NUM_EDGES + (blockDim.x * (MAX_ANTS * 2) - 1)) / (blockDim.x * (MAX_ANTS * 2)); int blockStartPhero = numPhero * blockDim.x * blockIdx.x; int from, to; int cur_phero; for (int i = 0; i < MAX_ANTS; i++) { // For each ant, cache paths in shared memory /*int tile; if (startCityIndex + citiesPerThread >= MAX_CITIES) { tile = MAX_CITIES - startCityIndex; } else { tile = citiesPerThread; } memcpy(&localPaths[startCityIndex], &paths[i * MAX_CITIES + startCityIndex], tile * sizeof(float)); */ // TODO: figure out tiling /*if (threadIdx.x == 0) { memcpy(&localPaths, &paths[i * MAX_CITIES], MAX_CITIES * sizeof(float)); } __syncthreads(); */ for (int j = 0; j < numPhero; j++) { cur_phero = blockStartPhero + j + numPhero * threadIdx.x; if (cur_phero >= NUM_EDGES) { break; } from = calculateFrom(cur_phero); //triangle number thing to = calculateTo(cur_phero); bool touched = false; int checkTo; int checkFrom; for (int k = 0; k < MAX_CITIES; k++) { checkFrom = paths[toIndex(i, k)]; if (k < MAX_CITIES - 1) { checkTo = paths[toIndex(i, k + 1)]; } else { checkTo = paths[toIndex(i, 0)]; } if ((checkFrom == from && checkTo == to) || (checkFrom == to && checkTo == from)) { touched = true; break; } } if (touched) { int idx = toIndex(from, to); phero[idx] += (QVAL / tourLengths[i]); phero[toIndex(to, from)] = phero[idx]; } } //__syncthreads(); } } __global__ void checkPhero(float *pheroSeq, float *phero) { for (int i = 0; i < MAX_CITIES; i++) { for (int j = 0; j < MAX_CITIES; j++) { if (i == j) continue; int idx = toIndex(i, j); if (fabsf(pheroSeq[idx] - phero[idx]) > 0.001) { printf("PHERO IS BROKEN at (%d, %d); expected: %1.15f, actual: %1.15f\n", i, j, pheroSeq[idx], phero[idx]); } } } } __global__ void seqPheroUpdate(float *phero, float *pheroReal, int *paths, float *tourLengths) { memcpy(phero, pheroReal, sizeof(float) * MAX_CITIES * MAX_CITIES); int from, to; // evaporate for (from = 0; from < MAX_CITIES; from++) { for (to = 0; to < from; to++) { phero[toIndex(from, to)] *= 1.0 - RHO; if (phero[toIndex(from, to)] < 0.0) { phero[toIndex(from, to)] = INIT_PHER; } phero[toIndex(to, from)] = phero[toIndex(from, to)]; } 
} //Add new pheromone to the trails for (int ant = 0; ant < MAX_ANTS; ant++) { for (int i = 0; i < MAX_CITIES; i++) { from = paths[toIndex(ant, i)]; if (i < MAX_CITIES - 1) { to = paths[toIndex(ant, i+1)]; } else { to = paths[toIndex(ant, 0)]; } phero[toIndex(from, to)] += (QVAL / tourLengths[ant]); phero[toIndex(to, from)] = phero[toIndex(from, to)]; } } } float cuda_ACO(EdgeMatrix *dist, int *bestPath) { dim3 numAntBlocks(MAX_ANTS); dim3 numTwoAntBlocks(MAX_ANTS * 2); dim3 numCityBlocks((MAX_CITIES + MAX_THREADS - 1) / MAX_THREADS); dim3 numEdgesBlocks((MAX_CITIES * MAX_CITIES + MAX_THREADS - 1) / MAX_THREADS); dim3 numPheroBlocks((NUM_EDGES + MAX_THREADS - 1) / MAX_THREADS); dim3 threadsPerBlock(MAX_THREADS); dim3 single(1); int best_index; float best = (float) MAX_TOUR; // allocate host memory float *copiedTourResults = new float[MAX_ANTS]; // allocate device memory float *tourResults; int *pathResults; int *bestPathResult; float *deviceEdges; float *phero; float *testPhero; float *randArray; hiprandState_t *randState; hipMalloc((void**)&pathResults, sizeof(int) * MAX_ANTS * MAX_CITIES); hipMalloc((void**)&tourResults, sizeof(float) * MAX_ANTS); hipMalloc((void**)&deviceEdges, sizeof(float) * MAX_CITIES * MAX_CITIES); hipMalloc((void**)&phero, sizeof(float) * MAX_CITIES * MAX_CITIES); hipMalloc((void**)&testPhero, sizeof(float) * MAX_CITIES * MAX_CITIES); hipMalloc(&randState, sizeof(hiprandState_t) * MAX_ANTS * MAX_THREADS); hipMalloc((void**)&randArray, sizeof(float) * MAX_ANTS * MAX_THREADS); hipMalloc((void**)&bestPathResult, sizeof(int) * MAX_CITIES); hipLaunchKernelGGL(( init_rand), dim3(numAntBlocks), dim3(threadsPerBlock), 0, 0, randState); hipMemcpy(deviceEdges, dist->get_array(), sizeof(float) * MAX_CITIES * MAX_CITIES, hipMemcpyHostToDevice); hipLaunchKernelGGL(( initPhero), dim3(numEdgesBlocks), dim3(threadsPerBlock), 0, 0, phero); hipDeviceSynchronize(); float pathTime = 0; float pheroTime = 0; float sBegin, sEnd; for (int i = 0; i < MAX_TOURS; i++) { best_index = -1; sBegin = CycleTimer::currentSeconds(); hipLaunchKernelGGL(( constructAntTour), dim3(numAntBlocks), dim3(threadsPerBlock), 0, 0, deviceEdges, phero, randState, randArray, tourResults, pathResults); hipDeviceSynchronize(); sEnd = CycleTimer::currentSeconds(); pathTime += sEnd - sBegin; hipMemcpy(copiedTourResults, tourResults, sizeof(float) * MAX_ANTS, hipMemcpyDeviceToHost); //find the best tour result from all the ants for (int j = 0; j < MAX_ANTS; j++) { if (copiedTourResults[j] < best) { best = copiedTourResults[j]; //printf("new best: %1.f\n", best); best_index = j; } } //copy the corresponding tour for the best ant if (best_index != -1) { hipLaunchKernelGGL(( copyBestPath), dim3(single), dim3(single), 0, 0, best_index, bestPathResult, pathResults); } //seqPheroUpdate<<<single, single>>>(testPhero, phero, pathResults, tourResults); //hipDeviceSynchronize(); //evaporate pheromones in parallel sBegin = CycleTimer::currentSeconds(); hipLaunchKernelGGL(( evaporatePheromones), dim3(numPheroBlocks), dim3(threadsPerBlock), 0, 0, phero); hipDeviceSynchronize(); //pheromone update hipLaunchKernelGGL(( updateTrailsAtomic), dim3(numAntBlocks), dim3(single), 0, 0, phero, pathResults, tourResults); hipDeviceSynchronize(); hipLaunchKernelGGL(( updateSymmetricPhero), dim3(single), dim3(single), 0, 0, phero); hipDeviceSynchronize(); sEnd = CycleTimer::currentSeconds(); pheroTime += (sEnd - sBegin); //checkPhero<<<single, single>>>(testPhero, phero); //hipDeviceSynchronize(); } printf("PATHTIME: %f, PHEROTIME: %f\n", 
pathTime, pheroTime); hipMemcpy(bestPath, bestPathResult, MAX_CITIES * sizeof(int), hipMemcpyDeviceToHost); hipFree(bestPathResult); hipFree(pathResults); hipFree(tourResults); hipFree(deviceEdges); hipFree(randArray); hipFree(randState); delete[] copiedTourResults; return best; }
98380c70655b01015fa5f5293d1055671d36766c.cu
/** * Project TACO: Parallel ACO algorithm for TSP * 15-418 Parallel Algorithms - Final Project * Ivan Wang, Carl Lin */ #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <curand_kernel.h> #include <math.h> #include <math_functions.h> #include "CycleTimer.h" #include "ants.h" #define MAX_THREADS 128 __device__ static inline int toIndex(int i, int j) { return i * MAX_CITIES + j; } __device__ static inline float cudaAntProduct(float *edges, float *phero, int city) { // TODO: delete this when we're sure it's fixed /*if (isinf(pow(1.0 / edges[toIndex(from, to)], BETA))) { printf("OH NO INFINITY: dist = %1.15f\n", edges[toIndex(from, to)]); } if (pow(phero[toIndex(from, to)], ALPHA) * pow(1.0 / edges[toIndex(from, to)], BETA) == 0) { printf("I'M ZERO\n"); } if (isnan(powf(1.0 / edges[city], BETA))) { printf("IS NAN: city %d\n", city); return 0; }*/ return (powf(phero[city], ALPHA) * powf(1.0 / edges[city], BETA)); } __global__ void init_rand(curandState *state) { int idx = blockIdx.x * blockDim.x + threadIdx.x; curand_init(418, idx, 0, &state[idx]); } __device__ static inline void make_rand(curandState *state, float *randArray) { int idx = blockIdx.x * blockDim.x + threadIdx.x; randArray[idx] = curand_uniform(&state[idx]); } // Randomly select a city based off an array of values (return the index) __device__ int selectCity(curandState *state, float *randArray, float *start, int length) { float sum = 0; int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = 0; i < length; i++) { /*if (start[i] > 0) { printf("%1.15f\n", start[i]); }*/ sum += start[i]; } if (sum == 0.0) { return 0; } /*if (isnan(sum)) { printf("error; value is nan!\n"); return 0; }*/ make_rand(state, randArray); float luckyNumber = (float)randArray[idx]; float acc = 0; int lastBestIndex = 0; for (int i = 0; i < length; i++) { float value = start[i] / sum; if (value > 0) { acc += value; lastBestIndex = i; if (acc >= luckyNumber) { /*if (idx == 0) { printf("SUM: %1.15f, ACC: %1.15f, LUCKYNUM: %1.15f, i: %d, length: %d\n", sum, acc, luckyNumber, i, length); }*/ return i; } } } //printf("warning: acc did not reach luckyNumber in selectNextCity\n"); //printf("sum: %1.15f, acc: %1.15f, luckyNumber: %1.15f\n", sum, acc, luckyNumber); return lastBestIndex; } __device__ static inline int calculateFrom(int i) { //find least triangle number less than i int row = (int)(-1 + (sqrt((float)(1 + 8 * i)))) >> 1; int tnum = (row * (row + 1)) >> 1; int remain = i - tnum; return MAX_CITIES - 1 - remain; } __device__ static inline int calculateTo(int i) { //find least triangle number less than i int row = (int)(-1 + (sqrt((float)(1 + 8 * i)))) >> 1; int tnum = (row * (row + 1)) >> 1; int remain = i - tnum; return row - remain; } __global__ void initPhero(float *phero) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= MAX_CITIES * MAX_CITIES) { return; } phero[idx] = INIT_PHER; } __global__ void copyBestPath(int i, int *bestPathResult, int *pathResults) { memcpy(bestPathResult, &pathResults[i * MAX_ANTS], MAX_CITIES * sizeof(int)); } __global__ void constructAntTour(float *edges, float *phero, curandState *state, float *randArray, float *tourResults, int *pathResults) { __shared__ bool tabu[MAX_CITIES]; //TODO: put in register wtf is that //__shared__ int path[MAX_CITIES]; __shared__ int current_city; __shared__ int bestCities[MAX_THREADS]; __shared__ float cityProb[MAX_THREADS]; __shared__ float localEdges[MAX_CITIES]; __shared__ float localPhero[MAX_CITIES]; const int citiesPerThread = (MAX_CITIES + 
MAX_THREADS - 1) / MAX_THREADS; const int startCityIndex = threadIdx.x * citiesPerThread; const int antId = blockIdx.x; float tour_length = 0.0; if (startCityIndex >= MAX_CITIES) { cityProb[threadIdx.x] = 0; return; } float localCityProb[citiesPerThread]; if (threadIdx.x == 0) { make_rand(state, randArray); current_city = randArray[antId * blockDim.x + threadIdx.x] * MAX_CITIES; tabu[current_city] = true; pathResults[antId * MAX_CITIES] = current_city; } __syncthreads(); //initiailize tabu list to zero for (int i = 0; i < citiesPerThread; i++) { int city = i + startCityIndex; if (city >= MAX_CITIES) { break; } if (city != current_city) { tabu[city] = false; } } __syncthreads(); //check if we have finished the tour for (int num_visited = 1; num_visited < MAX_CITIES; num_visited++) { int tile; if (startCityIndex + citiesPerThread >= MAX_CITIES) { tile = MAX_CITIES - startCityIndex; } else { tile = citiesPerThread; } memcpy(&localEdges[startCityIndex], &edges[current_city * MAX_CITIES + startCityIndex], tile * sizeof(float)); memcpy(&localPhero[startCityIndex], &phero[current_city * MAX_CITIES + startCityIndex], tile * sizeof(float)); __syncthreads(); //pick next (unvisited) city for (int i = 0; i < citiesPerThread; i++) { int city = i + startCityIndex; if (city >= MAX_CITIES || tabu[city]) { localCityProb[i] = 0.0; } else { localCityProb[i] = cudaAntProduct(localEdges, localPhero, city); //printf("city prob: %1.15f\n", localCityProb[i]); } } //if (threadIdx.x == 0) // printf("cuda ant product done\n"); //for each thread, look through cities and stochastically select one int localCity = selectCity(state, randArray, localCityProb, citiesPerThread); cityProb[threadIdx.x] = localCityProb[localCity]; bestCities[threadIdx.x] = localCity + startCityIndex; __syncthreads(); //reduce over bestCities and randomly select city if (threadIdx.x == 0) { int nextIndex = selectCity(state, randArray, cityProb, MAX_THREADS); int next_city = bestCities[nextIndex]; //printf("best cities done in block %d\n", blockIdx.x); /*float best_distance = MAX_DIST * 2; int next_city = -1; for (int i = 0; i < MAX_THREADS && i < MAX_CITIES; i++) { if (cityProb[i] == 0) { continue; } //printf("best city[%d]: %d\n", i, bestCities[i]); float distance = localEdges[bestCities[i]]; if (distance < best_distance) { best_distance = distance; next_city = bestCities[i]; } }*/ /*if (next_city == -1) { printf("OH NO\n"); }*/ /*if (antId == 0) { printf("current: %d, next: %d\n", current_city, next_city); }*/ tour_length += localEdges[next_city]; pathResults[antId * MAX_CITIES + num_visited] = next_city; current_city = next_city; tabu[current_city] = true; } __syncthreads(); //TODO: move this syncthreads? 
} //extract best ant tour length and write the paths out to global memory if (threadIdx.x == 0) { tour_length += edges[toIndex(current_city, pathResults[antId * MAX_CITIES])]; tourResults[antId] = tour_length; } } // Evaporate pheromones along each edge __global__ void evaporatePheromones(float *phero) { int current_phero = blockIdx.x * blockDim.x + threadIdx.x; if (current_phero >= NUM_EDGES) { return; } int from = calculateFrom(current_phero); //triangle number thing int to = calculateTo(current_phero); int idx = toIndex(from, to); phero[idx] *= 1.0 - RHO; if (phero[idx] < 0.0) { phero[idx] = INIT_PHER; } phero[toIndex(to, from)] = phero[idx]; } // Add new pheromone to the trails __global__ void updateTrailsAtomic(float *phero, int *paths, float *tourLengths) { int antId = blockIdx.x; int from, to; for (int i = 0; i < MAX_CITIES; i++) { from = paths[toIndex(antId, i)]; if (i < MAX_CITIES - 1) { to = paths[toIndex(antId, i+1)]; } else { to = paths[toIndex(antId, 0)]; } if (from < to) { int tmp = from; from = to; to = tmp; } atomicAdd(&phero[toIndex(from, to)], QVAL / tourLengths[antId]); } } __global__ void updateSymmetricPhero(float *phero) { for (int i = 0; i < MAX_CITIES; i++) { for (int j = 0; j < i; j++) { phero[toIndex(j, i)] = phero[toIndex(i, j)]; } } } __global__ void updateTrails(float *phero, int *paths, float *tourLengths) { //__shared__ float localPaths[MAX_CITIES]; int numPhero = (NUM_EDGES + (blockDim.x * (MAX_ANTS * 2) - 1)) / (blockDim.x * (MAX_ANTS * 2)); int blockStartPhero = numPhero * blockDim.x * blockIdx.x; int from, to; int cur_phero; for (int i = 0; i < MAX_ANTS; i++) { // For each ant, cache paths in shared memory /*int tile; if (startCityIndex + citiesPerThread >= MAX_CITIES) { tile = MAX_CITIES - startCityIndex; } else { tile = citiesPerThread; } memcpy(&localPaths[startCityIndex], &paths[i * MAX_CITIES + startCityIndex], tile * sizeof(float)); */ // TODO: figure out tiling /*if (threadIdx.x == 0) { memcpy(&localPaths, &paths[i * MAX_CITIES], MAX_CITIES * sizeof(float)); } __syncthreads(); */ for (int j = 0; j < numPhero; j++) { cur_phero = blockStartPhero + j + numPhero * threadIdx.x; if (cur_phero >= NUM_EDGES) { break; } from = calculateFrom(cur_phero); //triangle number thing to = calculateTo(cur_phero); bool touched = false; int checkTo; int checkFrom; for (int k = 0; k < MAX_CITIES; k++) { checkFrom = paths[toIndex(i, k)]; if (k < MAX_CITIES - 1) { checkTo = paths[toIndex(i, k + 1)]; } else { checkTo = paths[toIndex(i, 0)]; } if ((checkFrom == from && checkTo == to) || (checkFrom == to && checkTo == from)) { touched = true; break; } } if (touched) { int idx = toIndex(from, to); phero[idx] += (QVAL / tourLengths[i]); phero[toIndex(to, from)] = phero[idx]; } } //__syncthreads(); } } __global__ void checkPhero(float *pheroSeq, float *phero) { for (int i = 0; i < MAX_CITIES; i++) { for (int j = 0; j < MAX_CITIES; j++) { if (i == j) continue; int idx = toIndex(i, j); if (fabsf(pheroSeq[idx] - phero[idx]) > 0.001) { printf("PHERO IS BROKEN at (%d, %d); expected: %1.15f, actual: %1.15f\n", i, j, pheroSeq[idx], phero[idx]); } } } } __global__ void seqPheroUpdate(float *phero, float *pheroReal, int *paths, float *tourLengths) { memcpy(phero, pheroReal, sizeof(float) * MAX_CITIES * MAX_CITIES); int from, to; // evaporate for (from = 0; from < MAX_CITIES; from++) { for (to = 0; to < from; to++) { phero[toIndex(from, to)] *= 1.0 - RHO; if (phero[toIndex(from, to)] < 0.0) { phero[toIndex(from, to)] = INIT_PHER; } phero[toIndex(to, from)] = phero[toIndex(from, to)]; } 
} //Add new pheromone to the trails for (int ant = 0; ant < MAX_ANTS; ant++) { for (int i = 0; i < MAX_CITIES; i++) { from = paths[toIndex(ant, i)]; if (i < MAX_CITIES - 1) { to = paths[toIndex(ant, i+1)]; } else { to = paths[toIndex(ant, 0)]; } phero[toIndex(from, to)] += (QVAL / tourLengths[ant]); phero[toIndex(to, from)] = phero[toIndex(from, to)]; } } } float cuda_ACO(EdgeMatrix *dist, int *bestPath) { dim3 numAntBlocks(MAX_ANTS); dim3 numTwoAntBlocks(MAX_ANTS * 2); dim3 numCityBlocks((MAX_CITIES + MAX_THREADS - 1) / MAX_THREADS); dim3 numEdgesBlocks((MAX_CITIES * MAX_CITIES + MAX_THREADS - 1) / MAX_THREADS); dim3 numPheroBlocks((NUM_EDGES + MAX_THREADS - 1) / MAX_THREADS); dim3 threadsPerBlock(MAX_THREADS); dim3 single(1); int best_index; float best = (float) MAX_TOUR; // allocate host memory float *copiedTourResults = new float[MAX_ANTS]; // allocate device memory float *tourResults; int *pathResults; int *bestPathResult; float *deviceEdges; float *phero; float *testPhero; float *randArray; curandState *randState; cudaMalloc((void**)&pathResults, sizeof(int) * MAX_ANTS * MAX_CITIES); cudaMalloc((void**)&tourResults, sizeof(float) * MAX_ANTS); cudaMalloc((void**)&deviceEdges, sizeof(float) * MAX_CITIES * MAX_CITIES); cudaMalloc((void**)&phero, sizeof(float) * MAX_CITIES * MAX_CITIES); cudaMalloc((void**)&testPhero, sizeof(float) * MAX_CITIES * MAX_CITIES); cudaMalloc(&randState, sizeof(curandState) * MAX_ANTS * MAX_THREADS); cudaMalloc((void**)&randArray, sizeof(float) * MAX_ANTS * MAX_THREADS); cudaMalloc((void**)&bestPathResult, sizeof(int) * MAX_CITIES); init_rand<<<numAntBlocks, threadsPerBlock>>>(randState); cudaMemcpy(deviceEdges, dist->get_array(), sizeof(float) * MAX_CITIES * MAX_CITIES, cudaMemcpyHostToDevice); initPhero<<<numEdgesBlocks, threadsPerBlock>>>(phero); cudaThreadSynchronize(); float pathTime = 0; float pheroTime = 0; float sBegin, sEnd; for (int i = 0; i < MAX_TOURS; i++) { best_index = -1; sBegin = CycleTimer::currentSeconds(); constructAntTour<<<numAntBlocks, threadsPerBlock>>>(deviceEdges, phero, randState, randArray, tourResults, pathResults); cudaThreadSynchronize(); sEnd = CycleTimer::currentSeconds(); pathTime += sEnd - sBegin; cudaMemcpy(copiedTourResults, tourResults, sizeof(float) * MAX_ANTS, cudaMemcpyDeviceToHost); //find the best tour result from all the ants for (int j = 0; j < MAX_ANTS; j++) { if (copiedTourResults[j] < best) { best = copiedTourResults[j]; //printf("new best: %1.f\n", best); best_index = j; } } //copy the corresponding tour for the best ant if (best_index != -1) { copyBestPath<<<single, single>>>(best_index, bestPathResult, pathResults); } //seqPheroUpdate<<<single, single>>>(testPhero, phero, pathResults, tourResults); //cudaThreadSynchronize(); //evaporate pheromones in parallel sBegin = CycleTimer::currentSeconds(); evaporatePheromones<<<numPheroBlocks, threadsPerBlock>>>(phero); cudaThreadSynchronize(); //pheromone update updateTrailsAtomic<<<numAntBlocks, single>>>(phero, pathResults, tourResults); cudaThreadSynchronize(); updateSymmetricPhero<<<single, single>>>(phero); cudaThreadSynchronize(); sEnd = CycleTimer::currentSeconds(); pheroTime += (sEnd - sBegin); //checkPhero<<<single, single>>>(testPhero, phero); //cudaThreadSynchronize(); } printf("PATHTIME: %f, PHEROTIME: %f\n", pathTime, pheroTime); cudaMemcpy(bestPath, bestPathResult, MAX_CITIES * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(bestPathResult); cudaFree(pathResults); cudaFree(tourResults); cudaFree(deviceEdges); cudaFree(randArray); cudaFree(randState); 
delete[] copiedTourResults; return best; }
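Both ACO versions above enumerate the NUM_EDGES upper-triangle entries of the symmetric pheromone matrix through calculateFrom() and calculateTo(). The standalone host replica below copies those two formulas verbatim so the edge-index to (from, to) mapping can be printed and inspected; MAX_CITIES = 10 is an assumed toy value, the real constant lives in ants.h.
// Hypothetical host-side replica of the triangular edge indexing used by the pheromone kernels above.
#include <cstdio>
#include <cmath>
int main() {
    const int MAX_CITIES = 10;                              // assumed toy value (real one is in ants.h)
    const int NUM_EDGES  = MAX_CITIES * (MAX_CITIES - 1) / 2;
    for (int i = 0; i < 8 && i < NUM_EDGES; i++) {
        int row    = (int)(-1 + (std::sqrt((float)(1 + 8 * i)))) >> 1;  // formula from calculateFrom/To
        int tnum   = (row * (row + 1)) >> 1;
        int remain = i - tnum;
        int from   = MAX_CITIES - 1 - remain;               // calculateFrom(i)
        int to     = row - remain;                          // calculateTo(i)
        printf("edge %2d -> phero[%d][%d]\n", i, from, to);
    }
    return 0;
}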
4e38e620c4989978bd6179245bbad452224aeb2a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> using namespace at; // temporal fix for pytorch<=0.4.1 (see #9848) #define THREADS_PER_BLOCK 1024 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65536; return min(optimal_block_num, max_block_num); } __device__ inline int Loc2IndexSimple(const int h,const int w, const int height, const int width) { int index = h * width + w; return index; } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } template <typename scalar_t> __global__ void TENERGY(const int nthreads, const scalar_t *bottom_masks, const int scale_factor, const int k, const int height, const int width,const int channels, scalar_t *top_data) { //int index = blockIdx.x * blockDim.x + threadIdx.x; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int mask_index_a = Loc2Index(n,c, ph, pw, channels, height, width); int mask_index_n1 = Loc2Index(n,c, ph + 1, pw,channels,height, width); int mask_index_n2 = Loc2Index(n,c, ph - 1, pw,channels,height, width); int mask_index_n3 = Loc2Index(n,c, ph, pw + 1,channels,height, width); int mask_index_n4 = Loc2Index(n,c, ph, pw - 1,channels,height, width); int mask_index_n5 = Loc2Index(n,c, ph + 1, pw + 1,channels,height, width); int mask_index_n6 = Loc2Index(n,c, ph - 1, pw - 1,channels,height, width); int mask_index_n7 = Loc2Index(n,c, ph - 1, pw + 1,channels,height, width); int mask_index_n8 = Loc2Index(n,c, ph + 1, pw - 1,channels,height, width); if(k==0) top_data[mask_index_a] = bottom_masks[mask_index_a]; else if (pw > 0 && ph > 0 && pw < width -1 && ph < height -1) { if (top_data[mask_index_n1]>=k && top_data[mask_index_n2]>=k && top_data[mask_index_n3]>=k && top_data[mask_index_n4]>=k && top_data[mask_index_n5]>=k && top_data[mask_index_n6]>=k && top_data[mask_index_n7]>=k && top_data[mask_index_n8]>=k) { top_data[mask_index_a] = top_data[mask_index_a] + 1.0; } } } } int TENERGYLauncher(const at::Tensor masks, const int batch_size, const int scale_factor,const int max_energy, const int height,const int width, const int channels, at::Tensor output) { const int output_size = batch_size * channels * height * width; AT_DISPATCH_FLOATING_TYPES_AND_HALF( masks.type(), "TENERGYLauncherVote", ([&] { const scalar_t *bottom_masks = masks.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); for( int k = 0;k < max_energy; k++) { hipLaunchKernelGGL(( TENERGY<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_masks, scale_factor, k, height, width, channels, top_data); hipDeviceSynchronize(); } })); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; }
4e38e620c4989978bd6179245bbad452224aeb2a.cu
#include <ATen/ATen.h> #include <THC/THCAtomics.cuh> using namespace at; // temporal fix for pytorch<=0.4.1 (see #9848) #define THREADS_PER_BLOCK 1024 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65536; return min(optimal_block_num, max_block_num); } __device__ inline int Loc2IndexSimple(const int h,const int w, const int height, const int width) { int index = h * width + w; return index; } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } template <typename scalar_t> __global__ void TENERGY(const int nthreads, const scalar_t *bottom_masks, const int scale_factor, const int k, const int height, const int width,const int channels, scalar_t *top_data) { //int index = blockIdx.x * blockDim.x + threadIdx.x; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int mask_index_a = Loc2Index(n,c, ph, pw, channels, height, width); int mask_index_n1 = Loc2Index(n,c, ph + 1, pw,channels,height, width); int mask_index_n2 = Loc2Index(n,c, ph - 1, pw,channels,height, width); int mask_index_n3 = Loc2Index(n,c, ph, pw + 1,channels,height, width); int mask_index_n4 = Loc2Index(n,c, ph, pw - 1,channels,height, width); int mask_index_n5 = Loc2Index(n,c, ph + 1, pw + 1,channels,height, width); int mask_index_n6 = Loc2Index(n,c, ph - 1, pw - 1,channels,height, width); int mask_index_n7 = Loc2Index(n,c, ph - 1, pw + 1,channels,height, width); int mask_index_n8 = Loc2Index(n,c, ph + 1, pw - 1,channels,height, width); if(k==0) top_data[mask_index_a] = bottom_masks[mask_index_a]; else if (pw > 0 && ph > 0 && pw < width -1 && ph < height -1) { if (top_data[mask_index_n1]>=k && top_data[mask_index_n2]>=k && top_data[mask_index_n3]>=k && top_data[mask_index_n4]>=k && top_data[mask_index_n5]>=k && top_data[mask_index_n6]>=k && top_data[mask_index_n7]>=k && top_data[mask_index_n8]>=k) { top_data[mask_index_a] = top_data[mask_index_a] + 1.0; } } } } int TENERGYLauncher(const at::Tensor masks, const int batch_size, const int scale_factor,const int max_energy, const int height,const int width, const int channels, at::Tensor output) { const int output_size = batch_size * channels * height * width; AT_DISPATCH_FLOATING_TYPES_AND_HALF( masks.type(), "TENERGYLauncherVote", ([&] { const scalar_t *bottom_masks = masks.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); for( int k = 0;k < max_energy; k++) { TENERGY<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(output_size, bottom_masks, scale_factor, k, height, width, channels, top_data); cudaDeviceSynchronize(); } })); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
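TENERGYLauncher above runs the TENERGY kernel max_energy times; each pass increments interior pixels whose entire 8-neighbourhood has already reached the pass index. The fragment below is only a hedged caller sketch assembled from the signature shown above; the tensor shapes, the run_energy_example name, and the parameter values are assumptions, not code from the original extension.
// Hypothetical caller sketch for the launcher defined above (not part of the original extension).
#include <ATen/ATen.h>
int TENERGYLauncher(const at::Tensor masks, const int batch_size, const int scale_factor,
                    const int max_energy, const int height, const int width,
                    const int channels, at::Tensor output);   // signature copied from above
void run_energy_example() {
    const int n = 1, c = 1, h = 64, w = 64;                   // assumed shapes
    auto opts   = at::device(at::kCUDA).dtype(at::kFloat);
    auto masks  = at::ones({n, c, h, w}, opts);               // binary instance mask
    auto energy = at::zeros({n, c, h, w}, opts);              // accumulates per-pixel energy
    // Pass k keeps growing a pixel only while all 8 neighbours are still >= k, so the final
    // value rises roughly with the pixel's distance from the mask boundary.
    TENERGYLauncher(masks, n, /*scale_factor=*/1, /*max_energy=*/10, h, w, c, energy);
}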
784ba4c1ab8427d9399f03202b996b243d2d78ca.hip
// !!! This is a file automatically generated by hipify!!! /** * @file * * @author Lawrence Murray <[email protected]> * $Rev$ * $Date$ */ #include "device.hpp" #include "cuda.hpp" #include <vector> #ifdef ENABLE_CUDA hipDeviceProp_t bi::device_prop; #endif int bi::chooseDevice(const int rank) { int dev, num; std::vector<int> valid; /* build list of valid devices */ CUDA_CHECKED_CALL(hipGetDeviceCount(&num)); for (dev = 0; dev < num; ++dev) { CUDA_CHECKED_CALL(hipGetDeviceProperties(&device_prop, dev)); if (device_prop.major >= 2) { // require compute 2.0 or later valid.push_back(dev); } } BI_ERROR_MSG(valid.size() > 0, "No devices of at least compute 2.0 available"); /* select device */ CUDA_CHECKED_CALL(hipSetDevice(valid[rank % valid.size()])); CUDA_CHECKED_CALL(hipGetDevice(&dev)); return dev; } int bi::deviceIdealThreads() { return deviceOverloading()*deviceMultiprocessors()*deviceIdealThreadsPerBlock(); } int bi::deviceIdealThreadsPerBlock() { if (device_prop.major >= 2) { return 256; } else { return 128; } } int bi::deviceMultiprocessors() { return device_prop.multiProcessorCount; } int bi::deviceOverloading() { if (device_prop.major >= 3) { return 8; } else { return 4; } } int bi::deviceWarpSize() { return device_prop.warpSize; } size_t bi::deviceSharedMemPerBlock() { return device_prop.sharedMemPerBlock; }
784ba4c1ab8427d9399f03202b996b243d2d78ca.cu
/** * @file * * @author Lawrence Murray <[email protected]> * $Rev$ * $Date$ */ #include "device.hpp" #include "cuda.hpp" #include <vector> #ifdef ENABLE_CUDA cudaDeviceProp bi::device_prop; #endif int bi::chooseDevice(const int rank) { int dev, num; std::vector<int> valid; /* build list of valid devices */ CUDA_CHECKED_CALL(cudaGetDeviceCount(&num)); for (dev = 0; dev < num; ++dev) { CUDA_CHECKED_CALL(cudaGetDeviceProperties(&device_prop, dev)); if (device_prop.major >= 2) { // require compute 2.0 or later valid.push_back(dev); } } BI_ERROR_MSG(valid.size() > 0, "No devices of at least compute 2.0 available"); /* select device */ CUDA_CHECKED_CALL(cudaSetDevice(valid[rank % valid.size()])); CUDA_CHECKED_CALL(cudaGetDevice(&dev)); return dev; } int bi::deviceIdealThreads() { return deviceOverloading()*deviceMultiprocessors()*deviceIdealThreadsPerBlock(); } int bi::deviceIdealThreadsPerBlock() { if (device_prop.major >= 2) { return 256; } else { return 128; } } int bi::deviceMultiprocessors() { return device_prop.multiProcessorCount; } int bi::deviceOverloading() { if (device_prop.major >= 3) { return 8; } else { return 4; } } int bi::deviceWarpSize() { return device_prop.warpSize; } size_t bi::deviceSharedMemPerBlock() { return device_prop.sharedMemPerBlock; }
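bi::chooseDevice() above maps a rank onto the available compute-2.0+ devices in round-robin order. The sketch below is a hedged guess at a typical call site; the MPI scaffolding and the printf are additions of this note, while bi::chooseDevice(), bi::deviceIdealThreadsPerBlock(), and "device.hpp" come from the file itself.
// Hypothetical call site: one MPI rank per process picks its GPU through bi::chooseDevice().
#include <mpi.h>
#include <cstdio>
#include "device.hpp"
int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int dev = bi::chooseDevice(rank);            // rank % (number of compute >= 2.0 devices)
    printf("rank %d -> device %d, ideal threads/block %d\n",
           rank, dev, bi::deviceIdealThreadsPerBlock());
    MPI_Finalize();
    return 0;
}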
654e9b8bd99dc9bb0b348a63ad2010e54f9cd586.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "head.h" #define tpb 256 extern float *d_t; extern float *d_it; extern float *d_V; extern float *d_dV2; extern float *d_Vnew; extern float *d_m; extern float *d_h; extern float *d_jj; extern float *d_d; extern float *d_f; extern float *d_X; extern float *d_cai; extern float *d_m0; extern float *d_h0; extern float *d_jj0; extern float *d_d0; extern float *d_f0; extern float *d_X0; extern float *d_dVdt; extern float *dcai; __global__ void boundary(float *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; hipLaunchKernelGGL(( boundary), dim3(bpg), dim3(tpb), 0, 0, d_V); //hipDeviceSynchronize(); } __global__ void comp_dV2(float *d_V ,float *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dV2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2); //hipDeviceSynchronize(); } __device__ void comp_it(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int I, int i, int k, float *d_t) { //int id = k+nx+2+1+2*j; d_it[k] = 0.0; //comp_ina float gna = 23; float ena = ((R*temp) / frdy)*__logf(nao / nai); float am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - __expf(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); float bm = 0.08*__expf(-d_V[k+nx+2+1+2*i] / 11); float ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*__expf((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 3.56*__expf(0.079*d_V[k+nx+2+1+2*i]) + 310000 * __expf(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * __expf(0.2444*d_V[k+nx+2+1+2*i]) - 0.00003474*__expf(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + __expf(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*__expf(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + __expf(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + __expf((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*__expf(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + __expf(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } float mtau = 1 / (am + bm); float htau = 1 / (ah + bh); float jtau = 1 / (aj + bj); float mss = am*mtau; float hss = ah*htau; float jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*__expf(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*__expf(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*__expf(-d_t[k] / jtau); d_it[k] += gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*i] - ena); //comp_ical __shared__ float esi[tpb]; __shared__ float isi[tpb]; esi[I] = 7.7 - 13.0287*__logf(d_cai[k]); float ad = 50 * 0.095*__expf(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + __expf(-0.072*(d_V[k+nx+2+1+2*i] - 5))); float bd = 50 * 0.07*__expf(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + __expf(0.05*(d_V[k+nx+2+1+2*i] + 44))); float af = 50 * 0.012*__expf(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + __expf(0.15*(d_V[k+nx+2+1+2*i] + 28))); float bf = 50 * 0.0065*__expf(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + __expf(-0.2*(d_V[k+nx+2+1+2*i] + 30))); float taud = 1 / (ad + bd); float tauf = 1 / 
(af + bf); float dss = ad*taud; float fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*__expf(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*__expf(-d_t[k] / tauf); isi[I] = 0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*i] - esi[I]); dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + isi[I]; //comp_ik float gk = 0.282*sqrt(ko / 5.4); float ek = ((R*temp) / frdy)*__logf(ko / ki); //float prnak = 0.01833; //ek = ((R*temp) / frdy)*__logf((ko + prnak*nao) / (ki + prnak*nai)); float ax = 50 * 0.0005*__expf(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + __expf(0.057*(d_V[k+nx+2+1+2*i] + 50))); float bx = 50 * 0.0013*__expf(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + __expf(-0.04*(d_V[k+nx+2+1+2*i] + 20))); float taux = 1 / (ax + bx); float xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*__expf(-d_t[k] / taux); float Xi; if (d_V[k+nx+2+1+2*i] > -100) { Xi = 2.837*(__expf(0.04*(d_V[k+nx+2+1+2*i] + 77)) - 1)/((d_V[k+nx+2+1+2*i] + 77 + 1e-15)*__expf(0.04*(d_V[k+nx+2+1+2*i] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*i] - ek); //comp_ik1 float gk1 = 0.6047*(sqrt(ko / 5.4)); float ek1 = ((R*temp) / frdy)*__logf(ko / ki); float ak1 = 1.02 / (1 + __expf(0.2385*(d_V[k+nx+2+1+2*i] - ek1 - 59.215))); float bk1 = (0.49124*__expf(0.08032*(d_V[k+nx+2+1+2*i] - ek1 + 5.476))+__expf(0.06175*(d_V[k+nx+2+1+2*i] - ek1 - 594.31)))/(1 + __expf(-0.5143*(d_V[k+nx+2+1+2*i] - ek1 + 4.753))); float K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[k+nx+2+1+2*i] - ek1); //comp_ikp float gkp = 0.0183; float ekp = ((R*temp) / frdy)*__logf(ko / ki); float kp = 1 / (1 + __expf((7.488 - d_V[k+nx+2+1+2*i]) / 5.98)); d_it[k] += gkp*kp*(d_V[k+nx+2+1+2*i] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[k+nx+2+1+2*i] + 59.87); } __global__ void comp_dVdt(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dVdt), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void plane_waves(float *d_dVdt){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; hipLaunchKernelGGL(( plane_waves), dim3(bpg), dim3(tpb), 0, 0, d_dVdt); //hipDeviceSynchronize(); } __device__ void gate(float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_X, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int k){ d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } __global__ void comp_ODE_stim(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = 
i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id]) + 0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai if(i>0 && i<5){ d_dVdt[id] = -d_it[id] + (-st); }else{ d_dVdt[id] = -d_it[id]; } d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } __global__ void comp_ODE(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id])+0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id]; d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } void ODE_stim(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ODE_stim), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); // bpg = ((nx-5)*ny+tpb-1)/tpb; // comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 5); } void ODE(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ODE), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void Euler(float *d_V, float *d_dV2, float *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( Euler), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew); //hipDeviceSynchronize(); }
654e9b8bd99dc9bb0b348a63ad2010e54f9cd586.cu
#include "head.h" #define tpb 256 extern float *d_t; extern float *d_it; extern float *d_V; extern float *d_dV2; extern float *d_Vnew; extern float *d_m; extern float *d_h; extern float *d_jj; extern float *d_d; extern float *d_f; extern float *d_X; extern float *d_cai; extern float *d_m0; extern float *d_h0; extern float *d_jj0; extern float *d_d0; extern float *d_f0; extern float *d_X0; extern float *d_dVdt; extern float *dcai; __global__ void boundary(float *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; boundary<<<bpg, tpb>>>(d_V); //cudaDeviceSynchronize(); } __global__ void comp_dV2(float *d_V ,float *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; comp_dV2<<<bpg, tpb>>>(d_V, d_dV2); //cudaDeviceSynchronize(); } __device__ void comp_it(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int I, int i, int k, float *d_t) { //int id = k+nx+2+1+2*j; d_it[k] = 0.0; //comp_ina float gna = 23; float ena = ((R*temp) / frdy)*__logf(nao / nai); float am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - __expf(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); float bm = 0.08*__expf(-d_V[k+nx+2+1+2*i] / 11); float ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*__expf((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 3.56*__expf(0.079*d_V[k+nx+2+1+2*i]) + 310000 * __expf(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * __expf(0.2444*d_V[k+nx+2+1+2*i]) - 0.00003474*__expf(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + __expf(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*__expf(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + __expf(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + __expf((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*__expf(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + __expf(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } float mtau = 1 / (am + bm); float htau = 1 / (ah + bh); float jtau = 1 / (aj + bj); float mss = am*mtau; float hss = ah*htau; float jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*__expf(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*__expf(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*__expf(-d_t[k] / jtau); d_it[k] += gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*i] - ena); //comp_ical __shared__ float esi[tpb]; __shared__ float isi[tpb]; esi[I] = 7.7 - 13.0287*__logf(d_cai[k]); float ad = 50 * 0.095*__expf(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + __expf(-0.072*(d_V[k+nx+2+1+2*i] - 5))); float bd = 50 * 0.07*__expf(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + __expf(0.05*(d_V[k+nx+2+1+2*i] + 44))); float af = 50 * 0.012*__expf(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + __expf(0.15*(d_V[k+nx+2+1+2*i] + 28))); float bf = 50 * 0.0065*__expf(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + __expf(-0.2*(d_V[k+nx+2+1+2*i] + 30))); float taud = 1 / (ad + bd); float tauf = 1 / (af + bf); float dss = ad*taud; float fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*__expf(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*__expf(-d_t[k] / 
tauf); isi[I] = 0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*i] - esi[I]); dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + isi[I]; //comp_ik float gk = 0.282*sqrt(ko / 5.4); float ek = ((R*temp) / frdy)*__logf(ko / ki); //float prnak = 0.01833; //ek = ((R*temp) / frdy)*__logf((ko + prnak*nao) / (ki + prnak*nai)); float ax = 50 * 0.0005*__expf(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + __expf(0.057*(d_V[k+nx+2+1+2*i] + 50))); float bx = 50 * 0.0013*__expf(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + __expf(-0.04*(d_V[k+nx+2+1+2*i] + 20))); float taux = 1 / (ax + bx); float xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*__expf(-d_t[k] / taux); float Xi; if (d_V[k+nx+2+1+2*i] > -100) { Xi = 2.837*(__expf(0.04*(d_V[k+nx+2+1+2*i] + 77)) - 1)/((d_V[k+nx+2+1+2*i] + 77 + 1e-15)*__expf(0.04*(d_V[k+nx+2+1+2*i] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*i] - ek); //comp_ik1 float gk1 = 0.6047*(sqrt(ko / 5.4)); float ek1 = ((R*temp) / frdy)*__logf(ko / ki); float ak1 = 1.02 / (1 + __expf(0.2385*(d_V[k+nx+2+1+2*i] - ek1 - 59.215))); float bk1 = (0.49124*__expf(0.08032*(d_V[k+nx+2+1+2*i] - ek1 + 5.476))+__expf(0.06175*(d_V[k+nx+2+1+2*i] - ek1 - 594.31)))/(1 + __expf(-0.5143*(d_V[k+nx+2+1+2*i] - ek1 + 4.753))); float K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[k+nx+2+1+2*i] - ek1); //comp_ikp float gkp = 0.0183; float ekp = ((R*temp) / frdy)*__logf(ko / ki); float kp = 1 / (1 + __expf((7.488 - d_V[k+nx+2+1+2*i]) / 5.98)); d_it[k] += gkp*kp*(d_V[k+nx+2+1+2*i] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[k+nx+2+1+2*i] + 59.87); } __global__ void comp_dVdt(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_dVdt<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void plane_waves(float *d_dVdt){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; plane_waves<<<bpg, tpb>>>(d_dVdt); //cudaDeviceSynchronize(); } __device__ void gate(float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_X, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, int k){ d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } __global__ void comp_ODE_stim(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id]) + 0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for 
(ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai if(i>0 && i<5){ d_dVdt[id] = -d_it[id] + (-st); }else{ d_dVdt[id] = -d_it[id]; } d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } __global__ void comp_ODE(float *d_V, float *d_m, float *d_h, float *d_jj, float *d_d, float *d_f, float *d_cai, float *dcai, float *d_X, float *d_it, float *d_m0, float *d_h0, float *d_jj0, float *d_d0, float *d_f0, float *d_X0, float *d_dVdt, float *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id])+0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id]; d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } void ODE_stim(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_ODE_stim<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); // bpg = ((nx-5)*ny+tpb-1)/tpb; // comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 5); } void ODE(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void Euler(float *d_V, float *d_dV2, float *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; Euler<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew); //cudaDeviceSynchronize(); }
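The ODE kernels above (comp_ODE / comp_ODE_stim) pick a per-cell substep count from the local rate of change: k1 grows with |dV/dt|, gets a larger base value while the potential is rising, and is clamped so dt_max/k1 never drops below dt_min. The sketch below reproduces only that substep policy on the host so the sizing is easy to inspect; the dt_max/dt_min values and the sample dV/dt inputs are illustrative, the real constants live in head.h, which is not shown here.

#include <cmath>
#include <cstdio>

// Illustrative constants; the simulation's real values come from head.h.
static const float dt_max = 0.04f;   // assumed coarse step
static const float dt_min = 0.001f;  // assumed finest step

// Mirrors the substep selection used in comp_ODE / comp_ODE_stim.
int substeps(float dVdt)
{
    int k0 = (dVdt > 0.0f) ? 5 : 1;              // extra substeps while V is rising
    int k1 = k0 + (int)(std::fabs(dVdt) + 0.5f); // more substeps for faster dynamics
    int kmax = (int)(dt_max / dt_min);           // never step finer than dt_min
    return (k1 > kmax) ? kmax : k1;
}

int main()
{
    float samples[] = {-1.0f, 0.5f, 30.0f, 400.0f};
    for (float dVdt : samples) {
        int k1 = substeps(dVdt);
        std::printf("dVdt=%8.1f  ->  %2d substeps of %.5f\n", dVdt, k1, dt_max / k1);
    }
    return 0;
}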
9c06140c24c05ff20b0ac0235c03eadc4368f981.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <reduce.h> __device__ float update(float old,float opOutput,float *extraParams) { float mean = extraParams[1]; float curr = (opOutput - mean); return old + powf(curr,2); } __device__ float op(float d1,float d2,float *extraParams) { return d1 + d2; } __device__ float merge(float d1,float d2,float *extraParams) { return d1 + d2; } __device__ float op(float d1,float *extraParams) { return d1; } __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return sqrtf(reduction); } extern "C" __global__ void std_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
9c06140c24c05ff20b0ac0235c03eadc4368f981.cu
#include <reduce.h> __device__ float update(float old,float opOutput,float *extraParams) { float mean = extraParams[1]; float curr = (opOutput - mean); return old + powf(curr,2); } __device__ float op(float d1,float d2,float *extraParams) { return d1 + d2; } __device__ float merge(float d1,float d2,float *extraParams) { return d1 + d2; } __device__ float op(float d1,float *extraParams) { return d1; } __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return sqrtf(reduction); } extern "C" __global__ void std_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
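In the std_strided_float pair above, update() accumulates (x - mean)^2 with the mean supplied in extraParams[1], and postProcess() returns sqrtf of that accumulator, so the kernel produces sqrt(sum_i (x_i - mean)^2); the striding and reduction themselves are handled by the transform() driver declared in reduce.h (not shown), and any division by n or n-1 would have to happen outside the code shown. The following host-side reference computes the same quantity and can be used to check a device result; the function name and layout here are mine, not part of the framework.

#include <cmath>
#include <cstdio>

// sqrt of the sum of squared deviations from a pre-computed mean,
// over a strided view starting at 'offset' (no division by n at this stage).
float std_strided_ref(const float* x, int n, int offset, int inc, float mean)
{
    float acc = 0.0f;
    for (int i = 0; i < n; ++i) {
        float d = x[offset + i * inc] - mean;
        acc += d * d;
    }
    return std::sqrt(acc);
}

int main()
{
    float x[] = {1.0f, 2.0f, 3.0f, 4.0f};
    float mean = 2.5f;  // plays the role of extraParams[1]
    std::printf("%f\n", std_strided_ref(x, 4, 0, 1, mean));  // sqrt(5.0)
    return 0;
}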
b16c109493f6daddb5c0aa16e36e1a03e843e954.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <THHUNN/THHUNN.h> #include <THHUNN/common.h> #include <THH/THHTensor.hpp> #include <THH/THHThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/transform_reduce.h> #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ #include <thrust/system/hip/execution_policy.h> #endif #include <thrust/unique.h> #include <TH/THHalf.h> #include <THHUNN/THHHalfAutoNumerics.cuh> #include <THH/THHTensorSort.cuh> #if defined(__HIP_PLATFORM_HCC__) const int WARP_SIZE = 64; #else const int WARP_SIZE = 32; #endif const int MODE_SUM = 0; const int MODE_MEAN = 1; template <typename Dtype, typename Acctype> __global__ void cunn_LookupTableBag_updateOutputKernel( int64_t *input, int64_t *offsets, Dtype *weight, Dtype *output, int64_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t stride, int mode, int64_t *bag_size) { // the strategy here is that each bag x feature is handled by a single thread int64_t chunksPerBag = THCCeilDiv(stride, (int64_t) blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < stride) { int64_t bag = chunk / chunksPerBag; Dtype* weightFeat = weight + featureDim; int64_t begin = offsets[bag]; int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; assert(end >= begin); Acctype weightFeatSum = ScalarConvert<float, Acctype>::to(0); int64_t bag_size_ = 0; for (int64_t emb = begin; emb < end; emb++) { const int weightRow = ((int) input[emb]) * stride; weightFeatSum += ScalarConvert<Dtype, Acctype>::to(weightFeat[weightRow]); bag_size_ ++; if (featureDim == 0) { offset2bag[emb] = bag; } } if (mode == MODE_MEAN) { weightFeatSum = weightFeatSum / ScalarConvert<int64_t, Acctype>::to(bag_size_); bag_size[bag] = bag_size_; } (void) MODE_SUM; //silence warnings about unused MODE_SUM; output[bag * stride + featureDim] = ScalarConvert<Acctype, Dtype>::to(weightFeatSum); } } } // FIXME: removed the accGradParametersKernelByFeature case present in // LookupTable. That kernel is faster at small sizes (<768 indices), which // does not need LookupTableBag (LookupTable + Sum works fine), but would // still be nice to not be slow in that case. template <typename Dtype, typename Acctype> __global__ void cunn_LookupTableBag_accGradParametersKernel( int64_t *input, int64_t *indices, Dtype *gradOutput, Dtype *gradWeight, int64_t *offset2bag, int64_t *count, Dtype defaultScale, ptrdiff_t numel, int64_t stride, int mode, int64_t *bag_size) { int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { do { const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weightRow = ((int) input[idx]) * stride; // Note: only this line changes from LookupTable_accgradParametersKernel const int origRow = ((int) indices[idx]); const int seq_number = offset2bag[origRow]; const int gradOutputRow = ((int) seq_number) * stride; const Acctype scale = count ? ScalarConvert<Dtype, Acctype>::to(defaultScale) / count[idx] : ScalarConvert<Dtype, Acctype>::to(defaultScale); Acctype gradient[SZ]; Acctype weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * WARP_SIZE; if (featureDim < stride) { gradient[ii] = ScalarConvert<Dtype, Acctype>::to(gradOutput[gradOutputRow + featureDim]); if (mode == MODE_MEAN) { gradient[ii] /= bag_size[seq_number]; } weight[ii] = ScalarConvert<Dtype, Acctype>::to(gradWeight[weightRow + featureDim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * WARP_SIZE; if (featureDim < stride) { gradWeight[weightRow + featureDim] = ScalarConvert<Acctype, Dtype>::to(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } #include <THHUNN/generic/LookupTableBag.hip> #include <THH/THHGenerateFloatTypes.h>
b16c109493f6daddb5c0aa16e36e1a03e843e954.cu
#include <THCUNN/THCUNN.h> #include <THCUNN/common.h> #include <THC/THCTensor.hpp> #include <THC/THCThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/transform_reduce.h> #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ #include <thrust/system/cuda/execution_policy.h> #endif #include <thrust/unique.h> #include <TH/THHalf.h> #include <THCUNN/THCHalfAutoNumerics.cuh> #include <THC/THCTensorSort.cuh> #if defined(__HIP_PLATFORM_HCC__) const int WARP_SIZE = 64; #else const int WARP_SIZE = 32; #endif const int MODE_SUM = 0; const int MODE_MEAN = 1; template <typename Dtype, typename Acctype> __global__ void cunn_LookupTableBag_updateOutputKernel( int64_t *input, int64_t *offsets, Dtype *weight, Dtype *output, int64_t *offset2bag, int64_t numIndices, int64_t numBags, int64_t stride, int mode, int64_t *bag_size) { // the strategy here is that each bag x feature is handled by a single thread int64_t chunksPerBag = THCCeilDiv(stride, (int64_t) blockDim.x); int64_t numChunks = numBags * chunksPerBag; int64_t chunkOffset = blockIdx.x * blockDim.y + threadIdx.y; int64_t chunkStride = gridDim.x * blockDim.y; for (int64_t chunk = chunkOffset; chunk < numChunks; chunk += chunkStride) { int64_t featureDim = (chunk % chunksPerBag) * blockDim.x + threadIdx.x; if (featureDim < stride) { int64_t bag = chunk / chunksPerBag; Dtype* weightFeat = weight + featureDim; int64_t begin = offsets[bag]; int64_t end = (bag < numBags - 1) ? (offsets[bag + 1]) : numIndices; assert(end >= begin); Acctype weightFeatSum = ScalarConvert<float, Acctype>::to(0); int64_t bag_size_ = 0; for (int64_t emb = begin; emb < end; emb++) { const int weightRow = ((int) input[emb]) * stride; weightFeatSum += ScalarConvert<Dtype, Acctype>::to(weightFeat[weightRow]); bag_size_ ++; if (featureDim == 0) { offset2bag[emb] = bag; } } if (mode == MODE_MEAN) { weightFeatSum = weightFeatSum / ScalarConvert<int64_t, Acctype>::to(bag_size_); bag_size[bag] = bag_size_; } (void) MODE_SUM; //silence warnings about unused MODE_SUM; output[bag * stride + featureDim] = ScalarConvert<Acctype, Dtype>::to(weightFeatSum); } } } // FIXME: removed the accGradParametersKernelByFeature case present in // LookupTable. That kernel is faster at small sizes (<768 indices), which // does not need LookupTableBag (LookupTable + Sum works fine), but would // still be nice to not be slow in that case. template <typename Dtype, typename Acctype> __global__ void cunn_LookupTableBag_accGradParametersKernel( int64_t *input, int64_t *indices, Dtype *gradOutput, Dtype *gradWeight, int64_t *offset2bag, int64_t *count, Dtype defaultScale, ptrdiff_t numel, int64_t stride, int mode, int64_t *bag_size) { int idx = blockIdx.x * 4 + threadIdx.y; // Each warp is responsible for an input into the LookupTable. // If the preceding input has the same as this input, then the warp // exits immediately. The warp also processes subsequent inputs with the // same value. 
// // Input Warp // 1 <warp 1> // 1 <warp 1> (<warp 2> exits without doing any work) // 5 <warp 3> // 8 <warp 4> // Number of values proceessed by each thread (grain size) const int SZ = 4; if (idx < numel && (idx == 0 || input[idx] != input[idx - 1])) { do { const int startFeature = threadIdx.x + blockIdx.y * blockDim.x * SZ; const int weightRow = ((int) input[idx]) * stride; // Note: only this line changes from LookupTable_accgradParametersKernel const int origRow = ((int) indices[idx]); const int seq_number = offset2bag[origRow]; const int gradOutputRow = ((int) seq_number) * stride; const Acctype scale = count ? ScalarConvert<Dtype, Acctype>::to(defaultScale) / count[idx] : ScalarConvert<Dtype, Acctype>::to(defaultScale); Acctype gradient[SZ]; Acctype weight[SZ]; #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * WARP_SIZE; if (featureDim < stride) { gradient[ii] = ScalarConvert<Dtype, Acctype>::to(gradOutput[gradOutputRow + featureDim]); if (mode == MODE_MEAN) { gradient[ii] /= bag_size[seq_number]; } weight[ii] = ScalarConvert<Dtype, Acctype>::to(gradWeight[weightRow + featureDim]); } } #pragma unroll for (int ii = 0; ii < SZ; ii++) { weight[ii] += gradient[ii] * scale; } #pragma unroll for (int ii = 0; ii < SZ; ii++) { int featureDim = startFeature + ii * WARP_SIZE; if (featureDim < stride) { gradWeight[weightRow + featureDim] = ScalarConvert<Acctype, Dtype>::to(weight[ii]); } } idx++; } while (idx < numel && input[idx] == input[idx - 1]); } } #include <THCUNN/generic/LookupTableBag.cu> #include <THC/THCGenerateFloatTypes.h>
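cunn_LookupTableBag_updateOutputKernel above assigns each (bag, feature-chunk) pair to a thread, sums the embedding rows selected between offsets[bag] and the next offset, and divides by the bag size in MODE_MEAN. The scalar CPU reference below is only here to make that indexing explicit; it is not part of THCUNN, and it guards empty bags where the kernel divides unconditionally.

#include <vector>
#include <cstdint>

// CPU reference for the embedding-bag forward pass (MODE_SUM = 0, MODE_MEAN = 1).
void lookup_table_bag_ref(const std::vector<int64_t>& input,    // flattened indices
                          const std::vector<int64_t>& offsets,  // start of each bag
                          const std::vector<float>&   weight,   // [numRows x stride]
                          int64_t stride, int mode,
                          std::vector<float>& output)           // [numBags x stride]
{
    int64_t numBags    = (int64_t)offsets.size();
    int64_t numIndices = (int64_t)input.size();
    output.assign(numBags * stride, 0.0f);
    for (int64_t bag = 0; bag < numBags; ++bag) {
        int64_t begin = offsets[bag];
        int64_t end   = (bag < numBags - 1) ? offsets[bag + 1] : numIndices;
        for (int64_t emb = begin; emb < end; ++emb)
            for (int64_t f = 0; f < stride; ++f)
                output[bag * stride + f] += weight[input[emb] * stride + f];
        if (mode == 1 && end > begin)                 // MODE_MEAN
            for (int64_t f = 0; f < stride; ++f)
                output[bag * stride + f] /= (float)(end - begin);
    }
}

int main()
{
    std::vector<int64_t> input   = {0, 1, 1};
    std::vector<int64_t> offsets = {0, 2};        // bag0 = {rows 0,1}, bag1 = {row 1}
    std::vector<float>   weight  = {1.f, 2.f,     // row 0
                                    3.f, 4.f};    // row 1
    std::vector<float>   out;
    lookup_table_bag_ref(input, offsets, weight, /*stride=*/2, /*mode=*/1, out);
    // out = {2, 3, 3, 4} for MODE_MEAN
    return 0;
}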
6d77934834f3b9ae0c3c84052c35d6089e06c3d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void getAggregateStartIndicesKernel(int size, int *fineAggregateSort, int *aggregateRemapIndex) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { if(idx == 0 || fineAggregateSort[idx] != fineAggregateSort[idx - 1]) { aggregateRemapIndex[fineAggregateSort[idx]] = idx; } } }
6d77934834f3b9ae0c3c84052c35d6089e06c3d0.cu
#include "includes.h" __global__ void getAggregateStartIndicesKernel(int size, int *fineAggregateSort, int *aggregateRemapIndex) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { if(idx == 0 || fineAggregateSort[idx] != fineAggregateSort[idx - 1]) { aggregateRemapIndex[fineAggregateSort[idx]] = idx; } } }
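getAggregateStartIndicesKernel above walks a sorted list of aggregate ids and records, for each id, the first position at which it appears. The same bookkeeping in plain host code, with illustrative array contents, is shown below for reference.

#include <cstdio>

// Host equivalent: for a sorted array of aggregate ids, store the index of
// the first occurrence of each id.
void aggregate_start_indices(int size, const int* fineAggregateSort,
                             int* aggregateRemapIndex)
{
    for (int idx = 0; idx < size; ++idx)
        if (idx == 0 || fineAggregateSort[idx] != fineAggregateSort[idx - 1])
            aggregateRemapIndex[fineAggregateSort[idx]] = idx;
}

int main()
{
    int sorted[] = {0, 0, 0, 1, 1, 2, 2, 2, 2};
    int start[3];
    aggregate_start_indices(9, sorted, start);
    std::printf("%d %d %d\n", start[0], start[1], start[2]);  // 0 3 5
    return 0;
}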
1d975390723bc8b3dafff7cb041b6eedd55299f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <comm_quda.h> #include <gauge_fix_ovr_extra.h> #include <thrust_helper.cuh> namespace quda { #if defined(GPU_GAUGE_ALG) && defined(MULTI_GPU) struct BorderIdArg { int X[4]; // grid dimensions int border[4]; BorderIdArg(int X_[4], int border_[4]) { for ( int dir = 0; dir < 4; ++dir ) border[dir] = border_[dir]; for ( int dir = 0; dir < 4; ++dir ) X[dir] = X_[dir]; } }; __global__ void ComputeBorderPointsActiveFaceIndex(BorderIdArg arg, int *faceindices, int facesize, int faceid, int parity){ int idd = blockDim.x * blockIdx.x + threadIdx.x; if ( idd < facesize ) { int borderid = 0; int idx = idd; if ( idx >= facesize / 2 ) { borderid = arg.X[faceid] - 1; idx -= facesize / 2; } int X[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; int za, xodd; switch ( faceid ) { case 0: //X FACE za = idx / ( X[1] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[0] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[1] = (2 * idx + xodd) - za * X[1]; break; case 1: //Y FACE za = idx / ( X[0] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[1] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 2: //Z FACE za = idx / ( X[0] / 2); x[3] = za / X[1]; x[1] = za - x[3] * X[1]; x[2] = borderid; xodd = (borderid + x[1] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 3: //T FACE za = idx / ( X[0] / 2); x[2] = za / X[1]; x[1] = za - x[2] * X[1]; x[3] = borderid; xodd = (borderid + x[1] + x[2] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; } idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]);; faceindices[idd] = idx; } } /** * @brief Pre-calculate lattice border points used by the gauge fixing with overrelaxation in multi-GPU implementation */ void PreCalculateLatticeIndices(size_t faceVolume[4], size_t faceVolumeCB[4], int X[4], int border[4], \ int &threads, int *borderpoints[2]){ BorderIdArg arg(X, border); int nlinksfaces = 0; for ( int dir = 0; dir < 4; ++dir ) if ( comm_dim_partitioned(dir)) nlinksfaces += faceVolume[dir]; thrust::device_ptr<int> array_faceT[2]; thrust::device_ptr<int> array_interiorT[2]; for ( int i = 0; i < 2; i++ ) { //even and odd ids borderpoints[i] = static_cast<int*>(pool_device_malloc(nlinksfaces * sizeof(int) )); hipMemset(borderpoints[i], 0, nlinksfaces * sizeof(int) ); array_faceT[i] = thrust::device_pointer_cast(borderpoints[i]); } dim3 nthreads(128, 1, 1); int start = 0; for ( int dir = 0; dir < 4; ++dir ) { if ( comm_dim_partitioned(dir)) { dim3 blocks((faceVolume[dir] + nthreads.x - 1) / nthreads.x,1,1); for ( int oddbit = 0; oddbit < 2; oddbit++ ) ComputeBorderPointsActiveFaceIndex << < blocks, nthreads >> > (arg, borderpoints[oddbit] + start, faceVolume[dir], dir, oddbit); start += faceVolume[dir]; } } int size[2]; for ( int i = 0; i < 2; i++ ) { //sort and remove duplicated lattice indices thrust_allocator alloc; thrust::sort(thrust::hip::par(alloc), array_faceT[i], array_faceT[i] + nlinksfaces); thrust::device_ptr<int> new_end = thrust::unique(array_faceT[i], array_faceT[i] + nlinksfaces); size[i] = thrust::raw_pointer_cast(new_end) - thrust::raw_pointer_cast(array_faceT[i]); } if ( size[0] == size[1] ) threads = size[0]; else errorQuda("BORDER: Even and Odd sizes does not match, not supported!!!!, %d:%d",size[0],size[1]); } #endif // GPU_GAUGE_ALG && MULTI_GPU }
1d975390723bc8b3dafff7cb041b6eedd55299f6.cu
#include <comm_quda.h> #include <gauge_fix_ovr_extra.h> #include <thrust_helper.cuh> namespace quda { #if defined(GPU_GAUGE_ALG) && defined(MULTI_GPU) struct BorderIdArg { int X[4]; // grid dimensions int border[4]; BorderIdArg(int X_[4], int border_[4]) { for ( int dir = 0; dir < 4; ++dir ) border[dir] = border_[dir]; for ( int dir = 0; dir < 4; ++dir ) X[dir] = X_[dir]; } }; __global__ void ComputeBorderPointsActiveFaceIndex(BorderIdArg arg, int *faceindices, int facesize, int faceid, int parity){ int idd = blockDim.x * blockIdx.x + threadIdx.x; if ( idd < facesize ) { int borderid = 0; int idx = idd; if ( idx >= facesize / 2 ) { borderid = arg.X[faceid] - 1; idx -= facesize / 2; } int X[4]; for ( int dr = 0; dr < 4; ++dr ) X[dr] = arg.X[dr]; int x[4]; int za, xodd; switch ( faceid ) { case 0: //X FACE za = idx / ( X[1] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[0] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[1] = (2 * idx + xodd) - za * X[1]; break; case 1: //Y FACE za = idx / ( X[0] / 2); x[3] = za / X[2]; x[2] = za - x[3] * X[2]; x[1] = borderid; xodd = (borderid + x[2] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 2: //Z FACE za = idx / ( X[0] / 2); x[3] = za / X[1]; x[1] = za - x[3] * X[1]; x[2] = borderid; xodd = (borderid + x[1] + x[3] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; case 3: //T FACE za = idx / ( X[0] / 2); x[2] = za / X[1]; x[1] = za - x[2] * X[1]; x[3] = borderid; xodd = (borderid + x[1] + x[2] + parity) & 1; x[0] = (2 * idx + xodd) - za * X[0]; break; } idx = (((x[3] * X[2] + x[2]) * X[1] + x[1]) * X[0] + x[0]);; faceindices[idd] = idx; } } /** * @brief Pre-calculate lattice border points used by the gauge fixing with overrelaxation in multi-GPU implementation */ void PreCalculateLatticeIndices(size_t faceVolume[4], size_t faceVolumeCB[4], int X[4], int border[4], \ int &threads, int *borderpoints[2]){ BorderIdArg arg(X, border); int nlinksfaces = 0; for ( int dir = 0; dir < 4; ++dir ) if ( comm_dim_partitioned(dir)) nlinksfaces += faceVolume[dir]; thrust::device_ptr<int> array_faceT[2]; thrust::device_ptr<int> array_interiorT[2]; for ( int i = 0; i < 2; i++ ) { //even and odd ids borderpoints[i] = static_cast<int*>(pool_device_malloc(nlinksfaces * sizeof(int) )); cudaMemset(borderpoints[i], 0, nlinksfaces * sizeof(int) ); array_faceT[i] = thrust::device_pointer_cast(borderpoints[i]); } dim3 nthreads(128, 1, 1); int start = 0; for ( int dir = 0; dir < 4; ++dir ) { if ( comm_dim_partitioned(dir)) { dim3 blocks((faceVolume[dir] + nthreads.x - 1) / nthreads.x,1,1); for ( int oddbit = 0; oddbit < 2; oddbit++ ) ComputeBorderPointsActiveFaceIndex << < blocks, nthreads >> > (arg, borderpoints[oddbit] + start, faceVolume[dir], dir, oddbit); start += faceVolume[dir]; } } int size[2]; for ( int i = 0; i < 2; i++ ) { //sort and remove duplicated lattice indices thrust_allocator alloc; thrust::sort(thrust::cuda::par(alloc), array_faceT[i], array_faceT[i] + nlinksfaces); thrust::device_ptr<int> new_end = thrust::unique(array_faceT[i], array_faceT[i] + nlinksfaces); size[i] = thrust::raw_pointer_cast(new_end) - thrust::raw_pointer_cast(array_faceT[i]); } if ( size[0] == size[1] ) threads = size[0]; else errorQuda("BORDER: Even and Odd sizes does not match, not supported!!!!, %d:%d",size[0],size[1]); } #endif // GPU_GAUGE_ALG && MULTI_GPU }
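PreCalculateLatticeIndices above deduplicates the per-face lattice indices for each parity by sorting them with Thrust and then calling thrust::unique, keeping the distance to the returned end iterator as the count of active border sites. A minimal standalone version of that sort-and-unique step on a device_vector is sketched below; QUDA's pooled device allocation and custom Thrust allocator are left out, and the input values are made up.

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <cstdio>

int main()
{
    // Border indices as they might come out of the face kernels, with duplicates.
    int h[] = {7, 3, 3, 9, 7, 1, 9, 9};
    thrust::device_vector<int> d(h, h + 8);

    thrust::sort(d.begin(), d.end());                   // 1 3 3 7 7 9 9 9
    auto new_end = thrust::unique(d.begin(), d.end());  // 1 3 7 9 ...
    int unique_count = (int)(new_end - d.begin());

    std::printf("unique border sites: %d\n", unique_count);  // 4
    return 0;
}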
483dd6dc56b0e4a2ea1cee902ace3a8a90f6882f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<math.h> #include<iostream> using namespace std; __global__ void sum(float* input) { int tid = threadIdx.x; int step_size = 1; int number_of_threads = blockDim.x; float aux_size = (float)number_of_threads; while (number_of_threads > 0) { if (tid < number_of_threads) { int fst = tid*step_size * 2; int snd = fst + step_size; /*if (input[fst] > input[snd] && input[snd] > 0) input[fst] = input[snd];*/ input[fst] = input[fst] + input[snd]; } step_size = step_size*2; if (number_of_threads != 1) { aux_size = aux_size / 2; number_of_threads = (int)ceil(aux_size); } else number_of_threads = 0; } } int main(int argc, char const* argv[]) { int count = 10; float size = count * sizeof(float); float h[10]; //srand(100); cout << "AARAY : " <<endl; for (int i = 0; i < count; i++) { cout << i << "th element" << endl; cin >> h[i]; //rand() % count; } cout << "AARAY : " << endl; for (int i = 0; i < count; i++) { cout << h[i] << " "; } float* d; hipMalloc(&d, size); hipMemcpy(d, h, size, hipMemcpyHostToDevice); sum << <1, (count / 2) + 1 >> > (d); float result; hipMemcpy(&result, d, sizeof(float), hipMemcpyDeviceToHost); cout << "SUM :" << (float)result/(float)count << endl; float mean = (float)result / (float)count; for (int i = 0; i < count; i++) { h[i] = (h[i] - mean) * (h[i] - mean); cout << h[i] << " "; } float* f; hipMalloc(&f, size); hipMemcpy(f, h, size, hipMemcpyHostToDevice); sum << <1, (count / 2) + 1 >> > (f); float resultfil; hipMemcpy(&resultfil, f, sizeof(float), hipMemcpyDeviceToHost); cout << "SD :" <<sqrt(resultfil)<< endl; getchar(); hipFree(d); hipFree(f); return 0; }
483dd6dc56b0e4a2ea1cee902ace3a8a90f6882f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<math.h> #include<iostream> using namespace std; __global__ void sum(float* input) { int tid = threadIdx.x; int step_size = 1; int number_of_threads = blockDim.x; float aux_size = (float)number_of_threads; while (number_of_threads > 0) { if (tid < number_of_threads) { int fst = tid*step_size * 2; int snd = fst + step_size; /*if (input[fst] > input[snd] && input[snd] > 0) input[fst] = input[snd];*/ input[fst] = input[fst] + input[snd]; } step_size = step_size*2; if (number_of_threads != 1) { aux_size = aux_size / 2; number_of_threads = (int)ceil(aux_size); } else number_of_threads = 0; } } int main(int argc, char const* argv[]) { int count = 10; float size = count * sizeof(float); float h[10]; //srand(100); cout << "AARAY : " <<endl; for (int i = 0; i < count; i++) { cout << i << "th element" << endl; cin >> h[i]; //rand() % count; } cout << "AARAY : " << endl; for (int i = 0; i < count; i++) { cout << h[i] << " "; } float* d; cudaMalloc(&d, size); cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); sum << <1, (count / 2) + 1 >> > (d); float result; cudaMemcpy(&result, d, sizeof(float), cudaMemcpyDeviceToHost); cout << "SUM :" << (float)result/(float)count << endl; float mean = (float)result / (float)count; for (int i = 0; i < count; i++) { h[i] = (h[i] - mean) * (h[i] - mean); cout << h[i] << " "; } float* f; cudaMalloc(&f, size); cudaMemcpy(f, h, size, cudaMemcpyHostToDevice); sum << <1, (count / 2) + 1 >> > (f); float resultfil; cudaMemcpy(&resultfil, f, sizeof(float), cudaMemcpyDeviceToHost); cout << "SD :" <<sqrt(resultfil)<< endl; getchar(); cudaFree(d); cudaFree(f); return 0; }
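The sum kernel in the pair above performs a pairwise in-place tree reduction, but it never checks the upper index: with count = 10 it launches (count/2)+1 = 6 threads, so the last thread reads input[10] and input[11] in the first round and later rounds touch index 12, all past the end of the 10-element array, meaning the result is only correct if that memory happens to hold zeros. Below is a bounds-checked sketch of the same in-place pairwise scheme written against an explicit element count; it is my variant, not a drop-in replacement for the file above.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void sum_checked(float* input, int n)
{
    int tid = threadIdx.x;
    // Pairwise in-place reduction; both indices are checked against n.
    for (int step = 1; step < n; step *= 2) {
        int fst = 2 * step * tid;
        int snd = fst + step;
        if (snd < n)
            input[fst] += input[snd];
        __syncthreads();   // finish this round before the stride doubles
    }
    // input[0] now holds the total (single-block version).
}

int main()
{
    const int n = 10;
    float h[n];
    for (int i = 0; i < n; ++i) h[i] = 1.0f;   // expected sum: 10
    float* d;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
    sum_checked<<<1, (n + 1) / 2>>>(d, n);     // one block is enough at this size
    float total;
    cudaMemcpy(&total, d, sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("sum = %f\n", total);
    cudaFree(d);
    return 0;
}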
d56e6273302bd5473d5fbdaaf74d270176efb499.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions normal c -> d s */ #include "common_magma.h" #define PRECISION_c /* The version for fermi can be found in csymv_fermi.cu */ /* TODO: generate csymv_tesla.cu from chemv_tesla.cu somehow. * Basically just strip out cuConj calls. */ #define symv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Lower case, where n is multiple of block size (symv_bs) */ __global__ void csymv_kernel_tesla_L_special( int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex * __restrict__ WC) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; // la must be at least half_thread_x*bank_shift = 32x33 = 1056; // quarter_thread_x*(thread_x+2) = 16*(64+2) = 1056 // (was thread_x+1 here, but thread_x+3 in chemv_tesla.cu; +1 is insufficient) __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3]; /* Why +3? */ __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2[thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += ty_* lda + tx_; if ( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_; ty = ty_; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_ * 4 + 4); i++) { if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = la[0][ i * bank_shift + tx_]; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); magmaFloatComplex res2 = MAGMA_C_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 
= MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; A -= half_thread_x*lda; res_ = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_C_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if ( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_C_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda * blkc * thread_x; x = x - blkc * thread_x * incx; A += 4 * ty* lda; A += tx; int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) { b[j] = buff[ty_*4+j]; } #pragma unroll for( int i=0; i < thread_x*blkc; i += thread_x ) { res_ = MAGMA_C_ZERO; count++; if ( ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff2[ quarter_thread_x*k + ty*4 + j]; la[j + ty*4][tx] = tr[j]; } __syncthreads(); res_ = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { res_ += la[tx_][ty_*4+j] * b[j]; } b[4+k] = res_; __syncthreads(); A += lda * quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void csymv_kernel_tesla_L_generic( int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex * __restrict__ WC, int m_mod_thread_x) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3]; __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2[thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[8]; int break_d = thread_x * 
blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += lda * ty_; int trackA; if ( blkc == ( gridDim.x - 1 ) ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff[tx] = MAGMA_C_ZERO; } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } trackA = tx_; A += trackA; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 9999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_; ty = ty_; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_*4+4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][i*bank_shift+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; if ( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A += trackA+half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 99999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA+half_thread_x*lda; A += tx_; A += half_thread_x + half_thread_x*lda; } else { A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); magmaFloatComplex res2; res2 = MAGMA_C_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; res_ = MAGMA_C_ZERO; A -= half_thread_x*lda; if ( blkc == ( gridDim.x - 1 ) ) { A -= tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; #pragma unroll for(int j=0; j < half_thread_x; j += 8) if ( ( ty_ + j ) > m_mod_thread_x ) { tr[j/8] = MAGMA_C_MAKE( 99999, 0 ); } else tr[j/8] = A[ j * lda]; A -= trackA; A += tx_; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) 
{ res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_C_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if ( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_C_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda*break_d; x = x - break_d*incx; A += 4 * ty* lda; if ( blkc == ( gridDim.x - 1 ) ) { if ( tx <= m_mod_thread_x ) A += tx; else A += m_mod_thread_x; } else{ A += tx; } int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) b[j] = buff[ty_*4+j]; #pragma unroll for( int i=0; i < break_d; i += thread_x ) { res_ = MAGMA_C_ZERO; count++; if ( ty == 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); res_ = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) res_ += la[tx_][ty_*4+j]* b[j]; b[4+k] = res_; __syncthreads(); A += lda* quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx]; WC[0+lda*(blkc)] = res; } } __global__ void csymv_kernel_tesla_L_update( int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex * __restrict__ WC ) { int i; int tx = threadIdx.x; int ind = blockIdx.x * thread_x + tx; magmaFloatComplex Ca; Ca = MAGMA_C_ZERO; WC += ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i < n; i += thread_x) { Ca += WC[0]; WC += thread_x; } if ( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; } extern "C" void magmablas_csymv_tesla_L( magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy, magmaFloatComplex *dwork) { magma_int_t blocks = (n - 1)/symv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); /* 
* If matrix size is multiple of symv_bs, we use a specific code. * otherwise, we call the generic case. */ if ( n % symv_bs == 0 ) { hipLaunchKernelGGL(( csymv_kernel_tesla_L_special), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, A, lda, x, incx, beta, y, incy, dwork); } else{ magma_int_t m_mod_thread_x = (n % symv_bs) - 1; hipLaunchKernelGGL(( csymv_kernel_tesla_L_generic), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x); } dim3 threads_u(symv_bs, 1, 1); hipLaunchKernelGGL(( csymv_kernel_tesla_L_update), dim3(grid), dim3(threads_u), 0, magma_stream , n, alpha, A, lda, x, incx, beta, y, incy, dwork); } /************************************************************************* Purpose ======= magmablas_csymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. the interface of magmablas_csymv_work is different from magmablas_csymv in the last argument dwork As magma implements csymv through two steps: 1) perform the multiplication in each thread blocks and put the intermediate value in a space of device memory which we call working space. dwork is the working space 2) sum the intermediate values and store the final result in y. The size of dwork is lda * ceil(n/thread_x) where thread_x = 64 magamblasw_csymv requires users to explicitly a working space, while magmablas_csymv is a wrapper routine of magmabalsw_csymv allocating the working space inside the routine and provides the same interface with cublas. If users need to call csymv frequently, we suggest to use magmablas_csymv_work instead of magmablas_csymv. As the overhead of allocating and free in device memory in magmablas_csymv would hurt performance. Our tests show that this penalty is about 10Gflop/s when matrix size is around 10000. */ extern "C" magma_int_t magmablas_csymv_tesla_work( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy, magmaFloatComplex *dwork, magma_int_t lwork) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasCsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { magmablas_csymv_tesla_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_csymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO CHARACTER*1. 
On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA COMPLEX*16. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. */ extern "C" magma_int_t magmablas_csymv_tesla( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasCsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { magmaFloatComplex *dwork; magma_int_t blocks = (n - 1) / thread_x + 1; magma_int_t lwork = lda * (blocks + 1); // TODO deal with error magma_cmalloc( &dwork, lwork ); magmablas_csymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork); magma_free( dwork ); } return MAGMA_SUCCESS; }
d56e6273302bd5473d5fbdaaf74d270176efb499.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions normal c -> d s */ #include "common_magma.h" #define PRECISION_c /* The version for fermi can be found in csymv_fermi.cu */ /* TODO: generate csymv_tesla.cu from chemv_tesla.cu somehow. * Basically just strip out cuConj calls. */ #define symv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Lower case, where n is multiple of block size (symv_bs) */ __global__ void csymv_kernel_tesla_L_special( int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex * __restrict__ WC) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; // la must be at least half_thread_x*bank_shift = 32x33 = 1056; // quarter_thread_x*(thread_x+2) = 16*(64+2) = 1056 // (was thread_x+1 here, but thread_x+3 in chemv_tesla.cu; +1 is insufficient) __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3]; /* Why +3? */ __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2[thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += ty_* lda + tx_; if ( ty == 0 ) { buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_; ty = ty_; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_ * 4 + 4); i++) { if ( i < tx_ ) { la[0][bank_shift * tx_ + i] = la[0][ i * bank_shift + tx_]; } else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); magmaFloatComplex res2 = MAGMA_C_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; A -= half_thread_x*lda; res_ = 
MAGMA_C_ZERO; #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_C_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if ( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_C_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda * blkc * thread_x; x = x - blkc * thread_x * incx; A += 4 * ty* lda; A += tx; int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) { b[j] = buff[ty_*4+j]; } #pragma unroll for( int i=0; i < thread_x*blkc; i += thread_x ) { res_ = MAGMA_C_ZERO; count++; if ( ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff2[ quarter_thread_x*k + ty*4 + j]; la[j + ty*4][tx] = tr[j]; } __syncthreads(); res_ = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) { res_ += la[tx_][ty_*4+j] * b[j]; } b[4+k] = res_; __syncthreads(); A += lda * quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void csymv_kernel_tesla_L_generic( int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex * __restrict__ WC, int m_mod_thread_x) { int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+3]; __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2[thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty) + tx; int tx_ = td % half_thread_x; int ty_ = td 
/ half_thread_x; WC += break_d + tx; x += (break_d + tx) * incx; A += break_d * (lda+1); A += lda * ty_; int trackA; if ( blkc == ( gridDim.x - 1 ) ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff[tx] = MAGMA_C_ZERO; } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } trackA = tx_; A += trackA; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 9999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_; ty = ty_; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_*4+4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][i*bank_shift+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_== 0 ) { res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; if ( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A += trackA+half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { la[0][bank_shift*(ty_+j)+tx_] = MAGMA_C_MAKE( 99999, 0 ); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A -= trackA+half_thread_x*lda; A += tx_; A += half_thread_x + half_thread_x*lda; } else { A += half_thread_x + half_thread_x*lda; #pragma unroll for(int j=0; j < half_thread_x; j += 8) { la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4); i++) { if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); magmaFloatComplex res2; res2 = MAGMA_C_ZERO; if ( ty_== 1 ) { res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_C_ZERO; } __syncthreads(); res = MAGMA_C_ZERO; res_ = MAGMA_C_ZERO; A -= half_thread_x*lda; if ( blkc == ( gridDim.x - 1 ) ) { A -= tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA; #pragma unroll for(int j=0; j < half_thread_x; j += 8) if ( ( ty_ + j ) > m_mod_thread_x ) { tr[j/8] = MAGMA_C_MAKE( 99999, 0 ); } else tr[j/8] = A[ j * lda]; A -= trackA; A += tx_; } else { #pragma unroll for(int j=0; j < half_thread_x; j += 8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } 
__syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_] = res; __syncthreads(); if ( ty_ == 1 ) { res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res2 = MAGMA_C_ZERO; } __syncthreads(); la[0][bank_shift*tx_+ty_] = res_; __syncthreads(); if ( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { res1 = MAGMA_C_ZERO; } A -= half_thread_x; __syncthreads(); tx = threadIdx.x; ty = threadIdx.y; if ( ty_ == 0 && ty == 0 ) res = res1; else if ( ty_ == 1 && ty == 0 ) res = res2; else { res = MAGMA_C_ZERO; } A -= ty_* lda; A -= tx_; A = A - lda*break_d; x = x - break_d*incx; A += 4 * ty* lda; if ( blkc == ( gridDim.x - 1 ) ) { if ( tx <= m_mod_thread_x ) A += tx; else A += m_mod_thread_x; } else{ A += tx; } int wc_c = 0; int count = 0; tx_ = td % quarter_thread_x; ty_ = td / quarter_thread_x; WC -= tx; WC += tx_; #pragma unroll for(int j=0; j < 4; j++) b[j] = buff[ty_*4+j]; #pragma unroll for( int i=0; i < break_d; i += thread_x ) { res_ = MAGMA_C_ZERO; count++; if ( ty == 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0; k < 4; k++ ) { #pragma unroll for(int j=0; j < 4; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4; j++) { res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); res_ = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < 4; j++) res_ += la[tx_][ty_*4+j]* b[j]; b[4+k] = res_; __syncthreads(); A += lda* quarter_thread_x; } #pragma unroll for(int k=0; k < 4; k++) { la[tx_][ty_+quarter_thread_x*k] = b[4+k]; } __syncthreads(); if ( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][ 0+k] + la[tx_][ 1+k] + la[tx_][ 2+k] + la[tx_][ 3+k] + la[tx_][ 4+k] + la[tx_][ 5+k] + la[tx_][ 6+k] + la[tx_][ 7+k] + la[tx_][ 8+k] + la[tx_][ 9+k] + la[tx_][10+k] + la[tx_][11+k] + la[tx_][12+k] + la[tx_][13+k] + la[tx_][14+k] + la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC += tx; WC -= tx_; la[ty][tx] = res; __syncthreads(); if ( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx]; WC[0+lda*(blkc)] = res; } } __global__ void csymv_kernel_tesla_L_update( int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy, magmaFloatComplex * __restrict__ WC ) { int i; int tx = threadIdx.x; int ind = blockIdx.x * thread_x + tx; magmaFloatComplex Ca; Ca = MAGMA_C_ZERO; WC += ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i < n; i += thread_x) { Ca += WC[0]; WC += thread_x; } if ( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; } extern "C" void magmablas_csymv_tesla_L( magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy, magmaFloatComplex *dwork) { magma_int_t blocks = (n - 1)/symv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); /* * If matrix size is multiple of symv_bs, we use a specific code. 
* otherwise, we call the generic case. */ if ( n % symv_bs == 0 ) { csymv_kernel_tesla_L_special<<< grid, threads, 0, magma_stream >>> (n, alpha, A, lda, x, incx, beta, y, incy, dwork); } else{ magma_int_t m_mod_thread_x = (n % symv_bs) - 1; csymv_kernel_tesla_L_generic<<< grid, threads, 0, magma_stream >>> (n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x); } dim3 threads_u(symv_bs, 1, 1); csymv_kernel_tesla_L_update<<< grid, threads_u, 0, magma_stream >>> (n, alpha, A, lda, x, incx, beta, y, incy, dwork); } /************************************************************************* Purpose ======= magmablas_csymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. The interface of magmablas_csymv_work differs from magmablas_csymv in the last argument, dwork. MAGMA implements csymv in two steps: 1) perform the multiplication in each thread block and put the intermediate values in a space of device memory which we call the working space; dwork is the working space. 2) sum the intermediate values and store the final result in y. The size of dwork is lda * ceil(n/thread_x), where thread_x = 64. magmablas_csymv_work requires users to explicitly provide a working space, while magmablas_csymv is a wrapper routine of magmablas_csymv_work that allocates the working space inside the routine and provides the same interface as cublas. If users need to call csymv frequently, we suggest using magmablas_csymv_work instead of magmablas_csymv, as the overhead of allocating and freeing device memory in magmablas_csymv would hurt performance. Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000. */ extern "C" magma_int_t magmablas_csymv_tesla_work( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy, magmaFloatComplex *dwork, magma_int_t lwork) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasCsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { magmablas_csymv_tesla_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_csymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced.
Unchanged on exit. N INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA COMPLEX*16. On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda be a multiple of 16; otherwise performance would deteriorate, as the memory accesses would not be fully coalesced. X COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. */ extern "C" magma_int_t magmablas_csymv_tesla( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ /* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasCsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy); #endif } else { magmaFloatComplex *dwork; magma_int_t blocks = (n - 1) / thread_x + 1; magma_int_t lwork = lda * (blocks + 1); // TODO deal with error magma_cmalloc( &dwork, lwork ); magmablas_csymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork); magma_free( dwork ); } return MAGMA_SUCCESS; }
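The wrapper magmablas_csymv_tesla above allocates and frees dwork on every call, which the documentation estimates costs roughly 10 Gflop/s around n = 10000. Below is a minimal caller-side sketch of the recommended alternative, reusing one workspace across repeated calls through magmablas_csymv_tesla_work. It assumes magma.h is included and MAGMA is initialized; the function name repeated_csymv and the arguments d_A, d_x, d_y and num_calls are illustrative, and thread_x = 64 is taken from the comment above.

// Caller-side sketch (not MAGMA code): allocate the csymv workspace once and reuse it.
void repeated_csymv(magma_int_t n, magma_int_t lda,
                    magmaFloatComplex alpha, const magmaFloatComplex *d_A,
                    const magmaFloatComplex *d_x, magmaFloatComplex beta,
                    magmaFloatComplex *d_y, int num_calls)
{
    magma_int_t thread_x = 64;                      // block width quoted in the documentation above
    magma_int_t blocks   = (n - 1) / thread_x + 1;  // ceil(n / thread_x)
    magma_int_t lwork    = lda * (blocks + 1);      // same size the wrapper allocates internally
    magmaFloatComplex *dwork;
    magma_cmalloc(&dwork, lwork);                   // allocate once ...
    for (int i = 0; i < num_calls; ++i)
        magmablas_csymv_tesla_work('L', n, alpha, d_A, lda, d_x, 1,
                                   beta, d_y, 1, dwork, lwork);
    magma_free(dwork);                              // ... free once, avoiding the per-call overhead
}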
bd057c05f3a461ce527098667ad01336bb97d1d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> #include <stdio.h> #include <math.h> #include <float.h> //#include "caffe/fast_rcnn_layers.hpp" #include "caffe/roi_alignment_layer.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> //__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, // const Dtype spatial_scale, const int channels, const int height, // const int width, const int pooled_height, const int pooled_width, // const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) __global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data_topleft, int* argmax_data_topright, int* argmax_data_bottomleft, int* argmax_data_bottomright, float* dh_ratio_data, float* dw_ratio_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = (float)(bottom_rois[1] * spatial_scale); float roi_start_h = (float)(bottom_rois[2] * spatial_scale); float roi_end_w = (float)(bottom_rois[3] * spatial_scale); float roi_end_h = (float)(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (roi_height) / ((float)(pooled_height)); float bin_size_w = (roi_width) / ((float)(pooled_width)); float hstart = ((float)(ph)) * bin_size_h; float wstart = ((float)(pw)) * bin_size_w; float hend = ((float)(ph + 1)) * bin_size_h; float wend = ((float)(pw + 1)) * bin_size_w; // Add roi offsets and clip to input boundaries hstart = fminf(fmaxf(hstart + roi_start_h, 0), height-1); hend = fminf(fmaxf(hend + roi_start_h, 0), height-1); wstart = fminf(fmaxf(wstart + roi_start_w, 0), width-1); wend = fminf(fmaxf(wend + roi_start_w, 0), width-1); bool is_empty = (hend < hstart) || (wend < wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; if (is_empty) { top_data[index] = maxval; // argmax_data[index] = maxidx; argmax_data_topleft[index] = maxidx; argmax_data_topright[index] = maxidx; argmax_data_bottomleft[index] = maxidx; argmax_data_bottomright[index] = maxidx; dh_ratio_data[index] = 0.0; dw_ratio_data[index] = 0.0; } else { float centerx = (float)(wstart + wend)/2.0; float centery = (float)(hstart + hend)/2.0; int cy_top = static_cast<int>(floor(centery)); int cy_bottom = static_cast<int>(ceil(centery)); int cx_left = static_cast<int>(floor(centerx)); int cx_right = static_cast<int>(ceil(centerx)); cy_top = min(max(cy_top, 0), height-1); cy_bottom = min(max(cy_bottom, 0), height-1); cx_left = min(max(cx_left, 0), width-1); cx_right = min(max(cx_right, 0), width-1); int topleft = cy_top * width + cx_left; int topright = cy_top * width + cx_right; int bottomleft = cy_bottom * width + cx_left; int bottomright = cy_bottom * width + cx_right; // bilinear interpolation float y_ratio = centery - (float)(cy_top); float x_ratio = centerx - (float)(cx_left); maxval = bottom_data[topleft] * (1-y_ratio) * (1-x_ratio) + bottom_data[topright] * (1-y_ratio) * (x_ratio) + bottom_data[bottomleft] * (y_ratio) * (1 - x_ratio) + bottom_data[bottomright] * (y_ratio) * (x_ratio); top_data[index] = maxval; // argmax_data[index] = maxidx; argmax_data_topleft[index] = topleft; argmax_data_topright[index] = topright; argmax_data_bottomleft[index] = bottomleft; argmax_data_bottomright[index] = bottomright; dh_ratio_data[index] = y_ratio; dw_ratio_data[index] = x_ratio; } } } template <typename Dtype> void ROIAlignmentLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); // int* argmax_data = max_idx_.mutable_gpu_data(); ///////////////////////////////////////////////// int* argmax_data_topleft = max_idx_topleft.mutable_gpu_data(); int* argmax_data_topright = max_idx_topright.mutable_gpu_data(); int* argmax_data_bottomleft = max_idx_bottomleft.mutable_gpu_data(); int* argmax_data_bottomright = max_idx_bottomright.mutable_gpu_data(); float* dh_ratio_data = dh_ratio.mutable_gpu_data(); float* dw_ratio_data = dw_ratio.mutable_gpu_data(); ///////////////////////////////////////////////// int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) // hipLaunchKernelGGL(( ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, // count, bottom_data, spatial_scale_, channels_, height_, width_, // pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); hipLaunchKernelGGL(( ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data_topleft, argmax_data_topright, argmax_data_bottomleft, argmax_data_bottomright, dh_ratio_data, dw_ratio_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> //__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff, // const int* argmax_data, const int num_rois, const Dtype spatial_scale, // const int channels, const int height, const int width, // const int pooled_height, const int pooled_width, Dtype* bottom_diff, // const Dtype* 
bottom_rois) __global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff, const int* argmax_data_topleft, const int* argmax_data_topright, const int* argmax_data_bottomleft, const int* argmax_data_bottomright, const float* dh_ratio_data, const float* dw_ratio_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data (in conv5_3) int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0.; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind){ continue; } float roi_start_w = (float)(offset_bottom_rois[1] * spatial_scale); float roi_start_h = (float)(offset_bottom_rois[2] * spatial_scale); float roi_end_w = (float)(offset_bottom_rois[3] * spatial_scale); float roi_end_h = (float)(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) //const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); const bool in_roi = (w >= roi_start_w-1 && w <= roi_end_w+1 && h >= roi_start_h-1 && h <= roi_end_h+1); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; // const int* offset_argmax_data = argmax_data + offset; /////////////////////////////////////////////////////// const int* offset_argmax_data_topright = argmax_data_topright + offset; const int* offset_argmax_data_topleft = argmax_data_topleft + offset; const int* offset_argmax_data_bottomleft = argmax_data_bottomleft + offset; const int* offset_argmax_data_bottomright = argmax_data_bottomright + offset; const float* offset_dh_ratio_data = dh_ratio_data + offset; const float* offset_dw_ratio_data = dw_ratio_data + offset; /////////////////////////////////////////////////////// // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (roi_height) / ((float)(pooled_height)); float bin_size_w = (roi_width) / ((float)(pooled_width)); int phstart = floor(((float)h - roi_start_h) / bin_size_h); int phend = ceil(((float)h - roi_start_h) / bin_size_h); int pwstart = floor(((float)w - roi_start_w) / bin_size_w); int pwend = ceil(((float)w - roi_start_w) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); phstart = 0; phend = pooled_height; pwstart = 0; pwend = pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int topright = offset_argmax_data_topright[ph * pooled_width + pw]; int topleft = offset_argmax_data_topleft[ph * pooled_width + pw]; int bottomleft = offset_argmax_data_bottomleft[ph * pooled_width + pw]; int bottomright = offset_argmax_data_bottomright[ph * pooled_width + pw]; float y_ratio = offset_dh_ratio_data[ph * pooled_width + pw]; float x_ratio = 
offset_dw_ratio_data[ph * pooled_width + pw]; if (topleft == (h * width + w)) { gradient += (offset_top_diff[ph * pooled_width + pw] * (1. - y_ratio)*(1. - x_ratio)); } if (topright == (h * width + w)) { gradient += (offset_top_diff[ph * pooled_width + pw]*(1. -y_ratio)*(x_ratio)); } if (bottomleft == (h * width + w)) { gradient += (offset_top_diff[ph * pooled_width + pw]* (y_ratio) * (1. - x_ratio)); } if (bottomright == (h * width + w)) { gradient += (offset_top_diff[ph * pooled_width + pw]*(y_ratio) * (x_ratio)); } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIAlignmentLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // const int* argmax_data = max_idx_.gpu_data(); ///////////////////////////////////////////////////////////////// const int* argmax_data_topleft = max_idx_topleft.gpu_data(); const int* argmax_data_topright = max_idx_topright.gpu_data(); const int* argmax_data_bottomleft = max_idx_bottomleft.gpu_data(); const int* argmax_data_bottomright = max_idx_bottomright.gpu_data(); const float* dh_ratio_data = dh_ratio.gpu_data(); const float* dw_ratio_data = dw_ratio.gpu_data(); //////////////////////////////////////////////////////////////// // NOLINT_NEXT_LINE(whitespace/operators) // hipLaunchKernelGGL(( ROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, // count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, // height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); hipLaunchKernelGGL(( ROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, argmax_data_topleft, argmax_data_topright, argmax_data_bottomleft, argmax_data_bottomright, dh_ratio_data, dw_ratio_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignmentLayer); } // namespace caffe
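Each pooled output in ROIAlignForward above is produced by sampling a single point, the bin center, and bilinearly blending the four surrounding feature-map values with the weights (1-y_ratio)(1-x_ratio), (1-y_ratio)*x_ratio, y_ratio*(1-x_ratio) and y_ratio*x_ratio. The host-side sketch below mirrors that arithmetic for one channel slice and can be used to check a single output element against the kernel; the function and parameter names are illustrative only and not part of the layer.

#include <algorithm>
#include <cmath>

// Host reference for one pooled element. 'feat' is a height x width channel slice in
// row-major order; cy/cx are the bin-center coordinates used by the kernel above.
static float roi_align_center_sample(const float* feat, int height, int width,
                                     float cy, float cx) {
    int top    = std::min(std::max(static_cast<int>(std::floor(cy)), 0), height - 1);
    int bottom = std::min(std::max(static_cast<int>(std::ceil(cy)),  0), height - 1);
    int left   = std::min(std::max(static_cast<int>(std::floor(cx)), 0), width  - 1);
    int right  = std::min(std::max(static_cast<int>(std::ceil(cx)),  0), width  - 1);
    float y_ratio = cy - static_cast<float>(top);   // vertical blend weight
    float x_ratio = cx - static_cast<float>(left);  // horizontal blend weight
    return feat[top    * width + left ] * (1.0f - y_ratio) * (1.0f - x_ratio)
         + feat[top    * width + right] * (1.0f - y_ratio) * x_ratio
         + feat[bottom * width + left ] * y_ratio * (1.0f - x_ratio)
         + feat[bottom * width + right] * y_ratio * x_ratio;
}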
bd057c05f3a461ce527098667ad01336bb97d1d4.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cfloat> #include <stdio.h> #include <math.h> #include <float.h> //#include "caffe/fast_rcnn_layers.hpp" #include "caffe/roi_alignment_layer.hpp" using std::max; using std::min; namespace caffe { template <typename Dtype> //__global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, // const Dtype spatial_scale, const int channels, const int height, // const int width, const int pooled_height, const int pooled_width, // const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) __global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_data_topleft, int* argmax_data_topright, int* argmax_data_bottomleft, int* argmax_data_bottomright, float* dh_ratio_data, float* dw_ratio_data) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; float roi_start_w = (float)(bottom_rois[1] * spatial_scale); float roi_start_h = (float)(bottom_rois[2] * spatial_scale); float roi_end_w = (float)(bottom_rois[3] * spatial_scale); float roi_end_h = (float)(bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (roi_height) / ((float)(pooled_height)); float bin_size_w = (roi_width) / ((float)(pooled_width)); float hstart = ((float)(ph)) * bin_size_h; float wstart = ((float)(pw)) * bin_size_w; float hend = ((float)(ph + 1)) * bin_size_h; float wend = ((float)(pw + 1)) * bin_size_w; // Add roi offsets and clip to input boundaries hstart = fminf(fmaxf(hstart + roi_start_h, 0), height-1); hend = fminf(fmaxf(hend + roi_start_h, 0), height-1); wstart = fminf(fmaxf(wstart + roi_start_w, 0), width-1); wend = fminf(fmaxf(wend + roi_start_w, 0), width-1); bool is_empty = (hend < hstart) || (wend < wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; bottom_data += (roi_batch_ind * channels + c) * height * width; if (is_empty) { top_data[index] = maxval; // argmax_data[index] = maxidx; argmax_data_topleft[index] = maxidx; argmax_data_topright[index] = maxidx; argmax_data_bottomleft[index] = maxidx; argmax_data_bottomright[index] = maxidx; dh_ratio_data[index] = 0.0; dw_ratio_data[index] = 0.0; } else { float centerx = (float)(wstart + wend)/2.0; float centery = (float)(hstart + hend)/2.0; int cy_top = static_cast<int>(floor(centery)); int cy_bottom = static_cast<int>(ceil(centery)); int cx_left = static_cast<int>(floor(centerx)); int cx_right = static_cast<int>(ceil(centerx)); cy_top = min(max(cy_top, 0), height-1); cy_bottom = min(max(cy_bottom, 0), height-1); cx_left = min(max(cx_left, 0), width-1); cx_right = min(max(cx_right, 0), width-1); int topleft = cy_top * width + cx_left; int topright = cy_top * width + cx_right; int bottomleft = cy_bottom * width + cx_left; int bottomright = cy_bottom * width + cx_right; // bilinear interpolation float y_ratio = centery - (float)(cy_top); float x_ratio = centerx - (float)(cx_left); maxval = bottom_data[topleft] * (1-y_ratio) * (1-x_ratio) + bottom_data[topright] * (1-y_ratio) * (x_ratio) + bottom_data[bottomleft] * (y_ratio) * (1 - x_ratio) + bottom_data[bottomright] * (y_ratio) * (x_ratio); top_data[index] = maxval; // argmax_data[index] = maxidx; argmax_data_topleft[index] = topleft; argmax_data_topright[index] = topright; argmax_data_bottomleft[index] = bottomleft; argmax_data_bottomright[index] = bottomright; dh_ratio_data[index] = y_ratio; dw_ratio_data[index] = x_ratio; } } } template <typename Dtype> void ROIAlignmentLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); // int* argmax_data = max_idx_.mutable_gpu_data(); ///////////////////////////////////////////////// int* argmax_data_topleft = max_idx_topleft.mutable_gpu_data(); int* argmax_data_topright = max_idx_topright.mutable_gpu_data(); int* argmax_data_bottomleft = max_idx_bottomleft.mutable_gpu_data(); int* argmax_data_bottomright = max_idx_bottomright.mutable_gpu_data(); float* dh_ratio_data = dh_ratio.mutable_gpu_data(); float* dw_ratio_data = dw_ratio.mutable_gpu_data(); ///////////////////////////////////////////////// int count = top[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) // ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( // count, bottom_data, spatial_scale_, channels_, height_, width_, // pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data); ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data_topleft, argmax_data_topright, argmax_data_bottomleft, argmax_data_bottomright, dh_ratio_data, dw_ratio_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> //__global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff, // const int* argmax_data, const int num_rois, const Dtype spatial_scale, // const int channels, const int height, const int width, // const int pooled_height, const int pooled_width, Dtype* bottom_diff, // const Dtype* bottom_rois) __global__ void ROIAlignBackward(const int nthreads, const Dtype* 
top_diff, const int* argmax_data_topleft, const int* argmax_data_topright, const int* argmax_data_bottomleft, const int* argmax_data_bottomright, const float* dh_ratio_data, const float* dw_ratio_data, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data (in conv5_3) int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0.; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind){ continue; } float roi_start_w = (float)(offset_bottom_rois[1] * spatial_scale); float roi_start_h = (float)(offset_bottom_rois[2] * spatial_scale); float roi_end_w = (float)(offset_bottom_rois[3] * spatial_scale); float roi_end_h = (float)(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) //const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); const bool in_roi = (w >= roi_start_w-1 && w <= roi_end_w+1 && h >= roi_start_h-1 && h <= roi_end_h+1); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; // const int* offset_argmax_data = argmax_data + offset; /////////////////////////////////////////////////////// const int* offset_argmax_data_topright = argmax_data_topright + offset; const int* offset_argmax_data_topleft = argmax_data_topleft + offset; const int* offset_argmax_data_bottomleft = argmax_data_bottomleft + offset; const int* offset_argmax_data_bottomright = argmax_data_bottomright + offset; const float* offset_dh_ratio_data = dh_ratio_data + offset; const float* offset_dw_ratio_data = dw_ratio_data + offset; /////////////////////////////////////////////////////// // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1, 1); float roi_height = fmaxf(roi_end_h - roi_start_h + 1, 1); float bin_size_h = (roi_height) / ((float)(pooled_height)); float bin_size_w = (roi_width) / ((float)(pooled_width)); int phstart = floor(((float)h - roi_start_h) / bin_size_h); int phend = ceil(((float)h - roi_start_h) / bin_size_h); int pwstart = floor(((float)w - roi_start_w) / bin_size_w); int pwend = ceil(((float)w - roi_start_w) / bin_size_w); phstart = min(max(phstart, 0), pooled_height); phend = min(max(phend, 0), pooled_height); pwstart = min(max(pwstart, 0), pooled_width); pwend = min(max(pwend, 0), pooled_width); phstart = 0; phend = pooled_height; pwstart = 0; pwend = pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int topright = offset_argmax_data_topright[ph * pooled_width + pw]; int topleft = offset_argmax_data_topleft[ph * pooled_width + pw]; int bottomleft = offset_argmax_data_bottomleft[ph * pooled_width + pw]; int bottomright = offset_argmax_data_bottomright[ph * pooled_width + pw]; float y_ratio = offset_dh_ratio_data[ph * pooled_width + pw]; float x_ratio = offset_dw_ratio_data[ph * pooled_width + pw]; if (topleft == (h * width + w)) { gradient 
+= (offset_top_diff[ph * pooled_width + pw] * (1. - y_ratio)*(1. - x_ratio)); } if (topright == (h * width + w)) { gradient += (offset_top_diff[ph * pooled_width + pw]*(1. -y_ratio)*(x_ratio)); } if (bottomleft == (h * width + w)) { gradient += (offset_top_diff[ph * pooled_width + pw]* (y_ratio) * (1. - x_ratio)); } if (bottomright == (h * width + w)) { gradient += (offset_top_diff[ph * pooled_width + pw]*(y_ratio) * (x_ratio)); } } } } bottom_diff[index] = gradient; } } template <typename Dtype> void ROIAlignmentLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // const int* argmax_data = max_idx_.gpu_data(); ///////////////////////////////////////////////////////////////// const int* argmax_data_topleft = max_idx_topleft.gpu_data(); const int* argmax_data_topright = max_idx_topright.gpu_data(); const int* argmax_data_bottomleft = max_idx_bottomleft.gpu_data(); const int* argmax_data_bottomright = max_idx_bottomright.gpu_data(); const float* dh_ratio_data = dh_ratio.gpu_data(); const float* dw_ratio_data = dw_ratio.gpu_data(); //////////////////////////////////////////////////////////////// // NOLINT_NEXT_LINE(whitespace/operators) // ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( // count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_, // height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, argmax_data_topleft, argmax_data_topright, argmax_data_bottomleft, argmax_data_bottomright, dh_ratio_data, dw_ratio_data, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignmentLayer); } // namespace caffe
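The forward and backward kernels in this file are launched with Caffe's CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS helpers and iterate with the CUDA_KERNEL_LOOP grid-stride macro, none of which are defined here. The sketch below shows the conventional Caffe definitions they are assumed to correspond to; the exact thread count in this particular repository may differ.

// Conventional Caffe launch helpers, reproduced as an assumption (sketch only).
const int CAFFE_CUDA_NUM_THREADS = 512;             // typical threads-per-block choice
inline int CAFFE_GET_BLOCKS(const int n) {
  // enough blocks so that blocks * threads covers all n work items
  return (n + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}
// CUDA_KERNEL_LOOP(i, n) expands to a grid-stride loop over the n items:
//   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);
//        i += blockDim.x * gridDim.x)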
24d5977c07af709cba03597c8d3b32752d5592e6.hip
// !!! This is a file automatically generated by hipify!!! /* * gapped_extender_test.cpp * * Created on: 2012/11/20 * Author: shu */ #include <gtest/gtest.h> #include <string> #include <stdint.h> #include <fstream> #include <limits.h> #include "../src/score_matrix.h" #include "../src/alphabet_coder.h" #include "../src/sequence_type.h" #include "../src/protein_type.h" #include "../src/dna_type.h" #include "../src/score_matrix.h" #include "../src/edit_blocks.h" #include "../src/ungapped_extender_gpu.h" #include "../src/cuda_common.h" #include "../src/aligner_gpu_data.h" #include <thrust/host_vector.h> #include <thrust/system/hip/experimental/pinned_allocator.h> #include <thrust/device_vector.h> #include <thrust/copy.h> using namespace std; class UngappedExtenderGpuTest: public ::testing::Test { protected: virtual void SetUp() { } virtual void TearDown() { } }; TEST_F(UngappedExtenderGpuTest, ExtendShortSequencesForwardWithMinXDrop) { DnaType type; AlphabetCoder coder(type); AlphabetCoder::Code delimiter_code = coder.GetMaxCode() + 1; ScoreMatrix score_matrix("test_matrix", 4, 2, -1); string seq0("AGCAC"); vector<AlphabetCoder::Code> encoded_seq0(seq0.size() + 2); encoded_seq0[0] = delimiter_code; coder.Encode(&seq0[0], seq0.size(), &encoded_seq0[1]); encoded_seq0[encoded_seq0.size() - 1] = delimiter_code; string seq1("AGAAG"); vector<AlphabetCoder::Code> encoded_seq1(seq1.size() + 2); encoded_seq1[0] = delimiter_code; coder.Encode(&seq1[0], seq1.size(), &encoded_seq1[1]); encoded_seq1[encoded_seq1.size() - 1] = delimiter_code; vector<int> sequence0_cutoff(1, 1); vector<int> sequence0_trigger(1, 3); AlignerGpuData gpu_data; gpu_data.SetGpuQueriesSequence(&encoded_seq0[0], encoded_seq0.size()); gpu_data.SetGpuDatabaseSequence(&encoded_seq1[0], encoded_seq1.size()); gpu_data.SetGpuScoreMatrix(score_matrix.GetMatrix(), score_matrix.GetNumberLetters()); gpu_data.SetGpuUngappedExtensionCutoffs(&sequence0_cutoff[0], sequence0_cutoff.size()); gpu_data.SetGpuGappedExtensionTriggers(&sequence0_trigger[0], sequence0_trigger.size()); UngappedExtenderGpu e; e.SetQueries(delimiter_code, gpu_data.GetGpuQueriesSequence(), gpu_data.GetGpuUngappedExtensionCutoffs(), gpu_data.GetGpuGappedExtensionTriggers()); e.SetDatabase(gpu_data.GetGpuDatabaseSequence()); e.SetScoreMatrix(gpu_data.GetGpuScoreMatrix(), score_matrix.GetNumberLetters()); size_t number_extensions = 1; thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > ids(1, 0); thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence0_positions( number_extensions); thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence1_positions( number_extensions); thrust::host_vector<int, thrust::hip::experimental::pinned_allocator<int> > scores( number_extensions, 0); sequence0_positions[0] = 1; sequence1_positions[0] = 1; scores[0] = 0; hipStream_t stream; thrust::device_vector<uint32_t> d_ids(ids.size()); thrust::device_vector<uint32_t> d_sequence0_positions( sequence0_positions.size()); thrust::device_vector<uint32_t> d_sequence1_positions( sequence1_positions.size()); thrust::device_vector<bool> d_scores(sequence1_positions.size()); hipStreamCreate(&stream); e.ExtendWithTriggerAsync(number_extensions, &ids[0], &sequence0_positions[0], &sequence1_positions[0], &scores[0], thrust::raw_pointer_cast(d_ids.data()), thrust::raw_pointer_cast(d_sequence0_positions.data()), thrust::raw_pointer_cast(d_sequence1_positions.data()), 
thrust::raw_pointer_cast(d_scores.data()), stream); hipStreamSynchronize(stream); hipStreamDestroy(stream); EXPECT_EQ(4, scores[0]); } TEST_F(UngappedExtenderGpuTest, ExtendShortSequencesForward) { DnaType type; AlphabetCoder coder(type); AlphabetCoder::Code delimiter_code = coder.GetMaxCode() + 1; ScoreMatrix score_matrix("test_matrix", 4, 2, -1); string seq0("AGCAC"); vector<AlphabetCoder::Code> encoded_seq0(seq0.size() + 2); encoded_seq0[0] = delimiter_code; coder.Encode(&seq0[0], seq0.size(), &encoded_seq0[1]); encoded_seq0[encoded_seq0.size() - 1] = delimiter_code; string seq1("AGCAG"); vector<AlphabetCoder::Code> encoded_seq1(seq1.size() + 2); encoded_seq1[0] = delimiter_code; coder.Encode(&seq1[0], seq1.size(), &encoded_seq1[1]); encoded_seq1[encoded_seq1.size() - 1] = delimiter_code; vector<int> sequence0_cutoff(1, 1024); vector<int> sequence0_trigger(1, 7); AlignerGpuData gpu_data; gpu_data.SetGpuQueriesSequence(&encoded_seq0[0], encoded_seq0.size()); gpu_data.SetGpuDatabaseSequence(&encoded_seq1[0], encoded_seq1.size()); gpu_data.SetGpuScoreMatrix(score_matrix.GetMatrix(), score_matrix.GetNumberLetters()); gpu_data.SetGpuUngappedExtensionCutoffs(&sequence0_cutoff[0], sequence0_cutoff.size()); gpu_data.SetGpuGappedExtensionTriggers(&sequence0_trigger[0], sequence0_trigger.size()); UngappedExtenderGpu e; e.SetQueries(delimiter_code, gpu_data.GetGpuQueriesSequence(), gpu_data.GetGpuUngappedExtensionCutoffs(), gpu_data.GetGpuGappedExtensionTriggers()); e.SetDatabase(gpu_data.GetGpuDatabaseSequence()); e.SetScoreMatrix(gpu_data.GetGpuScoreMatrix(), score_matrix.GetNumberLetters()); size_t number_extensions = 1; thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > ids(1, 0); thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence0_positions( number_extensions); thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence1_positions( number_extensions); thrust::host_vector<int, thrust::hip::experimental::pinned_allocator<int> > scores( number_extensions, 0); sequence0_positions[0] = 1; sequence1_positions[0] = 1; scores[0] = 0; hipStream_t stream; thrust::device_vector<uint32_t> d_ids(ids.size()); thrust::device_vector<uint32_t> d_sequence0_positions( sequence0_positions.size()); thrust::device_vector<uint32_t> d_sequence1_positions( sequence1_positions.size()); thrust::device_vector<int> d_scores(sequence1_positions.size()); hipStreamCreate(&stream); e.ExtendWithTriggerAsync(number_extensions, &ids[0], &sequence0_positions[0], &sequence1_positions[0], &scores[0], thrust::raw_pointer_cast(d_ids.data()), thrust::raw_pointer_cast(d_sequence0_positions.data()), thrust::raw_pointer_cast(d_sequence1_positions.data()), thrust::raw_pointer_cast(d_scores.data()), stream); hipStreamSynchronize(stream); hipStreamDestroy(stream); EXPECT_EQ(8, scores[0]); } TEST_F(UngappedExtenderGpuTest, ExtendShortSequencesReverse) { DnaType type; AlphabetCoder coder(type); AlphabetCoder::Code delimiter_code = coder.GetMaxCode() + 1; ScoreMatrix score_matrix("test_matrix", 4, 2, -1); string seq0("CAGCA"); vector<AlphabetCoder::Code> encoded_seq0(seq0.size() + 2); encoded_seq0[0] = delimiter_code; coder.Encode(&seq0[0], seq0.size(), &encoded_seq0[1]); encoded_seq0[encoded_seq0.size() - 1] = delimiter_code; string seq1("GAGCA"); vector<AlphabetCoder::Code> encoded_seq1(seq1.size() + 2); encoded_seq1[0] = delimiter_code; coder.Encode(&seq1[0], seq1.size(), &encoded_seq1[1]); encoded_seq1[encoded_seq1.size() - 1] 
= delimiter_code; vector<int> sequence0_cutoff(1, 1024); vector<int> sequence0_trigger(1, 7); AlignerGpuData gpu_data; gpu_data.SetGpuQueriesSequence(&encoded_seq0[0], encoded_seq0.size()); gpu_data.SetGpuDatabaseSequence(&encoded_seq1[0], encoded_seq1.size()); gpu_data.SetGpuScoreMatrix(score_matrix.GetMatrix(), score_matrix.GetNumberLetters()); gpu_data.SetGpuUngappedExtensionCutoffs(&sequence0_cutoff[0], sequence0_cutoff.size()); gpu_data.SetGpuGappedExtensionTriggers(&sequence0_trigger[0], sequence0_trigger.size()); UngappedExtenderGpu e; e.SetQueries(delimiter_code, gpu_data.GetGpuQueriesSequence(), gpu_data.GetGpuUngappedExtensionCutoffs(), gpu_data.GetGpuGappedExtensionTriggers()); e.SetDatabase(gpu_data.GetGpuDatabaseSequence()); e.SetScoreMatrix(gpu_data.GetGpuScoreMatrix(), score_matrix.GetNumberLetters()); size_t number_extensions = 1; thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > ids(1, 0); thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence0_positions( number_extensions); thrust::host_vector<uint32_t, thrust::hip::experimental::pinned_allocator<uint32_t> > sequence1_positions( number_extensions); thrust::host_vector<int, thrust::hip::experimental::pinned_allocator<int> > scores( number_extensions, 0); sequence0_positions[0] = encoded_seq0.size() - 2; sequence1_positions[0] = encoded_seq1.size() - 2; scores[0] = 0; hipStream_t stream; thrust::device_vector<uint32_t> d_ids(ids.size()); thrust::device_vector<uint32_t> d_sequence0_positions( sequence0_positions.size()); thrust::device_vector<uint32_t> d_sequence1_positions( sequence1_positions.size()); thrust::device_vector<int> d_scores(sequence1_positions.size()); hipStreamCreate(&stream); e.ExtendWithTriggerAsync(number_extensions, &ids[0], &sequence0_positions[0], &sequence1_positions[0], &scores[0], thrust::raw_pointer_cast(d_ids.data()), thrust::raw_pointer_cast(d_sequence0_positions.data()), thrust::raw_pointer_cast(d_sequence1_positions.data()), thrust::raw_pointer_cast(d_scores.data()), stream); hipStreamSynchronize(stream); hipStreamDestroy(stream); EXPECT_EQ(8, scores[0]); }
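These fixtures pin down what the GPU extender is expected to return for an ungapped extension with match score 2 and mismatch -1: 4 for AGCAC vs AGAAG when the X-drop cutoff is 1, and 8 for AGCAC vs AGCAG when the cutoff is effectively unlimited. The compact CPU sketch below reproduces those numbers; the stop rule (quit once the score falls at least cutoff below the best seen so far) is an assumption chosen to match the expected values, not code taken from UngappedExtenderGpu, and the function name is illustrative.

#include <cstdio>

// Illustrative CPU reference for forward ungapped X-drop extension.
static int ungapped_extend(const char* a, const char* b, int len,
                           int match, int mismatch, int x_drop) {
    int score = 0, best = 0;
    for (int i = 0; i < len; ++i) {
        score += (a[i] == b[i]) ? match : mismatch;
        if (score > best) best = score;
        if (best - score >= x_drop) break;   // X-drop termination
    }
    return best;
}

int main() {
    // Mirrors the fixtures above: match = 2, mismatch = -1.
    std::printf("%d\n", ungapped_extend("AGCAC", "AGAAG", 5, 2, -1, 1));    // 4, first test
    std::printf("%d\n", ungapped_extend("AGCAC", "AGCAG", 5, 2, -1, 1024)); // 8, second test
    return 0;
}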
24d5977c07af709cba03597c8d3b32752d5592e6.cu
/* * gapped_extender_test.cpp * * Created on: 2012/11/20 * Author: shu */ #include <gtest/gtest.h> #include <string> #include <stdint.h> #include <fstream> #include <limits.h> #include "../src/score_matrix.h" #include "../src/alphabet_coder.h" #include "../src/sequence_type.h" #include "../src/protein_type.h" #include "../src/dna_type.h" #include "../src/score_matrix.h" #include "../src/edit_blocks.h" #include "../src/ungapped_extender_gpu.h" #include "../src/cuda_common.h" #include "../src/aligner_gpu_data.h" #include <thrust/host_vector.h> #include <thrust/system/cuda/experimental/pinned_allocator.h> #include <thrust/device_vector.h> #include <thrust/copy.h> using namespace std; class UngappedExtenderGpuTest: public ::testing::Test { protected: virtual void SetUp() { } virtual void TearDown() { } }; TEST_F(UngappedExtenderGpuTest, ExtendShortSequencesForwardWithMinXDrop) { DnaType type; AlphabetCoder coder(type); AlphabetCoder::Code delimiter_code = coder.GetMaxCode() + 1; ScoreMatrix score_matrix("test_matrix", 4, 2, -1); string seq0("AGCAC"); vector<AlphabetCoder::Code> encoded_seq0(seq0.size() + 2); encoded_seq0[0] = delimiter_code; coder.Encode(&seq0[0], seq0.size(), &encoded_seq0[1]); encoded_seq0[encoded_seq0.size() - 1] = delimiter_code; string seq1("AGAAG"); vector<AlphabetCoder::Code> encoded_seq1(seq1.size() + 2); encoded_seq1[0] = delimiter_code; coder.Encode(&seq1[0], seq1.size(), &encoded_seq1[1]); encoded_seq1[encoded_seq1.size() - 1] = delimiter_code; vector<int> sequence0_cutoff(1, 1); vector<int> sequence0_trigger(1, 3); AlignerGpuData gpu_data; gpu_data.SetGpuQueriesSequence(&encoded_seq0[0], encoded_seq0.size()); gpu_data.SetGpuDatabaseSequence(&encoded_seq1[0], encoded_seq1.size()); gpu_data.SetGpuScoreMatrix(score_matrix.GetMatrix(), score_matrix.GetNumberLetters()); gpu_data.SetGpuUngappedExtensionCutoffs(&sequence0_cutoff[0], sequence0_cutoff.size()); gpu_data.SetGpuGappedExtensionTriggers(&sequence0_trigger[0], sequence0_trigger.size()); UngappedExtenderGpu e; e.SetQueries(delimiter_code, gpu_data.GetGpuQueriesSequence(), gpu_data.GetGpuUngappedExtensionCutoffs(), gpu_data.GetGpuGappedExtensionTriggers()); e.SetDatabase(gpu_data.GetGpuDatabaseSequence()); e.SetScoreMatrix(gpu_data.GetGpuScoreMatrix(), score_matrix.GetNumberLetters()); size_t number_extensions = 1; thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > ids(1, 0); thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence0_positions( number_extensions); thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence1_positions( number_extensions); thrust::host_vector<int, thrust::cuda::experimental::pinned_allocator<int> > scores( number_extensions, 0); sequence0_positions[0] = 1; sequence1_positions[0] = 1; scores[0] = 0; cudaStream_t stream; thrust::device_vector<uint32_t> d_ids(ids.size()); thrust::device_vector<uint32_t> d_sequence0_positions( sequence0_positions.size()); thrust::device_vector<uint32_t> d_sequence1_positions( sequence1_positions.size()); thrust::device_vector<bool> d_scores(sequence1_positions.size()); cudaStreamCreate(&stream); e.ExtendWithTriggerAsync(number_extensions, &ids[0], &sequence0_positions[0], &sequence1_positions[0], &scores[0], thrust::raw_pointer_cast(d_ids.data()), thrust::raw_pointer_cast(d_sequence0_positions.data()), thrust::raw_pointer_cast(d_sequence1_positions.data()), thrust::raw_pointer_cast(d_scores.data()), stream); cudaStreamSynchronize(stream); 
cudaStreamDestroy(stream); EXPECT_EQ(4, scores[0]); } TEST_F(UngappedExtenderGpuTest, ExtendShortSequencesForward) { DnaType type; AlphabetCoder coder(type); AlphabetCoder::Code delimiter_code = coder.GetMaxCode() + 1; ScoreMatrix score_matrix("test_matrix", 4, 2, -1); string seq0("AGCAC"); vector<AlphabetCoder::Code> encoded_seq0(seq0.size() + 2); encoded_seq0[0] = delimiter_code; coder.Encode(&seq0[0], seq0.size(), &encoded_seq0[1]); encoded_seq0[encoded_seq0.size() - 1] = delimiter_code; string seq1("AGCAG"); vector<AlphabetCoder::Code> encoded_seq1(seq1.size() + 2); encoded_seq1[0] = delimiter_code; coder.Encode(&seq1[0], seq1.size(), &encoded_seq1[1]); encoded_seq1[encoded_seq1.size() - 1] = delimiter_code; vector<int> sequence0_cutoff(1, 1024); vector<int> sequence0_trigger(1, 7); AlignerGpuData gpu_data; gpu_data.SetGpuQueriesSequence(&encoded_seq0[0], encoded_seq0.size()); gpu_data.SetGpuDatabaseSequence(&encoded_seq1[0], encoded_seq1.size()); gpu_data.SetGpuScoreMatrix(score_matrix.GetMatrix(), score_matrix.GetNumberLetters()); gpu_data.SetGpuUngappedExtensionCutoffs(&sequence0_cutoff[0], sequence0_cutoff.size()); gpu_data.SetGpuGappedExtensionTriggers(&sequence0_trigger[0], sequence0_trigger.size()); UngappedExtenderGpu e; e.SetQueries(delimiter_code, gpu_data.GetGpuQueriesSequence(), gpu_data.GetGpuUngappedExtensionCutoffs(), gpu_data.GetGpuGappedExtensionTriggers()); e.SetDatabase(gpu_data.GetGpuDatabaseSequence()); e.SetScoreMatrix(gpu_data.GetGpuScoreMatrix(), score_matrix.GetNumberLetters()); size_t number_extensions = 1; thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > ids(1, 0); thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence0_positions( number_extensions); thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence1_positions( number_extensions); thrust::host_vector<int, thrust::cuda::experimental::pinned_allocator<int> > scores( number_extensions, 0); sequence0_positions[0] = 1; sequence1_positions[0] = 1; scores[0] = 0; cudaStream_t stream; thrust::device_vector<uint32_t> d_ids(ids.size()); thrust::device_vector<uint32_t> d_sequence0_positions( sequence0_positions.size()); thrust::device_vector<uint32_t> d_sequence1_positions( sequence1_positions.size()); thrust::device_vector<int> d_scores(sequence1_positions.size()); cudaStreamCreate(&stream); e.ExtendWithTriggerAsync(number_extensions, &ids[0], &sequence0_positions[0], &sequence1_positions[0], &scores[0], thrust::raw_pointer_cast(d_ids.data()), thrust::raw_pointer_cast(d_sequence0_positions.data()), thrust::raw_pointer_cast(d_sequence1_positions.data()), thrust::raw_pointer_cast(d_scores.data()), stream); cudaStreamSynchronize(stream); cudaStreamDestroy(stream); EXPECT_EQ(8, scores[0]); } TEST_F(UngappedExtenderGpuTest, ExtendShortSequencesReverse) { DnaType type; AlphabetCoder coder(type); AlphabetCoder::Code delimiter_code = coder.GetMaxCode() + 1; ScoreMatrix score_matrix("test_matrix", 4, 2, -1); string seq0("CAGCA"); vector<AlphabetCoder::Code> encoded_seq0(seq0.size() + 2); encoded_seq0[0] = delimiter_code; coder.Encode(&seq0[0], seq0.size(), &encoded_seq0[1]); encoded_seq0[encoded_seq0.size() - 1] = delimiter_code; string seq1("GAGCA"); vector<AlphabetCoder::Code> encoded_seq1(seq1.size() + 2); encoded_seq1[0] = delimiter_code; coder.Encode(&seq1[0], seq1.size(), &encoded_seq1[1]); encoded_seq1[encoded_seq1.size() - 1] = delimiter_code; vector<int> sequence0_cutoff(1, 1024); vector<int> 
sequence0_trigger(1, 7); AlignerGpuData gpu_data; gpu_data.SetGpuQueriesSequence(&encoded_seq0[0], encoded_seq0.size()); gpu_data.SetGpuDatabaseSequence(&encoded_seq1[0], encoded_seq1.size()); gpu_data.SetGpuScoreMatrix(score_matrix.GetMatrix(), score_matrix.GetNumberLetters()); gpu_data.SetGpuUngappedExtensionCutoffs(&sequence0_cutoff[0], sequence0_cutoff.size()); gpu_data.SetGpuGappedExtensionTriggers(&sequence0_trigger[0], sequence0_trigger.size()); UngappedExtenderGpu e; e.SetQueries(delimiter_code, gpu_data.GetGpuQueriesSequence(), gpu_data.GetGpuUngappedExtensionCutoffs(), gpu_data.GetGpuGappedExtensionTriggers()); e.SetDatabase(gpu_data.GetGpuDatabaseSequence()); e.SetScoreMatrix(gpu_data.GetGpuScoreMatrix(), score_matrix.GetNumberLetters()); size_t number_extensions = 1; thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > ids(1, 0); thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence0_positions( number_extensions); thrust::host_vector<uint32_t, thrust::cuda::experimental::pinned_allocator<uint32_t> > sequence1_positions( number_extensions); thrust::host_vector<int, thrust::cuda::experimental::pinned_allocator<int> > scores( number_extensions, 0); sequence0_positions[0] = encoded_seq0.size() - 2; sequence1_positions[0] = encoded_seq1.size() - 2; scores[0] = 0; cudaStream_t stream; thrust::device_vector<uint32_t> d_ids(ids.size()); thrust::device_vector<uint32_t> d_sequence0_positions( sequence0_positions.size()); thrust::device_vector<uint32_t> d_sequence1_positions( sequence1_positions.size()); thrust::device_vector<int> d_scores(sequence1_positions.size()); cudaStreamCreate(&stream); e.ExtendWithTriggerAsync(number_extensions, &ids[0], &sequence0_positions[0], &sequence1_positions[0], &scores[0], thrust::raw_pointer_cast(d_ids.data()), thrust::raw_pointer_cast(d_sequence0_positions.data()), thrust::raw_pointer_cast(d_sequence1_positions.data()), thrust::raw_pointer_cast(d_scores.data()), stream); cudaStreamSynchronize(stream); cudaStreamDestroy(stream); EXPECT_EQ(8, scores[0]); }
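Both variants of this test keep the host-side id, position and score buffers in page-locked (pinned) memory via thrust's pinned_allocator and drive ExtendWithTriggerAsync through an explicit stream, because asynchronous host-device copies only overlap other work when the host buffers are pinned. Below is a bare CUDA runtime sketch of the same pattern; the function and buffer names are illustrative, and only standard runtime API calls are assumed.

#include <cuda_runtime.h>

// Pinned-memory + stream round trip (sketch only; mirrors the allocation pattern above).
void pinned_async_roundtrip(size_t number_extensions) {
    const size_t bytes = number_extensions * sizeof(int);
    int *h_scores = nullptr, *d_scores = nullptr;
    cudaMallocHost((void**)&h_scores, bytes);  // page-locked host buffer, like the pinned_allocator vectors
    cudaMalloc((void**)&d_scores, bytes);
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // (fill h_scores here before the copy)
    cudaMemcpyAsync(d_scores, h_scores, bytes, cudaMemcpyHostToDevice, stream);  // truly asynchronous
    // ... enqueue extension kernels on 'stream' here ...
    cudaMemcpyAsync(h_scores, d_scores, bytes, cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);             // host results are valid only after this point
    cudaStreamDestroy(stream);
    cudaFree(d_scores);
    cudaFreeHost(h_scores);
}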
780db9fb3fc93a0be6fa234c02751de8ea961453.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Transpose.cu // MNN // // Created by MNN on b'2021/12/09'. // Copyright 2018, Alibaba Group Holding Limited // #include "Transpose_hip.cuh" #include "core/Macro.h" #include "MNNCUDADefine.hpp" #include "MNNCUDAFunction.cuh" namespace MNN { namespace CUDA { template<typename T0, typename T1> __global__ void UNPACKCOMMON_4(const T0 *input, T1 *output, const int total, int inside, int axis, int outside, int insideStride, int axisStride, DivModFast is, DivModFast cs ) { int axisAlign = UP_DIV(axis, PACK_NUMBER/ 4) * PACK_NUMBER / 4;; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmp, x, y, z; cs.divmod(i, tmp, y); is.divmod(tmp, z, x); int srcOffset = (z * inside + x) * axisAlign + y;// NHWC8 , inside <-> HW, ouside <-> N int dstOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } } } template<typename T0, typename T1> __global__ void UNPACKCOMMON(const T0 *input, T1 *output, int inside, int axis, int outside, int insideStride, int axisStride ) { int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;; int total = axisAlign * inside * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmpI = i / axisAlign; int y = i % axisAlign; int x = tmpI % inside; int z = tmpI / inside; int srcOffset = (z * inside + x) * axisAlign + y;// NHWC8 , inside <-> HW, ouside <-> N int dstOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } } } template<typename T0, typename T1> __global__ void PACKCOMMON_4(const T0 *input, T1 *output, int inside, int axis, int outside, int insideStride, int axisStride, DivModFast is, DivModFast cs ) { int axisAlign = UP_DIV(axis, PACK_NUMBER/ 4) * PACK_NUMBER / 4;; int total = axisAlign * inside * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmp, x, y, z; cs.divmod(i, tmp, y); is.divmod(tmp, z, x); int dstOffset = (z * inside + x) * axisAlign + y; int srcOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } else { output[dstOffset] = {0, 0, 0, 0}; } } } template<typename T0, typename T1> __global__ void PACKCOMMON(const T0 *input, T1 *output, int inside, int axis, int outside, int insideStride, int axisStride ) { int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;; int total = axisAlign * inside * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmpI = i / axisAlign; int y = i % axisAlign; int x = tmpI % inside; int z = tmpI / inside; int dstOffset = (z * inside + x) * axisAlign + y; int srcOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } else { output[dstOffset] = 0.0; } } } void PackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; if (info->axis % 4 == 0 && info->axisStride == 1 && \ bytes == 4 && info->insideStride == info->axis) { int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4; DivModFast is(info->inside); DivModFast cs(axis_pack); hipLaunchKernelGGL(( PACKCOMMON_4), dim3(cores), dim3(threadNumbers), 
0, 0, (const int4*)input, (int4*)output, info->inside, info->axis / 4, info->outside, info->insideStride / 4, info->axisStride, is, cs); return; } switch (bytes) { case 4: hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 2: hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 1: hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const int8_t*)input, (int8_t*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; default: break; } } void UnpackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; if (info->axis % 4 == 0 && info->axisStride == 1 && bytes == 4 && info->insideStride == info->axis) { int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4; DivModFast is(info->inside); DivModFast cs(axis_pack); const int maxCount = info->inside * axis_pack * info->outside; int block_num = runtime->blocks_num(maxCount); int block_size = runtime->threads_num(); hipLaunchKernelGGL(( UNPACKCOMMON_4), dim3(block_num), dim3(block_size), 0, 0, (const int4*)input, (int4*)output, maxCount, info->inside, info->axis / 4, info->outside, info->insideStride / 4, info->axisStride, is, cs); return; } switch (bytes) { case 4: hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 2: hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 1: hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const int8_t*)input, (int8_t*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; default: break; } } void PackFP32ToFP16(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } void PackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; hipLaunchKernelGGL(( PACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } void UnpackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const half*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } void UnpackFP32ToFP16(void* output, const void* 
input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; hipLaunchKernelGGL(( UNPACKCOMMON), dim3(cores), dim3(threadNumbers), 0, 0, (const float*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } template<typename T> __global__ void TRANSPOSE(const T *input, T *output, const TransposeParam* param) { size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < param->total) { int x = i % param->dims[0]; int tmp = i / param->dims[0]; int y = tmp % param->dims[1]; int z = tmp / param->dims[1]; int srcOffset = param->srcStride * z + y + x * param->dims[2]; int dstOffset = param->dstStride * z + x + y * param->dims[3]; output[dstOffset] = input[srcOffset]; } } #define LOCAL_DIM 8 template <typename T> __global__ void TRANSPOSE_LOCAL(const T* input, T *output, const TransposeParam* param) { __shared__ T localM[LOCAL_DIM][LOCAL_DIM + 1]; int num = blockIdx.z; for (int n = num; n < param->size; n += gridDim.z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < param->dims[0] && y < param->dims[1]) { int offset = n * param->srcStride + x * param->dims[2] + y; localM[threadIdx.y][threadIdx.x] = input[offset]; } __syncthreads(); x = blockIdx.y * blockDim.y + threadIdx.x; y = blockIdx.x * blockDim.x + threadIdx.y; if (x < param->dims[1] && y < param->dims[0]) { int offset = n * param->dstStride + x * param->dims[3] + y; output[offset] = localM[threadIdx.x][threadIdx.y]; } } } void Transpose(uint8_t* output, const uint8_t* input, const TransposeParam* cpuParam, const TransposeParam* gpuRegion, int bytes, CUDARuntime* runtime) { int count = cpuParam->total; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); auto out = output + bytes * cpuParam->dstOffset; auto inp = input + bytes * cpuParam->srcOffset; if (runtime->prop().maxThreadsPerBlock >= LOCAL_DIM * LOCAL_DIM && (cpuParam->dims[0] >= LOCAL_DIM || cpuParam->dims[1] >= LOCAL_DIM)) { dim3 localSize(LOCAL_DIM, LOCAL_DIM, 1); //printf("%d, %d - %d, %d - %d\n", cpuParam->size, cpuParam->dims[0], cpuParam->dims[1], cpuParam->dims[2], cpuParam->dims[3]); int globalZ = ALIMIN(runtime->prop().multiProcessorCount, cpuParam->size); dim3 globalSize(UP_DIV(cpuParam->dims[0], LOCAL_DIM), UP_DIV(cpuParam->dims[1], LOCAL_DIM), globalZ); switch (bytes) { case 4: hipLaunchKernelGGL(( TRANSPOSE_LOCAL), dim3(globalSize), dim3(localSize), 0, 0, (const float *)inp, (float *)out, gpuRegion); break; case 2: hipLaunchKernelGGL(( TRANSPOSE_LOCAL), dim3(globalSize), dim3(localSize), 0, 0, (const half *)inp, (half *)out, gpuRegion); break; case 1: hipLaunchKernelGGL(( TRANSPOSE_LOCAL), dim3(globalSize), dim3(localSize), 0, 0, (const int8_t *)inp, (int8_t *)out, gpuRegion); break; default: break; } return; } switch (bytes) { case 4: hipLaunchKernelGGL(( TRANSPOSE), dim3(block_num), dim3(threads_num), 0, 0, (int*)inp, (int*)out, gpuRegion); break; case 2: hipLaunchKernelGGL(( TRANSPOSE), dim3(block_num), dim3(threads_num), 0, 0, (int16_t*)inp, (int16_t*)out, gpuRegion); break; case 1: hipLaunchKernelGGL(( TRANSPOSE), dim3(block_num), dim3(threads_num), 0, 0, (int8_t*)inp, (int8_t*)out, gpuRegion); break; default: break; } } template<typename T0, typename T1> __global__ void NCHW_2_NHWC8(const T0* input, T1* output, const int maxCount, const int channel, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * 
blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int chnlp_idx = index % channel_pack; int temp = index / channel_pack; int area_idx = temp % area; int batch_idx = temp / area; if(chnlp_idx >= channel) { output[index] = (T1)0.0f; continue; } output[index] = (T1)input[(batch_idx * channel + chnlp_idx) * area + area_idx]; } } template<typename T0, typename T1> __global__ void NHWC8_2_NCHW(const T0* input, T1* output, const int maxCount, const int channel, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int area_idx = index % area; int temp = index / area; int channel_idx = temp % channel; int batch_idx = temp / channel; output[index] = (T1)input[(batch_idx * area + area_idx) * channel_pack + channel_idx]; } } template<typename T0, typename T1> __global__ void NCHW_2_NCHW(const T0* input, T1* output, const int maxCount ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { output[index] = (T1)input[index]; } } template<typename T0, typename T1> __global__ void C4NHW4_2_NHWC8(const T0* input, T1* output, const int maxCount, const int batch, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int c_idx = index % channel_pack; int temp = index / channel_pack; int hw_idx = temp % area; int batch_idx = temp / area; int c4_idx = c_idx >> 2; int cL_idx = c_idx & 3; output[index] = (T1)input[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx]; } } template<typename T0, typename T1> __global__ void NHWC8_2_C4NHW4(const T0* input, T1* output, const int maxCount, const int batch, const int channel, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int c_idx = index % channel_pack; int temp = index / channel_pack; int hw_idx = temp % area; int batch_idx = temp / area; int channel_8 = ((channel + 7) / 8) * 8; int c4_idx = c_idx >> 2; int cL_idx = c_idx & 3; output[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx] = (T1)input[(batch_idx * area + hw_idx) * channel_8 + c_idx]; } } template<class T0, class T1> static void insideFormatConvert(T0* input, T1* output, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \ const int area, const int batch, const int channel) { if(srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); hipLaunchKernelGGL(( NCHW_2_NHWC8<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8); checkKernelErrors; return; } if((srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NCHW) || \ (srcDataFormat == MNN_DATA_FORMAT_NHWC && dstDataFormat == MNN_DATA_FORMAT_NHWC)) { const int maxCount = batch * area * channel; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); hipLaunchKernelGGL(( NCHW_2_NCHW<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount); checkKernelErrors; return; } if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NCHW) { const int maxCount = batch * area * 
channel; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); hipLaunchKernelGGL(( NHWC8_2_NCHW<T0, T1>), dim3(block_num), dim3(block_size), 0, 0, input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8); checkKernelErrors; return; } MNN_PRINT("insideFormatConvert form %d to %d, not support\n", (int)srcDataFormat, (int)dstDataFormat); } void FormatConvert(void* output, void* input, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \ const int area, const int batch, const int channel, const Tensor* srcTensor, bool isFp16, bool srcDevice, bool dstDevice) { //MNN_PRINT("FormatConvert size batch:%d - plane:%d - channel:%d, %d-%d, %d-%d\n", batch, area, channel, srcDataFormat, dstDataFormat, srcDevice, dstDevice); if(batch == 0 || area == 0 || channel == 0) { return; } if(srcTensor->getType().bits == 8) { if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) { if(!srcDevice && dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); hipLaunchKernelGGL(( C4NHW4_2_NHWC8), dim3(block_num), dim3(block_size), 0, 0, (int8_t *)input, (int8_t *)output, maxCount, batch, area, UP_DIV(channel, 8) * 8); checkKernelErrors; return; } if(srcDevice && !dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 4) * 4; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); hipLaunchKernelGGL(( NHWC8_2_C4NHW4), dim3(block_num), dim3(block_size), 0, 0, (int8_t *)input, (int8_t *)output, maxCount, batch, channel, area, UP_DIV(channel, 4) * 4); checkKernelErrors; return; } } insideFormatConvert<int8_t, int8_t>((int8_t *)input, (int8_t *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); return; } isFp16 = isFp16 & (halide_type_float == srcTensor->getType().code); if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) { if(!srcDevice && dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); if(isFp16) { hipLaunchKernelGGL(( C4NHW4_2_NHWC8), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (half *)output, maxCount, batch, area, UP_DIV(channel, 8) * 8); checkKernelErrors; } else { hipLaunchKernelGGL(( C4NHW4_2_NHWC8), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (float *)output, maxCount, batch, area, UP_DIV(channel, 8) * 8); checkKernelErrors; } return; } if(srcDevice && !dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 4) * 4; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); if(isFp16) { hipLaunchKernelGGL(( NHWC8_2_C4NHW4), dim3(block_num), dim3(block_size), 0, 0, (half *)input, (float *)output, maxCount, batch, channel, area, UP_DIV(channel, 4) * 4); checkKernelErrors; } else { hipLaunchKernelGGL(( NHWC8_2_C4NHW4), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (float *)output, maxCount, batch, channel, area, UP_DIV(channel, 4) * 4); checkKernelErrors; } return; } if(srcDevice && dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); if(isFp16) { hipLaunchKernelGGL(( NCHW_2_NCHW<half, half>), dim3(block_num), dim3(block_size), 0, 0, (half *)input, (half 
*)output, maxCount); checkKernelErrors; } else { hipLaunchKernelGGL(( NCHW_2_NCHW<float, float>), dim3(block_num), dim3(block_size), 0, 0, (float *)input, (float *)output, maxCount); checkKernelErrors; } return; } } if(!srcDevice) { if(isFp16) { insideFormatConvert<float, half>((float *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } else { insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } } else if(!dstDevice) { if(isFp16) { insideFormatConvert<half, float>((half *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } else { insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } } else { if(isFp16) { insideFormatConvert<half, half>((half *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } else { insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } } } }; };
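A small host-side sketch of the NCHW -> NHWC8 index mapping used by the NCHW_2_NHWC8 kernel in the file above, handy for checking the channel-padding behaviour on the CPU. This is illustrative only: the function name and the use of std::vector are my own, and just the index arithmetic is taken from the kernel.

#include <vector>

// CPU reference of NCHW_2_NHWC8: batch-major output, spatial position next,
// padded channel (UP_DIV(channel, 8) * 8) running fastest; the padded tail of
// each channel group stays zero, matching the kernel's (T1)0.0f writes.
static std::vector<float> nchwToNhwc8Ref(const std::vector<float>& in,
                                         int batch, int channel, int area) {
    const int channelPack = ((channel + 7) / 8) * 8;
    std::vector<float> out(batch * area * channelPack, 0.0f);
    for (int b = 0; b < batch; ++b)
        for (int hw = 0; hw < area; ++hw)
            for (int c = 0; c < channel; ++c)
                out[(b * area + hw) * channelPack + c] =
                    in[(b * channel + c) * area + hw];
    return out;
}

With batch = 1, channel = 3, area = 2 this produces two groups of 8 floats, one per spatial position, of which only the first 3 entries in each group are populated.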
780db9fb3fc93a0be6fa234c02751de8ea961453.cu
// // Transpose.cu // MNN // // Created by MNN on b'2021/12/09'. // Copyright © 2018, Alibaba Group Holding Limited // #include "Transpose.cuh" #include "core/Macro.h" #include "MNNCUDADefine.hpp" #include "MNNCUDAFunction.cuh" namespace MNN { namespace CUDA { template<typename T0, typename T1> __global__ void UNPACKCOMMON_4(const T0 *input, T1 *output, const int total, int inside, int axis, int outside, int insideStride, int axisStride, DivModFast is, DivModFast cs ) { int axisAlign = UP_DIV(axis, PACK_NUMBER/ 4) * PACK_NUMBER / 4;; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmp, x, y, z; cs.divmod(i, tmp, y); is.divmod(tmp, z, x); int srcOffset = (z * inside + x) * axisAlign + y;// NHWC8 , inside <-> HW, ouside <-> N int dstOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } } } template<typename T0, typename T1> __global__ void UNPACKCOMMON(const T0 *input, T1 *output, int inside, int axis, int outside, int insideStride, int axisStride ) { int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;; int total = axisAlign * inside * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmpI = i / axisAlign; int y = i % axisAlign; int x = tmpI % inside; int z = tmpI / inside; int srcOffset = (z * inside + x) * axisAlign + y;// NHWC8 , inside <-> HW, ouside <-> N int dstOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } } } template<typename T0, typename T1> __global__ void PACKCOMMON_4(const T0 *input, T1 *output, int inside, int axis, int outside, int insideStride, int axisStride, DivModFast is, DivModFast cs ) { int axisAlign = UP_DIV(axis, PACK_NUMBER/ 4) * PACK_NUMBER / 4;; int total = axisAlign * inside * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmp, x, y, z; cs.divmod(i, tmp, y); is.divmod(tmp, z, x); int dstOffset = (z * inside + x) * axisAlign + y; int srcOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } else { output[dstOffset] = {0, 0, 0, 0}; } } } template<typename T0, typename T1> __global__ void PACKCOMMON(const T0 *input, T1 *output, int inside, int axis, int outside, int insideStride, int axisStride ) { int axisAlign = UP_DIV(axis, PACK_NUMBER) * PACK_NUMBER;; int total = axisAlign * inside * outside; for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += blockDim.x * gridDim.x) { int tmpI = i / axisAlign; int y = i % axisAlign; int x = tmpI % inside; int z = tmpI / inside; int dstOffset = (z * inside + x) * axisAlign + y; int srcOffset = x * insideStride + y * axisStride + z * inside * axis; if (y < axis) { output[dstOffset] = input[srcOffset]; } else { output[dstOffset] = 0.0; } } } void PackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; if (info->axis % 4 == 0 && info->axisStride == 1 && \ bytes == 4 && info->insideStride == info->axis) { int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4; DivModFast is(info->inside); DivModFast cs(axis_pack); PACKCOMMON_4<<<cores, threadNumbers>>>((const int4*)input, (int4*)output, info->inside, info->axis / 4, info->outside, info->insideStride / 4, info->axisStride, 
is, cs); return; } switch (bytes) { case 4: PACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 2: PACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 1: PACKCOMMON<<<cores, threadNumbers>>>((const int8_t*)input, (int8_t*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; default: break; } } void UnpackBuffer(void* output, const void* input, const PackInfo* info, int bytes, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; if (info->axis % 4 == 0 && info->axisStride == 1 && bytes == 4 && info->insideStride == info->axis) { int axis_pack = UP_DIV(info->axis, PACK_NUMBER) * PACK_NUMBER / 4; DivModFast is(info->inside); DivModFast cs(axis_pack); const int maxCount = info->inside * axis_pack * info->outside; int block_num = runtime->blocks_num(maxCount); int block_size = runtime->threads_num(); UNPACKCOMMON_4<<<block_num, block_size>>>((const int4*)input, (int4*)output, maxCount, info->inside, info->axis / 4, info->outside, info->insideStride / 4, info->axisStride, is, cs); return; } switch (bytes) { case 4: UNPACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 2: UNPACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; case 1: UNPACKCOMMON<<<cores, threadNumbers>>>((const int8_t*)input, (int8_t*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); break; default: break; } } void PackFP32ToFP16(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; PACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } void PackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; PACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } void UnpackFP16ToFP32(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; UNPACKCOMMON<<<cores, threadNumbers>>>((const half*)input, (float*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } void UnpackFP32ToFP16(void* output, const void* input, const PackInfo* info, CUDARuntime* runtime) { auto& prop = runtime->prop(); int cores = prop.multiProcessorCount; int threadNumbers = prop.maxThreadsPerBlock; UNPACKCOMMON<<<cores, threadNumbers>>>((const float*)input, (half*)output, info->inside, info->axis, info->outside, info->insideStride, info->axisStride); } template<typename T> __global__ void TRANSPOSE(const T *input, T *output, const TransposeParam* param) { size_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i < param->total) 
{ int x = i % param->dims[0]; int tmp = i / param->dims[0]; int y = tmp % param->dims[1]; int z = tmp / param->dims[1]; int srcOffset = param->srcStride * z + y + x * param->dims[2]; int dstOffset = param->dstStride * z + x + y * param->dims[3]; output[dstOffset] = input[srcOffset]; } } #define LOCAL_DIM 8 template <typename T> __global__ void TRANSPOSE_LOCAL(const T* input, T *output, const TransposeParam* param) { __shared__ T localM[LOCAL_DIM][LOCAL_DIM + 1]; int num = blockIdx.z; for (int n = num; n < param->size; n += gridDim.z) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < param->dims[0] && y < param->dims[1]) { int offset = n * param->srcStride + x * param->dims[2] + y; localM[threadIdx.y][threadIdx.x] = input[offset]; } __syncthreads(); x = blockIdx.y * blockDim.y + threadIdx.x; y = blockIdx.x * blockDim.x + threadIdx.y; if (x < param->dims[1] && y < param->dims[0]) { int offset = n * param->dstStride + x * param->dims[3] + y; output[offset] = localM[threadIdx.x][threadIdx.y]; } } } void Transpose(uint8_t* output, const uint8_t* input, const TransposeParam* cpuParam, const TransposeParam* gpuRegion, int bytes, CUDARuntime* runtime) { int count = cpuParam->total; int block_num = runtime->blocks_num(count); int threads_num = runtime->threads_num(); auto out = output + bytes * cpuParam->dstOffset; auto inp = input + bytes * cpuParam->srcOffset; if (runtime->prop().maxThreadsPerBlock >= LOCAL_DIM * LOCAL_DIM && (cpuParam->dims[0] >= LOCAL_DIM || cpuParam->dims[1] >= LOCAL_DIM)) { dim3 localSize(LOCAL_DIM, LOCAL_DIM, 1); //printf("%d, %d - %d, %d - %d\n", cpuParam->size, cpuParam->dims[0], cpuParam->dims[1], cpuParam->dims[2], cpuParam->dims[3]); int globalZ = ALIMIN(runtime->prop().multiProcessorCount, cpuParam->size); dim3 globalSize(UP_DIV(cpuParam->dims[0], LOCAL_DIM), UP_DIV(cpuParam->dims[1], LOCAL_DIM), globalZ); switch (bytes) { case 4: TRANSPOSE_LOCAL<<<globalSize, localSize>>>((const float *)inp, (float *)out, gpuRegion); break; case 2: TRANSPOSE_LOCAL<<<globalSize, localSize>>>((const half *)inp, (half *)out, gpuRegion); break; case 1: TRANSPOSE_LOCAL<<<globalSize, localSize>>>((const int8_t *)inp, (int8_t *)out, gpuRegion); break; default: break; } return; } switch (bytes) { case 4: TRANSPOSE<<<block_num, threads_num>>>((int*)inp, (int*)out, gpuRegion); break; case 2: TRANSPOSE<<<block_num, threads_num>>>((int16_t*)inp, (int16_t*)out, gpuRegion); break; case 1: TRANSPOSE<<<block_num, threads_num>>>((int8_t*)inp, (int8_t*)out, gpuRegion); break; default: break; } } template<typename T0, typename T1> __global__ void NCHW_2_NHWC8(const T0* input, T1* output, const int maxCount, const int channel, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int chnlp_idx = index % channel_pack; int temp = index / channel_pack; int area_idx = temp % area; int batch_idx = temp / area; if(chnlp_idx >= channel) { output[index] = (T1)0.0f; continue; } output[index] = (T1)input[(batch_idx * channel + chnlp_idx) * area + area_idx]; } } template<typename T0, typename T1> __global__ void NHWC8_2_NCHW(const T0* input, T1* output, const int maxCount, const int channel, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int area_idx = index % area; int temp = index / area; int channel_idx = temp % channel; int batch_idx = temp / channel; 
output[index] = (T1)input[(batch_idx * area + area_idx) * channel_pack + channel_idx]; } } template<typename T0, typename T1> __global__ void NCHW_2_NCHW(const T0* input, T1* output, const int maxCount ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { output[index] = (T1)input[index]; } } template<typename T0, typename T1> __global__ void C4NHW4_2_NHWC8(const T0* input, T1* output, const int maxCount, const int batch, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int c_idx = index % channel_pack; int temp = index / channel_pack; int hw_idx = temp % area; int batch_idx = temp / area; int c4_idx = c_idx >> 2; int cL_idx = c_idx & 3; output[index] = (T1)input[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx]; } } template<typename T0, typename T1> __global__ void NHWC8_2_C4NHW4(const T0* input, T1* output, const int maxCount, const int batch, const int channel, const int area, const int channel_pack ) { for(size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < maxCount; index += blockDim.x * gridDim.x) { int c_idx = index % channel_pack; int temp = index / channel_pack; int hw_idx = temp % area; int batch_idx = temp / area; int channel_8 = ((channel + 7) / 8) * 8; int c4_idx = c_idx >> 2; int cL_idx = c_idx & 3; output[((c4_idx * batch + batch_idx) * area + hw_idx) * 4 + cL_idx] = (T1)input[(batch_idx * area + hw_idx) * channel_8 + c_idx]; } } template<class T0, class T1> static void insideFormatConvert(T0* input, T1* output, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \ const int area, const int batch, const int channel) { if(srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); NCHW_2_NHWC8<T0, T1><<<block_num, block_size>>>(input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8); checkKernelErrors; return; } if((srcDataFormat == MNN_DATA_FORMAT_NCHW && dstDataFormat == MNN_DATA_FORMAT_NCHW) || \ (srcDataFormat == MNN_DATA_FORMAT_NHWC && dstDataFormat == MNN_DATA_FORMAT_NHWC)) { const int maxCount = batch * area * channel; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); NCHW_2_NCHW<T0, T1><<<block_num, block_size>>>(input, output, maxCount); checkKernelErrors; return; } if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NCHW) { const int maxCount = batch * area * channel; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); NHWC8_2_NCHW<T0, T1><<<block_num, block_size>>>(input, output, maxCount, channel, area, UP_DIV(channel, 8) * 8); checkKernelErrors; return; } MNN_PRINT("insideFormatConvert form %d to %d, not support\n", (int)srcDataFormat, (int)dstDataFormat); } void FormatConvert(void* output, void* input, MNN_DATA_FORMAT srcDataFormat, MNN_DATA_FORMAT dstDataFormat, CUDARuntime* runtime, \ const int area, const int batch, const int channel, const Tensor* srcTensor, bool isFp16, bool srcDevice, bool dstDevice) { //MNN_PRINT("FormatConvert size batch:%d - plane:%d - channel:%d, %d-%d, %d-%d\n", batch, area, channel, srcDataFormat, dstDataFormat, srcDevice, dstDevice); if(batch == 0 || area == 0 || channel == 0) { return; } 
if(srcTensor->getType().bits == 8) { if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) { if(!srcDevice && dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); C4NHW4_2_NHWC8<<<block_num, block_size>>>((int8_t *)input, (int8_t *)output, maxCount, batch, area, UP_DIV(channel, 8) * 8); checkKernelErrors; return; } if(srcDevice && !dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 4) * 4; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); NHWC8_2_C4NHW4<<<block_num, block_size>>>((int8_t *)input, (int8_t *)output, maxCount, batch, channel, area, UP_DIV(channel, 4) * 4); checkKernelErrors; return; } } insideFormatConvert<int8_t, int8_t>((int8_t *)input, (int8_t *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); return; } isFp16 = isFp16 & (halide_type_float == srcTensor->getType().code); if(srcDataFormat == MNN_DATA_FORMAT_NC4HW4 && dstDataFormat == MNN_DATA_FORMAT_NC4HW4) { if(!srcDevice && dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); if(isFp16) { C4NHW4_2_NHWC8<<<block_num, block_size>>>((float *)input, (half *)output, maxCount, batch, area, UP_DIV(channel, 8) * 8); checkKernelErrors; } else { C4NHW4_2_NHWC8<<<block_num, block_size>>>((float *)input, (float *)output, maxCount, batch, area, UP_DIV(channel, 8) * 8); checkKernelErrors; } return; } if(srcDevice && !dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 4) * 4; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); if(isFp16) { NHWC8_2_C4NHW4<<<block_num, block_size>>>((half *)input, (float *)output, maxCount, batch, channel, area, UP_DIV(channel, 4) * 4); checkKernelErrors; } else { NHWC8_2_C4NHW4<<<block_num, block_size>>>((float *)input, (float *)output, maxCount, batch, channel, area, UP_DIV(channel, 4) * 4); checkKernelErrors; } return; } if(srcDevice && dstDevice) { const int maxCount = batch * area * UP_DIV(channel, 8) * 8; const int block_num = runtime->blocks_num(maxCount); const int block_size = runtime->threads_num(); if(isFp16) { NCHW_2_NCHW<half, half><<<block_num, block_size>>>((half *)input, (half *)output, maxCount); checkKernelErrors; } else { NCHW_2_NCHW<float, float><<<block_num, block_size>>>((float *)input, (float *)output, maxCount); checkKernelErrors; } return; } } if(!srcDevice) { if(isFp16) { insideFormatConvert<float, half>((float *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } else { insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } } else if(!dstDevice) { if(isFp16) { insideFormatConvert<half, float>((half *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } else { insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } } else { if(isFp16) { insideFormatConvert<half, half>((half *)input, (half *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } else { insideFormatConvert<float, float>((float *)input, (float *)output, srcDataFormat, dstDataFormat, runtime, area, batch, channel); } } } }; };
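The element-wise TRANSPOSE kernel in both copies of this file is driven entirely by the TransposeParam index math, so a plain CPU loop is enough to sanity-check a parameter set. The sketch below is mine: the struct only mirrors the fields the kernel actually reads, and its name is hypothetical.

// Host reference for the TRANSPOSE kernel: element i maps to coordinates
// (x, y, z) with x fastest over dims[0], y over dims[1], z over the rest,
// and the roles of x and y are swapped between source and destination using
// the per-axis strides dims[2] / dims[3].
struct TransposeParamRef {
    int dims[4];
    int srcStride;
    int dstStride;
    int total;
};

template <typename T>
void transposeRef(const T* input, T* output, const TransposeParamRef& p) {
    for (int i = 0; i < p.total; ++i) {
        int x = i % p.dims[0];
        int tmp = i / p.dims[0];
        int y = tmp % p.dims[1];
        int z = tmp / p.dims[1];
        output[p.dstStride * z + x + y * p.dims[3]] =
            input[p.srcStride * z + y + x * p.dims[2]];
    }
}

The shared-memory TRANSPOSE_LOCAL path computes the same mapping tile by tile; the extra column in localM[LOCAL_DIM][LOCAL_DIM + 1] is the usual padding that keeps the transposed reads free of shared-memory bank conflicts.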
2138a1664da6dc9b318ed4a25bee196ec6e1903f.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include "hip/hip_runtime.h" #include "utils.cuh" #include "../device/device_context.cuh" #include "tsvd.h" #include <ctime> #include <thrust/iterator/counting_iterator.h> #include<algorithm> #include <thrust/sequence.h> namespace tsvd { /** * Division utility to get explained variance ratio * * @param XVar * @param XVarSum * @param ExplainedVarRatio * @param context */ void divide(const Matrix<float> &XVar, const Matrix<float> &XVarSum, Matrix<float> &ExplainedVarRatio, DeviceContext &context){ auto d_x_var = XVar.data(); auto d_x_var_sum = XVarSum.data(); auto d_expl_var_ratio = ExplainedVarRatio.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+ExplainedVarRatio.size(), [=]__device__(int idx){ float div_val = 0.0; //XVarSum can possibly be zero if(d_x_var_sum[0] != 0.0){ div_val = d_x_var[idx] / d_x_var_sum[0]; } d_expl_var_ratio[idx] = div_val; } ); } /** * Square each value in a matrix * * @param UmultSigma * @param UmultSigmaSquare * @param context */ void square_val(const Matrix<float> &UmultSigma, Matrix<float> &UmultSigmaSquare, DeviceContext &context){ auto n = UmultSigma.columns(); auto m = UmultSigma.rows(); auto k = UmultSigmaSquare.rows(); auto d_u_mult_sigma = UmultSigma.data(); auto d_u_mult_sigma_square = UmultSigmaSquare.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+UmultSigmaSquare.size(), [=]__device__(int idx){ float square_val = ::pow(d_u_mult_sigma[idx],2); d_u_mult_sigma_square[idx] = square_val; } ); } /** * Alternative variance calculation (Can be slow for big matrices) * * @param UmultSigma * @param k * @param UmultSigmaVar * @param context */ void calc_var(const Matrix<float>UmultSigma, int k, Matrix<float> &UmultSigmaVar, DeviceContext &context){ //Set aside matrix of 1's for getting columnar sums(t(UmultSima) * UmultOnes) Matrix<float>UmultOnes(UmultSigma.rows(), 1); UmultOnes.fill(1.0f); //Allocate matrices for variance calculation Matrix<float>UmultSigmaSquare(UmultSigma.rows(), UmultSigma.columns()); Matrix<float>UmultSigmaSum(k, 1); Matrix<float>UmultSigmaSumSquare(k, 1); Matrix<float>UmultSigmaSumOfSquare(k, 1); Matrix<float>UmultSigmaVarNum(k, 1); //Calculate Variance square_val(UmultSigma, UmultSigmaSquare, context); multiply(UmultSigmaSquare, UmultOnes, UmultSigmaSumOfSquare, context, true, false, 1.0f); multiply(UmultSigma, UmultOnes, UmultSigmaSum, context, true, false, 1.0f); square_val(UmultSigmaSum, UmultSigmaSumSquare, context); //Get rows auto m = UmultSigma.rows(); multiply(UmultSigmaSumOfSquare, m, context); subtract(UmultSigmaSumOfSquare, UmultSigmaSumSquare, UmultSigmaVarNum, context); auto d_u_sigma_var_num = UmultSigmaVarNum.data(); auto d_u_sigma_var = UmultSigmaVar.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+UmultSigmaVar.size(), [=]__device__(int idx){ float div_val = d_u_sigma_var_num[idx]/(::pow(m,2)); d_u_sigma_var[idx] = div_val; } ); } template<typename T> class variance_iterator{ public: // Required iterator traits typedef variance_iterator<T> self_type; ///< My own type typedef size_t difference_type; ///< Type to express the result of subtracting one iterator from another typedef T value_type; ///< The type of the element the iterator can point to typedef T* pointer; ///< The type of a pointer to an element the iterator can point to typedef T reference; ///< The type of a reference to an 
element the iterator can point to typedef std::random_access_iterator_tag iterator_category; ///< The iterator category const T* data_ptr; const T* mean_ptr; const int col_rows; size_t offset; __device__ T operator[](size_t idx){ idx = idx + offset; T mean = mean_ptr[idx/col_rows]; T dev_square = pow((data_ptr[idx] - mean),2); return dev_square; } __device__ self_type operator+(size_t idx){ self_type retval(data_ptr, mean_ptr, col_rows); retval.offset += idx; return retval; } __host__ __device__ variance_iterator(const T* data_ptr, const T* mean_ptr, const int col_rows):data_ptr(data_ptr), mean_ptr(mean_ptr), col_rows(col_rows), offset(0){ } }; /** * Utility to calculate variance for each column of a matrix * * @param X * @param UColMean * @param UVar * @param context */ void calc_var_numerator(const Matrix<float> &X, const Matrix<float> &UColMean, Matrix<float> &UVar, DeviceContext &context){ auto m = X.rows(); variance_iterator<float> variance(X.data(), UColMean.data(), m); thrust::device_vector<int> segments(X.columns() + 1); thrust::sequence(segments.begin(), segments.end(), 0, static_cast<int>(X.rows())); // Determine temporary device storage requirements void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; int cols = static_cast<int>(X.columns()); hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(), cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1)); // Allocate temporary storage safe_cuda(hipMalloc(&d_temp_storage, temp_storage_bytes)); // Run sum-reduction hipcub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(), cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1)); safe_cuda(hipFree(d_temp_storage)); } /** * Utility to reverse q to show most import k to least important k * * @param Q * @param QReversed * @param context */ void col_reverse_q(const Matrix<float> &Q, Matrix<float> &QReversed, DeviceContext &context){ auto n = Q.columns(); auto m = Q.rows(); auto k = QReversed.rows(); auto d_q = Q.data(); auto d_q_reversed = QReversed.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+QReversed.size(), [=]__device__(int idx){ int dest_row = idx % m; int dest_col = idx/m; int src_row = dest_row; int src_col = (n - dest_col) - 1; d_q_reversed[idx] = d_q[src_col * m + src_row]; } ); } /** * Truncate Q transpose to top k * * @param Qt * @param QtTrunc * @param context */ void row_reverse_trunc_q(const Matrix<float> &Qt, Matrix<float> &QtTrunc, DeviceContext &context){ auto m = Qt.rows(); auto k = QtTrunc.rows(); auto d_q = Qt.data(); auto d_q_trunc = QtTrunc.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+QtTrunc.size(), [=]__device__(int idx){ int dest_row = idx % k; int dest_col = idx / k; int src_row = (m - dest_row) - 1; int src_col = dest_col; float q = d_q[src_col * m + src_row]; d_q_trunc[idx] = q; } ); } /** * Calculate the U matrix, which is defined as: * U = A*V/sigma where A is our X Matrix, V is Q, and sigma is 1/w_i * * @param X * @param Q * @param w * @param U * @param context */ void calculate_u(const Matrix<float> &X, const Matrix<float> &Q, const Matrix<float> &w, Matrix<float> &U, DeviceContext &context){ multiply(X, Q, U, context, false, false, 1.0f); //A*V auto d_u = U.data(); auto d_sigma = w.data(); auto column_size = U.rows(); auto counting = thrust::make_counting_iterator <int>(0); 
thrust::for_each(counting, counting+U.size(), [=]__device__(int idx){ int column = idx/column_size; float sigma = d_sigma[column]; float u = d_u[idx]; if(sigma != 0.0){ d_u[idx] = u * 1.0/sigma; } else{ d_u[idx] = 0.0; } } ); } /** * Conduct truncated SVD on a matrix * * @param _X * @param _Q * @param _w * @param _U * @param _explained_variance * @param _explained_variance_ratio * @param _param */ void truncated_svd(const double* _X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param) { Matrix<float>X(_param.X_m, _param.X_n); X.copy(_X); truncated_svd_matrix(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param); } void truncated_svd_matrix(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param) { try { //Allocate matrix for X^TX Matrix<float>XtX(_param.X_n, _param.X_n); //create context DeviceContext context; //Multiply X and Xt and output result to XtX multiply(X, X, XtX, context, true, false, 1.0f); //Set up Q (V^T) and w (singular value) matrices (w is a matrix of size Q.rows() by 1; really just a vector Matrix<float>Q(XtX.rows(), XtX.columns()); // n X n -> V^T Matrix<float>w(Q.rows(), 1); calculate_eigen_pairs_exact(XtX, Q, w, context); //Obtain Q^T to obtain vector as row major order Matrix<float>Qt(Q.columns(), Q.rows()); transpose(Q, Qt, context); //Needed for calculate_u() Matrix<float>QtTrunc(_param.k, Qt.columns()); row_reverse_trunc_q(Qt, QtTrunc, context); QtTrunc.copy_to_host(_Q); //Send to host //Obtain square root of eigenvalues, which are singular values w.transform([=]__device__(float elem){ if(elem > 0.0){ return std::sqrt(elem); }else{ return 0.0f; } } ); //Sort from biggest singular value to smallest std::vector<double> w_temp(w.size()); w.copy_to_host(w_temp.data()); //Send to host std::reverse(w_temp.begin(), w_temp.end()); std::copy(w_temp.begin(), w_temp.begin() + _param.k, _w); Matrix<float>sigma(_param.k, 1); sigma.copy(w_temp.data()); //Get U matrix Matrix<float>U(X.rows(), _param.k); Matrix<float>QReversed(Q.rows(), Q.columns()); col_reverse_q(Q, QReversed, context); calculate_u(X, QReversed, sigma, U, context); U.copy_to_host(_U); //Send to host //Explained Variance Matrix<float>UmultSigma(U.rows(), U.columns()); //U * Sigma multiply_diag(U, sigma, UmultSigma, context, false); Matrix<float>UOnesSigma(UmultSigma.rows(), 1); UOnesSigma.fill(1.0f); Matrix<float>USigmaVar(_param.k, 1); Matrix<float>USigmaColMean(_param.k, 1); multiply(UmultSigma, UOnesSigma, USigmaColMean, context, true, false, 1.0f); float m_usigma = UmultSigma.rows(); multiply(USigmaColMean, 1/m_usigma, context); calc_var_numerator(UmultSigma, USigmaColMean, USigmaVar, context); multiply(USigmaVar, 1/m_usigma, context); USigmaVar.copy_to_host(_explained_variance); //Explained Variance Ratio //Set aside matrix of 1's for getting sum of columnar variances Matrix<float>XmultOnes(X.rows(), 1); XmultOnes.fill(1.0f); Matrix<float>XVar(X.columns(), 1); Matrix<float>XColMean(X.columns(), 1); multiply(X, XmultOnes, XColMean, context, true, false, 1.0f); float m = X.rows(); multiply(XColMean, 1/m, context); calc_var_numerator(X, XColMean, XVar, context); multiply(XVar, 1/m, context); Matrix<float>XVarSum(1,1); multiply(XVar, XmultOnes, XVarSum, context, true, false, 1.0f); Matrix<float>ExplainedVarRatio(_param.k, 1); divide(USigmaVar, XVarSum, ExplainedVarRatio, context); ExplainedVarRatio.copy_to_host(_explained_variance_ratio); } catch (const 
std::exception &e) { std::cerr << "tsvd error: " << e.what() << "\n"; } catch (std::string e) { std::cerr << "tsvd error: " << e << "\n"; } catch (...) { std::cerr << "tsvd error\n"; } } }
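In my own notation (not taken from the source), the quantities produced by truncated_svd_matrix above can be summarised as follows, with X an m x n input and k retained components:

\begin{align*}
  X^\top X\, v_i &= \lambda_i v_i,
  \qquad \sigma_i = \sqrt{\max(\lambda_i, 0)}, \quad \sigma_1 \ge \dots \ge \sigma_k,\\
  U &= X V_k \Sigma_k^{-1}
  \qquad (\text{columns with } \sigma_i = 0 \text{ are zeroed}),\\
  \texttt{explained\_variance}_i &= \frac{1}{m}\sum_{j=1}^{m}
      \Big((U\Sigma_k)_{ji} - \overline{(U\Sigma_k)}_{\,\cdot i}\Big)^2,\\
  \texttt{explained\_variance\_ratio}_i &=
      \frac{\texttt{explained\_variance}_i}{\sum_{c=1}^{n}\operatorname{Var}\!\big[X_{\cdot c}\big]}.
\end{align*}

Here V_k holds the top-k eigenvectors of X^T X (Q reversed and truncated in the code) and Sigma_k the corresponding singular values; the variances are population variances (division by m), matching the 1/m scalings applied after calc_var_numerator.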
2138a1664da6dc9b318ed4a25bee196ec6e1903f.cu
#include <cstdio> #include "cuda_runtime.h" #include "utils.cuh" #include "../device/device_context.cuh" #include "tsvd.h" #include <ctime> #include <thrust/iterator/counting_iterator.h> #include<algorithm> #include <thrust/sequence.h> namespace tsvd { /** * Division utility to get explained variance ratio * * @param XVar * @param XVarSum * @param ExplainedVarRatio * @param context */ void divide(const Matrix<float> &XVar, const Matrix<float> &XVarSum, Matrix<float> &ExplainedVarRatio, DeviceContext &context){ auto d_x_var = XVar.data(); auto d_x_var_sum = XVarSum.data(); auto d_expl_var_ratio = ExplainedVarRatio.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+ExplainedVarRatio.size(), [=]__device__(int idx){ float div_val = 0.0; //XVarSum can possibly be zero if(d_x_var_sum[0] != 0.0){ div_val = d_x_var[idx] / d_x_var_sum[0]; } d_expl_var_ratio[idx] = div_val; } ); } /** * Square each value in a matrix * * @param UmultSigma * @param UmultSigmaSquare * @param context */ void square_val(const Matrix<float> &UmultSigma, Matrix<float> &UmultSigmaSquare, DeviceContext &context){ auto n = UmultSigma.columns(); auto m = UmultSigma.rows(); auto k = UmultSigmaSquare.rows(); auto d_u_mult_sigma = UmultSigma.data(); auto d_u_mult_sigma_square = UmultSigmaSquare.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+UmultSigmaSquare.size(), [=]__device__(int idx){ float square_val = std::pow(d_u_mult_sigma[idx],2); d_u_mult_sigma_square[idx] = square_val; } ); } /** * Alternative variance calculation (Can be slow for big matrices) * * @param UmultSigma * @param k * @param UmultSigmaVar * @param context */ void calc_var(const Matrix<float>UmultSigma, int k, Matrix<float> &UmultSigmaVar, DeviceContext &context){ //Set aside matrix of 1's for getting columnar sums(t(UmultSima) * UmultOnes) Matrix<float>UmultOnes(UmultSigma.rows(), 1); UmultOnes.fill(1.0f); //Allocate matrices for variance calculation Matrix<float>UmultSigmaSquare(UmultSigma.rows(), UmultSigma.columns()); Matrix<float>UmultSigmaSum(k, 1); Matrix<float>UmultSigmaSumSquare(k, 1); Matrix<float>UmultSigmaSumOfSquare(k, 1); Matrix<float>UmultSigmaVarNum(k, 1); //Calculate Variance square_val(UmultSigma, UmultSigmaSquare, context); multiply(UmultSigmaSquare, UmultOnes, UmultSigmaSumOfSquare, context, true, false, 1.0f); multiply(UmultSigma, UmultOnes, UmultSigmaSum, context, true, false, 1.0f); square_val(UmultSigmaSum, UmultSigmaSumSquare, context); //Get rows auto m = UmultSigma.rows(); multiply(UmultSigmaSumOfSquare, m, context); subtract(UmultSigmaSumOfSquare, UmultSigmaSumSquare, UmultSigmaVarNum, context); auto d_u_sigma_var_num = UmultSigmaVarNum.data(); auto d_u_sigma_var = UmultSigmaVar.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+UmultSigmaVar.size(), [=]__device__(int idx){ float div_val = d_u_sigma_var_num[idx]/(std::pow(m,2)); d_u_sigma_var[idx] = div_val; } ); } template<typename T> class variance_iterator{ public: // Required iterator traits typedef variance_iterator<T> self_type; ///< My own type typedef size_t difference_type; ///< Type to express the result of subtracting one iterator from another typedef T value_type; ///< The type of the element the iterator can point to typedef T* pointer; ///< The type of a pointer to an element the iterator can point to typedef T reference; ///< The type of a reference to an element the iterator can point to typedef 
std::random_access_iterator_tag iterator_category; ///< The iterator category const T* data_ptr; const T* mean_ptr; const int col_rows; size_t offset; __device__ T operator[](size_t idx){ idx = idx + offset; T mean = mean_ptr[idx/col_rows]; T dev_square = pow((data_ptr[idx] - mean),2); return dev_square; } __device__ self_type operator+(size_t idx){ self_type retval(data_ptr, mean_ptr, col_rows); retval.offset += idx; return retval; } __host__ __device__ variance_iterator(const T* data_ptr, const T* mean_ptr, const int col_rows):data_ptr(data_ptr), mean_ptr(mean_ptr), col_rows(col_rows), offset(0){ } }; /** * Utility to calculate variance for each column of a matrix * * @param X * @param UColMean * @param UVar * @param context */ void calc_var_numerator(const Matrix<float> &X, const Matrix<float> &UColMean, Matrix<float> &UVar, DeviceContext &context){ auto m = X.rows(); variance_iterator<float> variance(X.data(), UColMean.data(), m); thrust::device_vector<int> segments(X.columns() + 1); thrust::sequence(segments.begin(), segments.end(), 0, static_cast<int>(X.rows())); // Determine temporary device storage requirements void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; int cols = static_cast<int>(X.columns()); cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(), cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1)); // Allocate temporary storage safe_cuda(cudaMalloc(&d_temp_storage, temp_storage_bytes)); // Run sum-reduction cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, variance, UVar.data(), cols, thrust::raw_pointer_cast(segments.data()), thrust::raw_pointer_cast(segments.data() + 1)); safe_cuda(cudaFree(d_temp_storage)); } /** * Utility to reverse q to show most import k to least important k * * @param Q * @param QReversed * @param context */ void col_reverse_q(const Matrix<float> &Q, Matrix<float> &QReversed, DeviceContext &context){ auto n = Q.columns(); auto m = Q.rows(); auto k = QReversed.rows(); auto d_q = Q.data(); auto d_q_reversed = QReversed.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+QReversed.size(), [=]__device__(int idx){ int dest_row = idx % m; int dest_col = idx/m; int src_row = dest_row; int src_col = (n - dest_col) - 1; d_q_reversed[idx] = d_q[src_col * m + src_row]; } ); } /** * Truncate Q transpose to top k * * @param Qt * @param QtTrunc * @param context */ void row_reverse_trunc_q(const Matrix<float> &Qt, Matrix<float> &QtTrunc, DeviceContext &context){ auto m = Qt.rows(); auto k = QtTrunc.rows(); auto d_q = Qt.data(); auto d_q_trunc = QtTrunc.data(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+QtTrunc.size(), [=]__device__(int idx){ int dest_row = idx % k; int dest_col = idx / k; int src_row = (m - dest_row) - 1; int src_col = dest_col; float q = d_q[src_col * m + src_row]; d_q_trunc[idx] = q; } ); } /** * Calculate the U matrix, which is defined as: * U = A*V/sigma where A is our X Matrix, V is Q, and sigma is 1/w_i * * @param X * @param Q * @param w * @param U * @param context */ void calculate_u(const Matrix<float> &X, const Matrix<float> &Q, const Matrix<float> &w, Matrix<float> &U, DeviceContext &context){ multiply(X, Q, U, context, false, false, 1.0f); //A*V auto d_u = U.data(); auto d_sigma = w.data(); auto column_size = U.rows(); auto counting = thrust::make_counting_iterator <int>(0); thrust::for_each(counting, counting+U.size(), 
[=]__device__(int idx){ int column = idx/column_size; float sigma = d_sigma[column]; float u = d_u[idx]; if(sigma != 0.0){ d_u[idx] = u * 1.0/sigma; } else{ d_u[idx] = 0.0; } } ); } /** * Conduct truncated SVD on a matrix * * @param _X * @param _Q * @param _w * @param _U * @param _explained_variance * @param _explained_variance_ratio * @param _param */ void truncated_svd(const double* _X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param) { Matrix<float>X(_param.X_m, _param.X_n); X.copy(_X); truncated_svd_matrix(X, _Q, _w, _U, _explained_variance, _explained_variance_ratio, _param); } void truncated_svd_matrix(Matrix<float> &X, double* _Q, double* _w, double* _U, double* _explained_variance, double* _explained_variance_ratio, params _param) { try { //Allocate matrix for X^TX Matrix<float>XtX(_param.X_n, _param.X_n); //create context DeviceContext context; //Multiply X and Xt and output result to XtX multiply(X, X, XtX, context, true, false, 1.0f); //Set up Q (V^T) and w (singular value) matrices (w is a matrix of size Q.rows() by 1; really just a vector Matrix<float>Q(XtX.rows(), XtX.columns()); // n X n -> V^T Matrix<float>w(Q.rows(), 1); calculate_eigen_pairs_exact(XtX, Q, w, context); //Obtain Q^T to obtain vector as row major order Matrix<float>Qt(Q.columns(), Q.rows()); transpose(Q, Qt, context); //Needed for calculate_u() Matrix<float>QtTrunc(_param.k, Qt.columns()); row_reverse_trunc_q(Qt, QtTrunc, context); QtTrunc.copy_to_host(_Q); //Send to host //Obtain square root of eigenvalues, which are singular values w.transform([=]__device__(float elem){ if(elem > 0.0){ return std::sqrt(elem); }else{ return 0.0f; } } ); //Sort from biggest singular value to smallest std::vector<double> w_temp(w.size()); w.copy_to_host(w_temp.data()); //Send to host std::reverse(w_temp.begin(), w_temp.end()); std::copy(w_temp.begin(), w_temp.begin() + _param.k, _w); Matrix<float>sigma(_param.k, 1); sigma.copy(w_temp.data()); //Get U matrix Matrix<float>U(X.rows(), _param.k); Matrix<float>QReversed(Q.rows(), Q.columns()); col_reverse_q(Q, QReversed, context); calculate_u(X, QReversed, sigma, U, context); U.copy_to_host(_U); //Send to host //Explained Variance Matrix<float>UmultSigma(U.rows(), U.columns()); //U * Sigma multiply_diag(U, sigma, UmultSigma, context, false); Matrix<float>UOnesSigma(UmultSigma.rows(), 1); UOnesSigma.fill(1.0f); Matrix<float>USigmaVar(_param.k, 1); Matrix<float>USigmaColMean(_param.k, 1); multiply(UmultSigma, UOnesSigma, USigmaColMean, context, true, false, 1.0f); float m_usigma = UmultSigma.rows(); multiply(USigmaColMean, 1/m_usigma, context); calc_var_numerator(UmultSigma, USigmaColMean, USigmaVar, context); multiply(USigmaVar, 1/m_usigma, context); USigmaVar.copy_to_host(_explained_variance); //Explained Variance Ratio //Set aside matrix of 1's for getting sum of columnar variances Matrix<float>XmultOnes(X.rows(), 1); XmultOnes.fill(1.0f); Matrix<float>XVar(X.columns(), 1); Matrix<float>XColMean(X.columns(), 1); multiply(X, XmultOnes, XColMean, context, true, false, 1.0f); float m = X.rows(); multiply(XColMean, 1/m, context); calc_var_numerator(X, XColMean, XVar, context); multiply(XVar, 1/m, context); Matrix<float>XVarSum(1,1); multiply(XVar, XmultOnes, XVarSum, context, true, false, 1.0f); Matrix<float>ExplainedVarRatio(_param.k, 1); divide(USigmaVar, XVarSum, ExplainedVarRatio, context); ExplainedVarRatio.copy_to_host(_explained_variance_ratio); } catch (const std::exception &e) { std::cerr << "tsvd error: " << 
e.what() << "\n"; } catch (std::string e) { std::cerr << "tsvd error: " << e << "\n"; } catch (...) { std::cerr << "tsvd error\n"; } } }
c2650d9f6ba1721e7ac8bb41364bf00fc2e479e6.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file transformer.cu * \brief GPU implementation of the operators used in Transformer */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime_api.h> #include <mxnet/base.h> #include "./transformer-inl.h" #include "../../common/cuda_utils.h" namespace mxnet { namespace op { // Approach in gemm_switch_fp32accum is coming from MLPerf v0.6 submission repository from NVIDIA // by https://github.com/kevinstephano template<typename DType> void CublasStridedBatchedGemm(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float alpha, const DType* a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount, hipblasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP) { #if TORCH_HIP_VERSION >= 9010 using namespace mxnet::common::cuda; CHECK_EQ(s->blas_handle_ownership_, mshadow::Stream<gpu>::OwnHandle) << "Must init CuBLAS handle in stream"; hipblasHandle_t blas_handle = mshadow::Stream<gpu>::GetBlasHandle(s); auto err = HIPBLAS_STATUS_SUCCESS; using TrueFP16Type = DType; using PseudoFP16Type = typename CublasType<DType>::ScaleType; // Set up alpha and beta values in the possible formats needed (only different when dtype == half) TrueFP16Type trueFP16_alpha = static_cast<TrueFP16Type>(alpha); TrueFP16Type trueFP16_beta = static_cast<TrueFP16Type>(beta); PseudoFP16Type pseudoFP16_alpha = static_cast<PseudoFP16Type>(alpha); PseudoFP16Type pseudoFP16_beta = static_cast<PseudoFP16Type>(beta); const void *alpha_ptr; const void *beta_ptr; hipDataType computeType; bool use_true_fp16 = dmlc::GetEnv("MXNET_FC_TRUE_FP16", false); if (use_true_fp16) { alpha_ptr = &trueFP16_alpha; beta_ptr = &trueFP16_beta; computeType = CublasType<TrueFP16Type>::kCudaFlag; } else { alpha_ptr = &pseudoFP16_alpha; beta_ptr = &pseudoFP16_beta; computeType = CublasType<PseudoFP16Type>::kCudaFlag; } err = hipblasGemmStridedBatchedEx( blas_handle, CublasTransposeOp(transA), CublasTransposeOp(transB), static_cast<int>(m), static_cast<int>(n), static_cast<int>(k), alpha_ptr, a, CublasType<DType>::kCudaFlag, static_cast<int>(lda), strideA, b, CublasType<DType>::kCudaFlag, static_cast<int>(ldb), strideB, beta_ptr, c, CublasType<DType>::kCudaFlag, static_cast<int>(ldc), strideC, static_cast<int>(batchCount), computeType, algo); CHECK_EQ(err, HIPBLAS_STATUS_SUCCESS) << "Cublas gemmEx fail."; #else LOG(FATAL) << "Not implemented with CUDA < 9.1"; #endif } template<typename DType> void gemm_switch_fp32accum(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float 
alpha, const DType *a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount) { hipStream_t stream = mshadow::Stream<gpu>::GetStream(s); if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_DEFAULT_TENSOR_OP); } else { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } CHECK_CUDA_ERROR("Error at InterleavedMatMul"); } // TODO(cfujitsang): use scale as optional ? void InterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float beta = req[0] == kAddTo ? 1.f : 0.f; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, scale, queries_keys_values + head_dim, lead_dim, batch_stride, queries_keys_values, lead_dim, batch_stride, beta, output, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); }) } void BackwardInterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); const float beta = req[0] == kAddTo ? 
1.f : 0.f; if (req[0] == kNullOp) return; if (req[0] == kWriteTo) { hipMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values + head_dim, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads, lead_dim, batch_stride, attn_batches); gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + head_dim, lead_dim, batch_stride, attn_batches); }) } void InterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { hipMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + 2 * head_dim, lead_dim, batch_stride, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); } }) } void InterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_q_dim = inputs[0].shape_[2]; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float beta = req[0] == kAddTo ? 
1.f : 0.f; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, scale, keys_values, lead_dim_kv, batch_stride_kv, queries, lead_dim_q, batch_stride_q, beta, output, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); }) } void BackwardInterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_q_dim = inputs[1].shape_[2]; const int32_t kv_seq_len = inputs[2].shape_[0]; const int32_t output_lin_kv_dim = inputs[2].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] != kNullOp) { const float beta = req[0] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, scale, keys_values, lead_dim_kv, batch_stride_kv, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, queries_grads, lead_dim_q, batch_stride_q, attn_batches); } if (req[1] != kNullOp) { if (req[1] == kWriteTo) { hipMemsetAsync(keys_values_grads, 0, outputs[1].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, scale, queries, lead_dim_q, batch_stride_q, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads, lead_dim_kv, batch_stride_kv, attn_batches); } }) } void InterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_kv_dim = inputs[0].shape_[2]; const int32_t attn_batches = inputs[1].shape_[0]; const int32_t q_seq_len = inputs[1].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 
1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t attn_batches = inputs[2].shape_[0]; const int32_t q_seq_len = inputs[2].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { hipMemsetAsync(keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads + head_dim, lead_dim_kv, batch_stride_kv, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); } }) } NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecValAttGPU); // relu NNVM_REGISTER_OP(_contrib_div_sqrt_dim) .set_attr<FCompute>("FCompute<gpu>", DivSqrtDimForward_<gpu>); } // namespace op } // namespace mxnet
c2650d9f6ba1721e7ac8bb41364bf00fc2e479e6.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2018 by Contributors * \file transformer.cu * \brief GPU implementation of the operators used in Transformer */ #include <cuda.h> #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include <mxnet/base.h> #include "./transformer-inl.h" #include "../../common/cuda_utils.h" namespace mxnet { namespace op { // Approach in gemm_switch_fp32accum is coming from MLPerf v0.6 submission repository from NVIDIA // by https://github.com/kevinstephano template<typename DType> void CublasStridedBatchedGemm(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float alpha, const DType* a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount, cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP) { #if CUDA_VERSION >= 9010 using namespace mxnet::common::cuda; CHECK_EQ(s->blas_handle_ownership_, mshadow::Stream<gpu>::OwnHandle) << "Must init CuBLAS handle in stream"; cublasHandle_t blas_handle = mshadow::Stream<gpu>::GetBlasHandle(s); auto err = CUBLAS_STATUS_SUCCESS; using TrueFP16Type = DType; using PseudoFP16Type = typename CublasType<DType>::ScaleType; // Set up alpha and beta values in the possible formats needed (only different when dtype == half) TrueFP16Type trueFP16_alpha = static_cast<TrueFP16Type>(alpha); TrueFP16Type trueFP16_beta = static_cast<TrueFP16Type>(beta); PseudoFP16Type pseudoFP16_alpha = static_cast<PseudoFP16Type>(alpha); PseudoFP16Type pseudoFP16_beta = static_cast<PseudoFP16Type>(beta); const void *alpha_ptr; const void *beta_ptr; cudaDataType_t computeType; bool use_true_fp16 = dmlc::GetEnv("MXNET_FC_TRUE_FP16", false); if (use_true_fp16) { alpha_ptr = &trueFP16_alpha; beta_ptr = &trueFP16_beta; computeType = CublasType<TrueFP16Type>::kCudaFlag; } else { alpha_ptr = &pseudoFP16_alpha; beta_ptr = &pseudoFP16_beta; computeType = CublasType<PseudoFP16Type>::kCudaFlag; } err = cublasGemmStridedBatchedEx( blas_handle, CublasTransposeOp(transA), CublasTransposeOp(transB), static_cast<int>(m), static_cast<int>(n), static_cast<int>(k), alpha_ptr, a, CublasType<DType>::kCudaFlag, static_cast<int>(lda), strideA, b, CublasType<DType>::kCudaFlag, static_cast<int>(ldb), strideB, beta_ptr, c, CublasType<DType>::kCudaFlag, static_cast<int>(ldc), strideC, static_cast<int>(batchCount), computeType, algo); CHECK_EQ(err, CUBLAS_STATUS_SUCCESS) << "Cublas gemmEx fail."; #else LOG(FATAL) << "Not implemented with CUDA < 9.1"; #endif } template<typename DType> void gemm_switch_fp32accum(mshadow::Stream<gpu>* s, bool transA, bool transB, int32_t m, int32_t n, int32_t k, float alpha, const DType *a, int32_t lda, int32_t strideA, const DType *b, int32_t ldb, int32_t 
strideB, float beta, DType *c, int32_t ldc, int32_t strideC, int32_t batchCount) { cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s); if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_DEFAULT_TENSOR_OP); } else { CublasStridedBatchedGemm(s, transA, transB, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } CHECK_CUDA_ERROR("Error at InterleavedMatMul"); } // TODO(cfujitsang): use scale as optional ? void InterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float beta = req[0] == kAddTo ? 1.f : 0.f; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, scale, queries_keys_values + head_dim, lead_dim, batch_stride, queries_keys_values, lead_dim, batch_stride, beta, output, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); }) } void BackwardInterleavedMatMulSelfAttQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float scale = 1.0 / sqrt(static_cast<float>(head_dim)); const float beta = req[0] == kAddTo ? 
1.f : 0.f; if (req[0] == kNullOp) return; if (req[0] == kWriteTo) { cudaMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values + head_dim, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads, lead_dim, batch_stride, attn_batches); gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, scale, queries_keys_values, lead_dim, batch_stride, output_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + head_dim, lead_dim, batch_stride, attn_batches); }) } void InterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries_keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_dim = inputs[0].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, qkv_seq_len, qkv_seq_len, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulSelfAttValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries_keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t qkv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_dim / 3; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim = attn_batches * 3 * head_dim; const int32_t batch_stride = 3 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { cudaMemsetAsync(queries_keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 
1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, qkv_seq_len, qkv_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, qkv_seq_len, qkv_seq_len * qkv_seq_len, beta, queries_keys_values_grads + 2 * head_dim, lead_dim, batch_stride, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, qkv_seq_len, qkv_seq_len, head_dim, alpha, queries_keys_values + 2 * head_dim, lead_dim, batch_stride, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, qkv_seq_len, qkv_seq_len * qkv_seq_len, attn_batches); } }) } void InterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* queries = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_q_dim = inputs[0].shape_[2]; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float beta = req[0] == kAddTo ? 
1.f : 0.f; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, scale, keys_values, lead_dim_kv, batch_stride_kv, queries, lead_dim_q, batch_stride_q, beta, output, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); }) } void BackwardInterleavedMatMulEncDecQKGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* queries = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* queries_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t q_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_q_dim = inputs[1].shape_[2]; const int32_t kv_seq_len = inputs[2].shape_[0]; const int32_t output_lin_kv_dim = inputs[2].shape_[2]; const int32_t embed_dim = output_lin_q_dim; const int32_t head_dim = embed_dim / params.heads; const int32_t attn_batches = params.heads * sequences; const int32_t lead_dim_q = attn_batches * head_dim; const int32_t lead_dim_kv = attn_batches * 2 * head_dim; const int32_t batch_stride_q = head_dim; const int32_t batch_stride_kv = head_dim * 2; const float scale = 1.f / sqrt(static_cast<float>(head_dim)); if (req[0] != kNullOp) { const float beta = req[0] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, scale, keys_values, lead_dim_kv, batch_stride_kv, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, queries_grads, lead_dim_q, batch_stride_q, attn_batches); } if (req[1] != kNullOp) { if (req[1] == kWriteTo) { cudaMemsetAsync(keys_values_grads, 0, outputs[1].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, scale, queries, lead_dim_q, batch_stride_q, output_grads, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads, lead_dim_kv, batch_stride_kv, attn_batches); } }) } void InterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* keys_values = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; DType* output = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[0].shape_[0]; const int32_t sequences = inputs[0].shape_[1]; const int32_t output_lin_kv_dim = inputs[0].shape_[2]; const int32_t attn_batches = inputs[1].shape_[0]; const int32_t q_seq_len = inputs[1].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; const float beta = req[0] == kAddTo ? 
1.f : 0.f; if (req[0] == kNullOp) return; gemm_switch_fp32accum(s, false, false, head_dim, q_seq_len, kv_seq_len, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, output, head_dim * attn_batches, head_dim, attn_batches); }) } void BackwardInterleavedMatMulEncDecValAttGPU(const nnvm::NodeAttrs& attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { const auto& params = nnvm::get<InterleavedMatMulParam>(attrs.parsed); mshadow::Stream<gpu>* s = ctx.get_stream<gpu>(); MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, { const DType* output_grads = inputs[0].FlatTo2D<gpu, DType>(s).dptr_; const DType* keys_values = inputs[1].FlatTo2D<gpu, DType>(s).dptr_; const DType* attention_maps = inputs[2].FlatTo2D<gpu, DType>(s).dptr_; DType* keys_values_grads = outputs[0].FlatTo2D<gpu, DType>(s).dptr_; DType* attention_maps_grads = outputs[1].FlatTo2D<gpu, DType>(s).dptr_; const int32_t kv_seq_len = inputs[1].shape_[0]; const int32_t sequences = inputs[1].shape_[1]; const int32_t output_lin_kv_dim = inputs[1].shape_[2]; const int32_t attn_batches = inputs[2].shape_[0]; const int32_t q_seq_len = inputs[2].shape_[1]; const int32_t embed_dim = output_lin_kv_dim / 2; int32_t head_dim = embed_dim / params.heads; const int32_t lead_dim_kv = attn_batches * head_dim * 2; const int32_t batch_stride_kv = 2 * head_dim; const float alpha = 1.f; if (req[0] != kNullOp) { if (req[0] == kWriteTo) { cudaMemsetAsync(keys_values_grads, 0, outputs[0].shape_.Size() * sizeof(DType), mshadow::Stream<gpu>::GetStream(s)); } const float beta = req[0] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, false, true, head_dim, kv_seq_len, q_seq_len, alpha, output_grads, head_dim * attn_batches, head_dim, attention_maps, kv_seq_len, kv_seq_len * q_seq_len, beta, keys_values_grads + head_dim, lead_dim_kv, batch_stride_kv, attn_batches); } if (req[1] != kNullOp) { const float beta = req[1] == kAddTo ? 1.f : 0.f; gemm_switch_fp32accum(s, true, false, kv_seq_len, q_seq_len, head_dim, alpha, keys_values + head_dim, lead_dim_kv, batch_stride_kv, output_grads, head_dim * attn_batches, head_dim, beta, attention_maps_grads, kv_seq_len, kv_seq_len * q_seq_len, attn_batches); } }) } NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_contrib_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", InterleavedMatMulEncDecValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_selfatt_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulSelfAttValAttGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_qk) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecQKGPU); NNVM_REGISTER_OP(_backward_interleaved_matmul_encdec_valatt) .set_attr<FCompute>("FCompute<gpu>", BackwardInterleavedMatMulEncDecValAttGPU); // relu NNVM_REGISTER_OP(_contrib_div_sqrt_dim) .set_attr<FCompute>("FCompute<gpu>", DivSqrtDimForward_<gpu>); } // namespace op } // namespace mxnet
54422a1a9502edcfe48d02d6b14b58cc55279da3.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** MIT License Copyright (c) 2016 Antti-Pekka Hynninen Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *******************************************************************************/ #include <stdio.h> #ifdef ENABLE_NVTOOLS #include <nvToolsExtCuda.h> #endif #include "CudaUtils.h" //---------------------------------------------------------------------------------------- void set_device_array_async_T(void *data, int value, const size_t ndata, hipStream_t stream, const size_t sizeofT) { cudaCheck(hipMemsetAsync(data, value, sizeofT*ndata, stream)); } void set_device_array_T(void *data, int value, const size_t ndata, const size_t sizeofT) { cudaCheck(hipMemset(data, value, sizeofT*ndata)); } //---------------------------------------------------------------------------------------- // // Copies memory Host -> Device // void copy_HtoD_async_T(const void *h_array, void *d_array, size_t array_len, hipStream_t stream, const size_t sizeofT) { cudaCheck(hipMemcpyAsync(d_array, h_array, sizeofT*array_len, hipMemcpyDefault, stream)); } void copy_HtoD_T(const void *h_array, void *d_array, size_t array_len, const size_t sizeofT) { cudaCheck(hipMemcpy(d_array, h_array, sizeofT*array_len, hipMemcpyDefault)); } //---------------------------------------------------------------------------------------- // // Copies memory Device -> Host // void copy_DtoH_async_T(const void *d_array, void *h_array, const size_t array_len, hipStream_t stream, const size_t sizeofT) { cudaCheck(hipMemcpyAsync(h_array, d_array, sizeofT*array_len, hipMemcpyDefault, stream)); } void copy_DtoH_T(const void *d_array, void *h_array, const size_t array_len, const size_t sizeofT) { cudaCheck(hipMemcpy(h_array, d_array, sizeofT*array_len, hipMemcpyDefault)); } //---------------------------------------------------------------------------------------- #ifdef ENABLE_NVTOOLS void gpuRangeStart(const char *range_name) { static int color_id=0; nvtxEventAttributes_t att; att.version = NVTX_VERSION; att.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; att.colorType = NVTX_COLOR_ARGB; if (color_id == 0) { att.color = 0xFFFF0000; } else if (color_id == 1) { att.color = 0xFF00FF00; } else if (color_id == 2) { att.color = 0xFF0000FF; } else if (color_id == 3) { att.color = 0xFFFF00FF; } color_id++; if (color_id > 3) color_id = 0; att.messageType = NVTX_MESSAGE_TYPE_ASCII; att.message.ascii = range_name; 
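// Push the NVTX range using the attributes assembled above. color_id cycles
// through four fixed ARGB colors (red, green, blue, magenta), presumably so
// that successive ranges are easy to tell apart in the profiler timeline.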
nvtxRangePushEx(&att); } void gpuRangeStop() { nvtxRangePop(); } #endif
54422a1a9502edcfe48d02d6b14b58cc55279da3.cu
/****************************************************************************** MIT License Copyright (c) 2016 Antti-Pekka Hynninen Copyright (c) 2016 Oak Ridge National Laboratory (UT-Batelle) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. *******************************************************************************/ #include <stdio.h> #ifdef ENABLE_NVTOOLS #include <nvToolsExtCuda.h> #endif #include "CudaUtils.h" //---------------------------------------------------------------------------------------- void set_device_array_async_T(void *data, int value, const size_t ndata, cudaStream_t stream, const size_t sizeofT) { cudaCheck(cudaMemsetAsync(data, value, sizeofT*ndata, stream)); } void set_device_array_T(void *data, int value, const size_t ndata, const size_t sizeofT) { cudaCheck(cudaMemset(data, value, sizeofT*ndata)); } //---------------------------------------------------------------------------------------- // // Copies memory Host -> Device // void copy_HtoD_async_T(const void *h_array, void *d_array, size_t array_len, cudaStream_t stream, const size_t sizeofT) { cudaCheck(cudaMemcpyAsync(d_array, h_array, sizeofT*array_len, cudaMemcpyDefault, stream)); } void copy_HtoD_T(const void *h_array, void *d_array, size_t array_len, const size_t sizeofT) { cudaCheck(cudaMemcpy(d_array, h_array, sizeofT*array_len, cudaMemcpyDefault)); } //---------------------------------------------------------------------------------------- // // Copies memory Device -> Host // void copy_DtoH_async_T(const void *d_array, void *h_array, const size_t array_len, cudaStream_t stream, const size_t sizeofT) { cudaCheck(cudaMemcpyAsync(h_array, d_array, sizeofT*array_len, cudaMemcpyDefault, stream)); } void copy_DtoH_T(const void *d_array, void *h_array, const size_t array_len, const size_t sizeofT) { cudaCheck(cudaMemcpy(h_array, d_array, sizeofT*array_len, cudaMemcpyDefault)); } //---------------------------------------------------------------------------------------- #ifdef ENABLE_NVTOOLS void gpuRangeStart(const char *range_name) { static int color_id=0; nvtxEventAttributes_t att; att.version = NVTX_VERSION; att.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; att.colorType = NVTX_COLOR_ARGB; if (color_id == 0) { att.color = 0xFFFF0000; } else if (color_id == 1) { att.color = 0xFF00FF00; } else if (color_id == 2) { att.color = 0xFF0000FF; } else if (color_id == 3) { att.color = 0xFFFF00FF; } color_id++; if (color_id > 3) color_id = 0; att.messageType = NVTX_MESSAGE_TYPE_ASCII; att.message.ascii = range_name; nvtxRangePushEx(&att); } void gpuRangeStop() { 
nvtxRangePop(); } #endif
be08bb247f73c85e40cfd22868abc3891a9de343.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <moderngpu/transform.hxx> #include <moderngpu/kernel_scan.hxx> #include <moderngpu/kernel_load_balance.hxx> using namespace mgpu; using namespace cub; __global__ void GetneighLen(uint32_t *nodes, int sizeNode, uint32_t *tr_offset, uint32_t *neighLen, uint32_t *changeColor){ for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=gridDim.x*blockDim.x) { neighLen[i] = tr_offset[nodes[i]+1]-tr_offset[nodes[i]]; changeColor[i] = 0; } } __global__ void FindChangeColor(uint32_t *changeColor, uint32_t sizeNode, uint32_t *nodes, int *wir, int *lbs, uint32_t sizeLbs, uint32_t *tr_col_id, uint32_t *tr_offset, int theColor, unsigned char *color) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeLbs; i+=blockDim.x*gridDim.x) { int neighborOwner = lbs[i]; int neighbor = tr_col_id[tr_offset[nodes[lbs[i]]] + wir[i]]; // if(threadIdx.x==0) // { // for(int x=0; x<sizeNode; x++) // printf("nodes[%d]=%d ", x, nodes[i]); // printf("\n"); // printf("theColor: %u\n", (unsigned char)theColor); // } // printf("\n"); // if(i==0) // printf("theColor: %u\n", (unsigned char)theColor); // if(i>=0 && i<=20) { // printf("thread %d, lbs[%d] is %d, wir[%d] is %d, nodes[lbs[%d]] is %d, my neighbor is %d, neighborOwner is %d, \n", i, i, lbs[i],i, wir[i], i, nodes[lbs[i]], neighbor, neighborOwner); // printf("thread %d, nodes[%d] is %d and color[%d] is %u\n", i, neighborOwner, nodes[neighborOwner], neighbor, color[neighbor]); // } if(color[neighbor] == (unsigned char)theColor) { changeColor[neighborOwner]=1; } } // if(threadIdx.x==0) // { // for(int i=0; i<sizeNode; i++) // printf("changeColor[%d]=%d ", i,changeColor[i] ); // printf("\n"); // } } __global__ void Conflict_assignColor(uint32_t *changeColor, int theColor, unsigned char *color, uint32_t *nodes, uint32_t sizeNode) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=blockDim.x*gridDim.x) { if(changeColor[i]==1) color[nodes[i]] = (unsigned char)theColor; } } __global__ void GenNewNodes(uint32_t *nodes, uint32_t *newNodes, uint32_t *neighLen, uint32_t *newNeighLen, int sizeNode, int *changeColor) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=blockDim.x*gridDim.x) { if(changeColor[i]+1 == changeColor[i+1]) { newNodes[changeColor[i]] = nodes[i]; newNeighLen[changeColor[i]] = neighLen[i]; } } } __global__ void WorkItemRank(int *scan, int *lbs, int *wir, int sizeLbs) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeLbs; i+=blockDim.x*gridDim.x) { wir[i] = i - scan[lbs[i]]; } } __global__ void ResetChangeColor(int sizeNode, uint32_t *changeColor) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=blockDim.x*gridDim.x) { changeColor[i]=0; } } int conflict_resolve_forgetabout_sharedmemory1(uint32_t* conflict_color, // Array of conflict vetices grouped by color uint32_t *conflict_color_offset, // offset of different color on conflit_color uint32_t *tr_col_id, // CSR of graph, but only lower triangle part uint32_t *tr_offset, // CSR offset of graph, but only lower triangle part uint32_t numVertices, // number of vertices uint32_t size_tr_col,// size of tr_col_id uint32_t numColor,// number of color has been used unsigned char *color,// color array for all vertices uint32_t colorID, int gridSize, int blockSize ) // working space and the size of this array shoudl be BLOCK_THREADS*ITEM_PER_THREAD, o,w it overwrittern information { standard_context_t context; uint32_t 
*nodes(NULL), *changeColor(NULL), *nodes1(NULL), *nodes2(NULL), *neighLen1(NULL), *neighLen2(NULL), *neighLen(NULL); uint32_t *newNodes(NULL), *newNeighLen(NULL); int start = conflict_color_offset[colorID]; int end = conflict_color_offset[colorID+1]; int sizeNode = end-start; if(sizeNode==0) return numColor; // std::cout<<"start: "<<start<<" end: "<<end<<" sizeNode: "<<sizeNode <<std::endl; nodes = conflict_color+start; HANDLE_ERROR(hipMallocManaged(&changeColor, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(hipMallocManaged(&nodes1, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(hipMallocManaged(&nodes2, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(hipMallocManaged(&neighLen1, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(hipMallocManaged(&neighLen2, sizeNode*sizeof(uint32_t))); int *lbs(NULL), *wir(NULL); int *scanArray(NULL); HANDLE_ERROR(hipMallocManaged(&scanArray, (sizeNode+1)*sizeof(int))); // std::cout<<"allocate succeed"<<std::endl; // for(int i=0; i<sizeNode; i++) // { // std::cout<<"nodes["<<i<<"]= "<<nodes[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; hipLaunchKernelGGL(( GetneighLen), dim3(gridSize), dim3(blockSize), 0, 0, nodes, sizeNode, tr_offset, neighLen1, changeColor); hipDeviceSynchronize(); // for(int i=0; i<sizeNode; i++) // { // std::cout<<"changeColor["<<i<<"]= "<<changeColor[i]<<" "; // } // std::cout<<std::endl; // for(int i=0; i<numVertices; i++) // { // printf("color[%d]=%u ", i, color[i]); // } // std::cout<<std::endl; int choseL = 0; int counter = 1; neighLen = neighLen1; int theColor = colorID; void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, neighLen, scanArray, sizeNode+1); HANDLE_ERROR(hipMalloc(&d_temp_storage, temp_storage_bytes)); while(true) { // for(int i=0; i<sizeNode; i++) // { // std::cout<<"neighLen["<<i<<"]= "<<neighLen[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, neighLen, scanArray, sizeNode+1); hipDeviceSynchronize(); // for(int i=0; i<sizeNode+1; i++) // { // std::cout<<"scan_neighLen["<<i<<"]= "<<scanArray[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; int sizeLbs = scanArray[sizeNode]; if(counter == 1) { HANDLE_ERROR(hipMallocManaged(&lbs, sizeLbs*sizeof(int))); HANDLE_ERROR(hipMallocManaged(&wir, sizeLbs*sizeof(int))); } load_balance_search(sizeLbs, scanArray, sizeNode, lbs, context); hipDeviceSynchronize(); // for(int i=0; i<sizeLbs; i++) // { // std::cout<<"lbs["<<i<<"]= "<<lbs[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; hipLaunchKernelGGL(( WorkItemRank), dim3(gridSize),dim3(blockSize), 0, 0, scanArray, lbs, wir, sizeLbs); hipDeviceSynchronize(); // for(int i=0; i<sizeLbs; i++) // { // std::cout<<"WIR["<<i<<"]= "<<wir[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; hipLaunchKernelGGL(( FindChangeColor), dim3(gridSize),dim3(blockSize), 0, 0, changeColor, sizeNode, nodes, wir, lbs, sizeLbs, tr_col_id, tr_offset, theColor, color); hipDeviceSynchronize(); theColor = numColor+counter; hipLaunchKernelGGL(( Conflict_assignColor), dim3(gridSize),dim3(blockSize), 0, 0, changeColor, theColor, color, nodes, sizeNode); hipDeviceSynchronize(); // for(int i=0; i<numVertices; i++) // { // printf("color[%d]=%u ", i, color[i]); // } // std::cout<<std::endl; // std::cout<<std::endl; DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, changeColor, scanArray, sizeNode+1); hipDeviceSynchronize(); // for(int i=0; i<sizeNode; i++) // { // 
std::cout<<"changeColor["<<i<<"]= "<<changeColor[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; // for(int i=0; i<sizeNode+1; i++) // { // std::cout<<"scan_changeColor["<<i<<"]= "<<scanArray[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; choseL = choseL^1; if(choseL == 1) { newNeighLen = neighLen2; newNodes = nodes2; } else { newNeighLen = neighLen1; newNodes = nodes1; } // std::cout<<"sizeNode: "<< sizeNode<<std::endl; hipLaunchKernelGGL(( GenNewNodes), dim3(gridSize),dim3(blockSize), 0, 0, nodes, newNodes, neighLen, newNeighLen, sizeNode, scanArray); hipDeviceSynchronize(); sizeNode = scanArray[sizeNode]; hipLaunchKernelGGL(( ResetChangeColor), dim3(gridSize),dim3(blockSize), 0, 0, sizeNode, changeColor); hipDeviceSynchronize(); if(sizeNode == 0) break; printf("new sizeNode: %d\n", sizeNode); nodes = newNodes; for(int i=0; i<sizeNode; i++) { std::cout<<"newNode["<<i<<"]= "<<nodes[i]<<" "; } std::cout<<std::endl; std::cout<<std::endl; neighLen = newNeighLen; // for(int i=0; i<sizeNode; i++) // { // std::cout<<"newNeighLen["<<i<<"]= "<<neighLen[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; // std::cout<<"counter: "<<counter<<std::endl; counter++; // std::cout<<std::endl; // std::cout<<std::endl; } std::cout<<counter-1<<" color is added, total number of color is "<<theColor-1<<std::endl; hipFree(nodes1); hipFree(nodes2); hipFree(neighLen1); hipFree(neighLen2); hipFree(changeColor); hipFree(scanArray); hipFree(d_temp_storage); hipFree(lbs); hipFree(wir); return theColor-1; }
be08bb247f73c85e40cfd22868abc3891a9de343.cu
#include <cub/util_allocator.cuh> #include <cub/device/device_scan.cuh> #include <moderngpu/transform.hxx> #include <moderngpu/kernel_scan.hxx> #include <moderngpu/kernel_load_balance.hxx> using namespace mgpu; using namespace cub; __global__ void GetneighLen(uint32_t *nodes, int sizeNode, uint32_t *tr_offset, uint32_t *neighLen, uint32_t *changeColor){ for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=gridDim.x*blockDim.x) { neighLen[i] = tr_offset[nodes[i]+1]-tr_offset[nodes[i]]; changeColor[i] = 0; } } __global__ void FindChangeColor(uint32_t *changeColor, uint32_t sizeNode, uint32_t *nodes, int *wir, int *lbs, uint32_t sizeLbs, uint32_t *tr_col_id, uint32_t *tr_offset, int theColor, unsigned char *color) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeLbs; i+=blockDim.x*gridDim.x) { int neighborOwner = lbs[i]; int neighbor = tr_col_id[tr_offset[nodes[lbs[i]]] + wir[i]]; // if(threadIdx.x==0) // { // for(int x=0; x<sizeNode; x++) // printf("nodes[%d]=%d ", x, nodes[i]); // printf("\n"); // printf("theColor: %u\n", (unsigned char)theColor); // } // printf("\n"); // if(i==0) // printf("theColor: %u\n", (unsigned char)theColor); // if(i>=0 && i<=20) { // printf("thread %d, lbs[%d] is %d, wir[%d] is %d, nodes[lbs[%d]] is %d, my neighbor is %d, neighborOwner is %d, \n", i, i, lbs[i],i, wir[i], i, nodes[lbs[i]], neighbor, neighborOwner); // printf("thread %d, nodes[%d] is %d and color[%d] is %u\n", i, neighborOwner, nodes[neighborOwner], neighbor, color[neighbor]); // } if(color[neighbor] == (unsigned char)theColor) { changeColor[neighborOwner]=1; } } // if(threadIdx.x==0) // { // for(int i=0; i<sizeNode; i++) // printf("changeColor[%d]=%d ", i,changeColor[i] ); // printf("\n"); // } } __global__ void Conflict_assignColor(uint32_t *changeColor, int theColor, unsigned char *color, uint32_t *nodes, uint32_t sizeNode) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=blockDim.x*gridDim.x) { if(changeColor[i]==1) color[nodes[i]] = (unsigned char)theColor; } } __global__ void GenNewNodes(uint32_t *nodes, uint32_t *newNodes, uint32_t *neighLen, uint32_t *newNeighLen, int sizeNode, int *changeColor) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=blockDim.x*gridDim.x) { if(changeColor[i]+1 == changeColor[i+1]) { newNodes[changeColor[i]] = nodes[i]; newNeighLen[changeColor[i]] = neighLen[i]; } } } __global__ void WorkItemRank(int *scan, int *lbs, int *wir, int sizeLbs) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeLbs; i+=blockDim.x*gridDim.x) { wir[i] = i - scan[lbs[i]]; } } __global__ void ResetChangeColor(int sizeNode, uint32_t *changeColor) { for(int i=blockIdx.x*blockDim.x+threadIdx.x; i<sizeNode; i+=blockDim.x*gridDim.x) { changeColor[i]=0; } } int conflict_resolve_forgetabout_sharedmemory1(uint32_t* conflict_color, // Array of conflict vetices grouped by color uint32_t *conflict_color_offset, // offset of different color on conflit_color uint32_t *tr_col_id, // CSR of graph, but only lower triangle part uint32_t *tr_offset, // CSR offset of graph, but only lower triangle part uint32_t numVertices, // number of vertices uint32_t size_tr_col,// size of tr_col_id uint32_t numColor,// number of color has been used unsigned char *color,// color array for all vertices uint32_t colorID, int gridSize, int blockSize ) // working space and the size of this array shoudl be BLOCK_THREADS*ITEM_PER_THREAD, o,w it overwrittern information { standard_context_t context; uint32_t *nodes(NULL), *changeColor(NULL), *nodes1(NULL), *nodes2(NULL), *neighLen1(NULL), 
*neighLen2(NULL), *neighLen(NULL); uint32_t *newNodes(NULL), *newNeighLen(NULL); int start = conflict_color_offset[colorID]; int end = conflict_color_offset[colorID+1]; int sizeNode = end-start; if(sizeNode==0) return numColor; // std::cout<<"start: "<<start<<" end: "<<end<<" sizeNode: "<<sizeNode <<std::endl; nodes = conflict_color+start; HANDLE_ERROR(cudaMallocManaged(&changeColor, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(cudaMallocManaged(&nodes1, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(cudaMallocManaged(&nodes2, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(cudaMallocManaged(&neighLen1, sizeNode*sizeof(uint32_t))); HANDLE_ERROR(cudaMallocManaged(&neighLen2, sizeNode*sizeof(uint32_t))); int *lbs(NULL), *wir(NULL); int *scanArray(NULL); HANDLE_ERROR(cudaMallocManaged(&scanArray, (sizeNode+1)*sizeof(int))); // std::cout<<"allocate succeed"<<std::endl; // for(int i=0; i<sizeNode; i++) // { // std::cout<<"nodes["<<i<<"]= "<<nodes[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; GetneighLen<<<gridSize, blockSize>>>(nodes, sizeNode, tr_offset, neighLen1, changeColor); cudaDeviceSynchronize(); // for(int i=0; i<sizeNode; i++) // { // std::cout<<"changeColor["<<i<<"]= "<<changeColor[i]<<" "; // } // std::cout<<std::endl; // for(int i=0; i<numVertices; i++) // { // printf("color[%d]=%u ", i, color[i]); // } // std::cout<<std::endl; int choseL = 0; int counter = 1; neighLen = neighLen1; int theColor = colorID; void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, neighLen, scanArray, sizeNode+1); HANDLE_ERROR(cudaMalloc(&d_temp_storage, temp_storage_bytes)); while(true) { // for(int i=0; i<sizeNode; i++) // { // std::cout<<"neighLen["<<i<<"]= "<<neighLen[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, neighLen, scanArray, sizeNode+1); cudaDeviceSynchronize(); // for(int i=0; i<sizeNode+1; i++) // { // std::cout<<"scan_neighLen["<<i<<"]= "<<scanArray[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; int sizeLbs = scanArray[sizeNode]; if(counter == 1) { HANDLE_ERROR(cudaMallocManaged(&lbs, sizeLbs*sizeof(int))); HANDLE_ERROR(cudaMallocManaged(&wir, sizeLbs*sizeof(int))); } load_balance_search(sizeLbs, scanArray, sizeNode, lbs, context); cudaDeviceSynchronize(); // for(int i=0; i<sizeLbs; i++) // { // std::cout<<"lbs["<<i<<"]= "<<lbs[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; WorkItemRank<<<gridSize,blockSize>>>(scanArray, lbs, wir, sizeLbs); cudaDeviceSynchronize(); // for(int i=0; i<sizeLbs; i++) // { // std::cout<<"WIR["<<i<<"]= "<<wir[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; FindChangeColor<<<gridSize,blockSize>>>(changeColor, sizeNode, nodes, wir, lbs, sizeLbs, tr_col_id, tr_offset, theColor, color); cudaDeviceSynchronize(); theColor = numColor+counter; Conflict_assignColor<<<gridSize,blockSize>>>(changeColor, theColor, color, nodes, sizeNode); cudaDeviceSynchronize(); // for(int i=0; i<numVertices; i++) // { // printf("color[%d]=%u ", i, color[i]); // } // std::cout<<std::endl; // std::cout<<std::endl; DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, changeColor, scanArray, sizeNode+1); cudaDeviceSynchronize(); // for(int i=0; i<sizeNode; i++) // { // std::cout<<"changeColor["<<i<<"]= "<<changeColor[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; // for(int i=0; i<sizeNode+1; i++) // { // std::cout<<"scan_changeColor["<<i<<"]= "<<scanArray[i]<<" "; // } 
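// The exclusive scan of changeColor (into scanArray) yields compaction
// offsets: GenNewNodes below gathers the recolored nodes and their neighbor
// counts into the other ping-pong buffer pair (nodes1/nodes2,
// neighLen1/neighLen2) selected by flipping choseL, and scanArray[sizeNode]
// becomes the surviving node count for the next iteration.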
// std::cout<<std::endl; // std::cout<<std::endl; choseL = choseL^1; if(choseL == 1) { newNeighLen = neighLen2; newNodes = nodes2; } else { newNeighLen = neighLen1; newNodes = nodes1; } // std::cout<<"sizeNode: "<< sizeNode<<std::endl; GenNewNodes<<<gridSize,blockSize>>>(nodes, newNodes, neighLen, newNeighLen, sizeNode, scanArray); cudaDeviceSynchronize(); sizeNode = scanArray[sizeNode]; ResetChangeColor<<<gridSize,blockSize>>>(sizeNode, changeColor); cudaDeviceSynchronize(); if(sizeNode == 0) break; printf("new sizeNode: %d\n", sizeNode); nodes = newNodes; for(int i=0; i<sizeNode; i++) { std::cout<<"newNode["<<i<<"]= "<<nodes[i]<<" "; } std::cout<<std::endl; std::cout<<std::endl; neighLen = newNeighLen; // for(int i=0; i<sizeNode; i++) // { // std::cout<<"newNeighLen["<<i<<"]= "<<neighLen[i]<<" "; // } // std::cout<<std::endl; // std::cout<<std::endl; // std::cout<<"counter: "<<counter<<std::endl; counter++; // std::cout<<std::endl; // std::cout<<std::endl; } std::cout<<counter-1<<" color is added, total number of color is "<<theColor-1<<std::endl; cudaFree(nodes1); cudaFree(nodes2); cudaFree(neighLen1); cudaFree(neighLen2); cudaFree(changeColor); cudaFree(scanArray); cudaFree(d_temp_storage); cudaFree(lbs); cudaFree(wir); return theColor-1; }
6b2ff27d3659f51258bc0db8353a02a99613d97f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float* var_13,float* var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) { float tmp_1 = (var_3 * +0.0f * var_4); comp += tmp_1 * var_5 * +1.7524E35f * var_6; comp = +1.1666E-37f + (-1.7689E-36f + var_7 + -0.0f + +1.9863E-37f); for (int i=0; i < var_1; ++i) { comp = asinf((-1.1271E-35f + var_8 - var_9 / -1.2014E-25f)); float tmp_2 = -0.0f; comp = tmp_2 / (var_10 * var_11 + (-1.8626E-41f / (+0.0f - var_12))); } for (int i=0; i < var_2; ++i) { var_13[i] = -1.4305E-43f; var_14[i] = asinf(asinf(-1.9549E-35f / (var_15 * (var_16 * -1.1807E35f * coshf(-1.8153E-26f / -1.4167E15f / atanf(+1.4834E-44f)))))); comp += var_14[i] - var_13[i] - +1.6842E26f / sinhf(sinhf(fmodf(atanf(var_17 * var_18), -1.0377E-42f / atanf(+1.2541E-43f + (var_19 / var_20))))); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float* tmp_14 = initPointer( atof(argv[14]) ); float* tmp_15 = initPointer( atof(argv[15]) ); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21); hipDeviceSynchronize(); return 0; }
6b2ff27d3659f51258bc0db8353a02a99613d97f.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float* var_13,float* var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) { float tmp_1 = (var_3 * +0.0f * var_4); comp += tmp_1 * var_5 * +1.7524E35f * var_6; comp = +1.1666E-37f + (-1.7689E-36f + var_7 + -0.0f + +1.9863E-37f); for (int i=0; i < var_1; ++i) { comp = asinf((-1.1271E-35f + var_8 - var_9 / -1.2014E-25f)); float tmp_2 = -0.0f; comp = tmp_2 / (var_10 * var_11 + (-1.8626E-41f / (+0.0f - var_12))); } for (int i=0; i < var_2; ++i) { var_13[i] = -1.4305E-43f; var_14[i] = asinf(asinf(-1.9549E-35f / (var_15 * (var_16 * -1.1807E35f * coshf(-1.8153E-26f / -1.4167E15f / atanf(+1.4834E-44f)))))); comp += var_14[i] - var_13[i] - +1.6842E26f / sinhf(sinhf(fmodf(atanf(var_17 * var_18), -1.0377E-42f / atanf(+1.2541E-43f + (var_19 / var_20))))); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float* tmp_14 = initPointer( atof(argv[14]) ); float* tmp_15 = initPointer( atof(argv[15]) ); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21); cudaDeviceSynchronize(); return 0; }
66ce1f044e90d9c8ab1ba1c04824926bd839958e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <thrust/device_ptr.h> #include <thrust/scan.h> // You can use any other block size you wish. 
#define BLOCK_SIZE 256 #define DEFAULT_NUM_ELEMENTS 16777216 #define MAX_RAND 2 int LOG_BLOCK_SIZE; void getLogBlockSize(int block_size) { for(LOG_BLOCK_SIZE = 1;LOG_BLOCK_SIZE < 31; LOG_BLOCK_SIZE++) { if((1<<LOG_BLOCK_SIZE) >= block_size) return; } fprintf(stderr, "The size requested might be too large!\n"); exit(-1); } __global__ void kernel_reduction(float *inArray, int numElements, int stride, int numRest) { int tid = threadIdx.x; int bidx = blockIdx.x, bidy = blockIdx.y; int idx = tid + blockDim.x * bidx + blockDim.x * gridDim.x * bidy; __shared__ float idata[(BLOCK_SIZE << 1)+256]; int copyIdx = stride * ((idx << 1) + 1) - 1; int copyToIdx = tid<<1; copyToIdx += (copyToIdx>>4); idata[copyToIdx] = inArray[copyIdx]; idata[copyToIdx+1] = idata[copyToIdx] + inArray[copyIdx + stride]; __syncthreads(); int localStride = 2; for(numRest>>=1;numRest > 1; numRest >>= 1, localStride <<= 1) { if((tid<<1) < numRest) { int idxOne = (localStride << 1) * (tid + 1) - 1; int idxTwo = idxOne - localStride; idxOne += (idxOne >> 4); idxTwo += (idxTwo >> 4); idata[idxOne] += idata[idxTwo]; } __syncthreads(); } inArray[copyIdx] = idata[copyToIdx]; inArray[copyIdx+stride] = idata[copyToIdx+1]; } __global__ void kernel_downtraverse(float *inArray, int numElements, int startStride, int LOG_BLOCK_SIZE) { int tid = threadIdx.x; int bidx = blockIdx.x, bidy = blockIdx.y; int idx = tid + blockDim.x * bidx + blockDim.x * gridDim.x * bidy; int finalStride = (startStride >> LOG_BLOCK_SIZE); if(finalStride <= 0) finalStride = 1; if((startStride << 1) == numElements) { __shared__ float idata[(BLOCK_SIZE<<1)+256]; int copyIdx = finalStride * ((idx << 1) + 1) - 1; int copyToIdx = (tid<<1); copyToIdx += (copyToIdx>>4); if(copyIdx < numElements){ idata[copyToIdx] = inArray[copyIdx]; idata[copyToIdx + 1] = inArray[copyIdx+finalStride]; } __syncthreads(); int localStride = blockDim.x; while(localStride >= 1) { int idxOne = (localStride << 1) * (tid + 1) - 1; if(idxOne < (blockDim.x<<1)) { int idxTwo = idxOne - localStride; idxOne += (idxOne>>4); idxTwo += (idxTwo>>4); float tmp = idata[idxOne] + idata[idxTwo]; idata[idxTwo] = idata[idxOne]; idata[idxOne] = tmp; } localStride >>= 1; __syncthreads(); } if(copyIdx < numElements) { inArray[copyIdx] = idata[copyToIdx]; inArray[copyIdx+finalStride] = idata[copyToIdx+1]; } } else { int stride = startStride; int idxOne = (stride << 1) * (idx + 1) - 1; if(idxOne < numElements) { int idxTwo = idxOne - stride; float tmp = inArray[idxOne] + inArray[idxTwo]; inArray[idxTwo] = inArray[idxOne]; inArray[idxOne] = tmp; } } } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. 
void prescanArray(float *inArray, int numElements) { thrust::device_ptr<float> dev_ptr(inArray); thrust::exclusive_scan(dev_ptr, dev_ptr + numElements, dev_ptr); } // **===-----------------------------------------------------------===** //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); unsigned getSmallestPower2(unsigned); unsigned int cutComparef( float *reference, float *h_data, int num_elements, float err); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// // Get the power of 2 which is the least of the all powers that are not smaller // than the given number //////////////////////////////////////////////////////////////////////////////// int getSmallestPower2(int num) { int result = 1; while(result < num && result > 0) result <<= 1; if(result <= 0 || num <= 0) { fprintf(stderr, "The size requested might be two large!\n"); exit(-1); } return result; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { float device_time; float host_time; int num_elements = 0; // Must support large, non-power-of-2 arrays int compare_size = 0; // allocate host memory to store the input data unsigned int mem_size = sizeof( float) * num_elements; float* h_data = NULL; // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicates the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. 
switch(argc-1) { default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements if(argc <= 1) compare_size = num_elements = DEFAULT_NUM_ELEMENTS; else compare_size = num_elements = atoi(argv[1]); int tmp_size = num_elements; num_elements = getSmallestPower2(num_elements); // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; //h_data = (float*) malloc( mem_size); hipHostMalloc(&h_data, mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = 0.0f; } for( unsigned int i = 0; i < tmp_size; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND)*2 - 1; } break; } getLogBlockSize(BLOCK_SIZE); hipEvent_t time_start; hipEvent_t time_end; hipEventCreate(&time_start); hipEventCreate(&time_end); // compute reference solution float* reference = (float*) malloc( mem_size); hipEventRecord(time_start, 0); computeGold( reference, h_data, num_elements); hipEventRecord(time_end, 0); hipEventSynchronize(time_end); hipEventElapsedTime(&host_time, time_start, time_end); printf("\n\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); printf("Host CPU Processing time: %f (ms)\n", host_time); // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; hipMalloc( (void**) &d_idata, mem_size); hipMalloc( (void**) &d_odata, mem_size); // **===-------- Allocate data structure here -----------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray(d_idata, 16); // Run the prescan hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice); hipEventRecord(time_start, 0); // **===-------- Modify the body of this function -----------===** prescanArray(d_idata, num_elements); // **===-----------------------------------------------------------===** hipDeviceSynchronize(); hipEventRecord(time_end, 0); hipEventSynchronize(time_end); hipEventElapsedTime(&device_time, time_start, time_end); // copy result from device to host hipMemcpy( h_data, d_idata, sizeof(float) * compare_size, hipMemcpyDeviceToHost); printf("CUDA Processing time: %f (ms)\n", device_time); printf("Speedup: %fX\n", host_time/device_time); // **===-------- Deallocate data structure here -----------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // Check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, compare_size, 1e-6); printf( "Test %s\n", (1 == result_regtest) ? 
"PASSED" : "FAILED"); // cleanup memory hipHostFree(h_data); free( reference); hipFree( d_odata); hipFree( d_idata); printf("------------------------------------------------------\n\n"); } unsigned int cutComparef( float *reference, float *h_data, int num_elements, float err) { int i; int diff_count = 0; for (i = 0; i < num_elements; i++) { float diff = fabs(reference[i] - h_data[i]); float denominator = 1.f; if (denominator < fabs(reference[i])) { denominator = fabs(reference[i]); } if (i % 1000000 == 0) { //printf("Diff at %d: %g %g\n", i, diff, diff / denominator); } if (!(diff / denominator < err)) { //printf("Diff at %d: %g %g\n", i, diff, diff / denominator); getchar(); diff_count ++; } } if (diff_count > 0) { printf("Number of difference: %d\n", diff_count); return 0; } else { return 1; } }
66ce1f044e90d9c8ab1ba1c04824926bd839958e.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <thrust/device_ptr.h> #include <thrust/scan.h> // You can use any other block size you wish. 
#define BLOCK_SIZE 256 #define DEFAULT_NUM_ELEMENTS 16777216 #define MAX_RAND 2 int LOG_BLOCK_SIZE; void getLogBlockSize(int block_size) { for(LOG_BLOCK_SIZE = 1;LOG_BLOCK_SIZE < 31; LOG_BLOCK_SIZE++) { if((1<<LOG_BLOCK_SIZE) >= block_size) return; } fprintf(stderr, "The size requested might be too large!\n"); exit(-1); } __global__ void kernel_reduction(float *inArray, int numElements, int stride, int numRest) { int tid = threadIdx.x; int bidx = blockIdx.x, bidy = blockIdx.y; int idx = tid + blockDim.x * bidx + blockDim.x * gridDim.x * bidy; __shared__ float idata[(BLOCK_SIZE << 1)+256]; int copyIdx = stride * ((idx << 1) + 1) - 1; int copyToIdx = tid<<1; copyToIdx += (copyToIdx>>4); idata[copyToIdx] = inArray[copyIdx]; idata[copyToIdx+1] = idata[copyToIdx] + inArray[copyIdx + stride]; __syncthreads(); int localStride = 2; for(numRest>>=1;numRest > 1; numRest >>= 1, localStride <<= 1) { if((tid<<1) < numRest) { int idxOne = (localStride << 1) * (tid + 1) - 1; int idxTwo = idxOne - localStride; idxOne += (idxOne >> 4); idxTwo += (idxTwo >> 4); idata[idxOne] += idata[idxTwo]; } __syncthreads(); } inArray[copyIdx] = idata[copyToIdx]; inArray[copyIdx+stride] = idata[copyToIdx+1]; } __global__ void kernel_downtraverse(float *inArray, int numElements, int startStride, int LOG_BLOCK_SIZE) { int tid = threadIdx.x; int bidx = blockIdx.x, bidy = blockIdx.y; int idx = tid + blockDim.x * bidx + blockDim.x * gridDim.x * bidy; int finalStride = (startStride >> LOG_BLOCK_SIZE); if(finalStride <= 0) finalStride = 1; if((startStride << 1) == numElements) { __shared__ float idata[(BLOCK_SIZE<<1)+256]; int copyIdx = finalStride * ((idx << 1) + 1) - 1; int copyToIdx = (tid<<1); copyToIdx += (copyToIdx>>4); if(copyIdx < numElements){ idata[copyToIdx] = inArray[copyIdx]; idata[copyToIdx + 1] = inArray[copyIdx+finalStride]; } __syncthreads(); int localStride = blockDim.x; while(localStride >= 1) { int idxOne = (localStride << 1) * (tid + 1) - 1; if(idxOne < (blockDim.x<<1)) { int idxTwo = idxOne - localStride; idxOne += (idxOne>>4); idxTwo += (idxTwo>>4); float tmp = idata[idxOne] + idata[idxTwo]; idata[idxTwo] = idata[idxOne]; idata[idxOne] = tmp; } localStride >>= 1; __syncthreads(); } if(copyIdx < numElements) { inArray[copyIdx] = idata[copyToIdx]; inArray[copyIdx+finalStride] = idata[copyToIdx+1]; } } else { int stride = startStride; int idxOne = (stride << 1) * (idx + 1) - 1; if(idxOne < numElements) { int idxTwo = idxOne - stride; float tmp = inArray[idxOne] + inArray[idxTwo]; inArray[idxTwo] = inArray[idxOne]; inArray[idxOne] = tmp; } } } // **===-------- Modify the body of this function -----------===** // You may need to make multiple kernel calls. 
void prescanArray(float *inArray, int numElements) { thrust::device_ptr<float> dev_ptr(inArray); thrust::exclusive_scan(dev_ptr, dev_ptr + numElements, dev_ptr); } // **===-----------------------------------------------------------===** //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); unsigned getSmallestPower2(unsigned); unsigned int cutComparef( float *reference, float *h_data, int num_elements, float err); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// // Get the power of 2 which is the least of the all powers that are not smaller // than the given number //////////////////////////////////////////////////////////////////////////////// int getSmallestPower2(int num) { int result = 1; while(result < num && result > 0) result <<= 1; if(result <= 0 || num <= 0) { fprintf(stderr, "The size requested might be two large!\n"); exit(-1); } return result; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { float device_time; float host_time; int num_elements = 0; // Must support large, non-power-of-2 arrays int compare_size = 0; // allocate host memory to store the input data unsigned int mem_size = sizeof( float) * num_elements; float* h_data = NULL; // * No arguments: Randomly generate input data and compare against the // host's result. // * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicates the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. 
switch(argc-1) { default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements if(argc <= 1) compare_size = num_elements = DEFAULT_NUM_ELEMENTS; else compare_size = num_elements = atoi(argv[1]); int tmp_size = num_elements; num_elements = getSmallestPower2(num_elements); // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; //h_data = (float*) malloc( mem_size); cudaMallocHost(&h_data, mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = 0.0f; } for( unsigned int i = 0; i < tmp_size; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND)*2 - 1; } break; } getLogBlockSize(BLOCK_SIZE); cudaEvent_t time_start; cudaEvent_t time_end; cudaEventCreate(&time_start); cudaEventCreate(&time_end); // compute reference solution float* reference = (float*) malloc( mem_size); cudaEventRecord(time_start, 0); computeGold( reference, h_data, num_elements); cudaEventRecord(time_end, 0); cudaEventSynchronize(time_end); cudaEventElapsedTime(&host_time, time_start, time_end); printf("\n\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); printf("Host CPU Processing time: %f (ms)\n", host_time); // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; cudaMalloc( (void**) &d_idata, mem_size); cudaMalloc( (void**) &d_odata, mem_size); // **===-------- Allocate data structure here -----------===** // preallocBlockSums(num_elements); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray(d_idata, 16); // Run the prescan cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice); cudaEventRecord(time_start, 0); // **===-------- Modify the body of this function -----------===** prescanArray(d_idata, num_elements); // **===-----------------------------------------------------------===** cudaThreadSynchronize(); cudaEventRecord(time_end, 0); cudaEventSynchronize(time_end); cudaEventElapsedTime(&device_time, time_start, time_end); // copy result from device to host cudaMemcpy( h_data, d_idata, sizeof(float) * compare_size, cudaMemcpyDeviceToHost); printf("CUDA Processing time: %f (ms)\n", device_time); printf("Speedup: %fX\n", host_time/device_time); // **===-------- Deallocate data structure here -----------===** // deallocBlockSums(); // **===-----------------------------------------------------------===** // Check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, compare_size, 1e-6); printf( "Test %s\n", (1 == result_regtest) ? 
"PASSED" : "FAILED"); // cleanup memory cudaFreeHost(h_data); free( reference); cudaFree( d_odata); cudaFree( d_idata); printf("------------------------------------------------------\n\n"); } unsigned int cutComparef( float *reference, float *h_data, int num_elements, float err) { int i; int diff_count = 0; for (i = 0; i < num_elements; i++) { float diff = fabs(reference[i] - h_data[i]); float denominator = 1.f; if (denominator < fabs(reference[i])) { denominator = fabs(reference[i]); } if (i % 1000000 == 0) { //printf("Diff at %d: %g %g\n", i, diff, diff / denominator); } if (!(diff / denominator < err)) { //printf("Diff at %d: %g %g\n", i, diff, diff / denominator); getchar(); diff_count ++; } } if (diff_count > 0) { printf("Number of difference: %d\n", diff_count); return 0; } else { return 1; } }
d43ea4f1a0878019413ed847cb51a76e6d7c86bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> #include <gpu_error.cuh> __global__ void saxpy(int n, float a, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = index; i < n; i += stride) { y[i] = a * x[i] + y[i]; } } int main(void) { int N = 1 << 20; float *x, *y; // Allocate unified memory accessible from host and device errchk( hipMallocManaged(&x, N * sizeof(float)) ); errchk( hipMallocManaged(&y, N * sizeof(float)) ); for (int i = 0; i < N; ++i) { x[i] = 3.0f; y[i] = 4.0f; } int blockSize = 1 << 8; int numBlocks = 1 << 10; hipLaunchKernelGGL(( saxpy), dim3(numBlocks), dim3(blockSize), 0, 0, N, 2.0f, x, y); errchk( hipPeekAtLastError() ); errchk( hipDeviceSynchronize() ); float max_error = 0.0f; float expected = 10.0f; for (int i = 0; i < N; ++i) { max_error = max(max_error, fabs(y[i] - expected)); } printf("Max error: %.5f\n", max_error); // Free memory errchk( hipFree(x) ); errchk( hipFree(y) ); }
d43ea4f1a0878019413ed847cb51a76e6d7c86bd.cu
#include <stdio.h> #include <math.h> #include <gpu_error.cuh> __global__ void saxpy(int n, float a, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = index; i < n; i += stride) { y[i] = a * x[i] + y[i]; } } int main(void) { int N = 1 << 20; float *x, *y; // Allocate unified memory accessible from host and device errchk( cudaMallocManaged(&x, N * sizeof(float)) ); errchk( cudaMallocManaged(&y, N * sizeof(float)) ); for (int i = 0; i < N; ++i) { x[i] = 3.0f; y[i] = 4.0f; } int blockSize = 1 << 8; int numBlocks = 1 << 10; saxpy<<<numBlocks, blockSize>>>(N, 2.0f, x, y); errchk( cudaPeekAtLastError() ); errchk( cudaDeviceSynchronize() ); float max_error = 0.0f; float expected = 10.0f; for (int i = 0; i < N; ++i) { max_error = max(max_error, fabs(y[i] - expected)); } printf("Max error: %.5f\n", max_error); // Free memory errchk( cudaFree(x) ); errchk( cudaFree(y) ); }
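// Hedged sketch: gpu_error.cuh is not included in the pair above, so this is only a guess
// at what its errchk() macro might look like -- a conventional CUDA error check wrapping a
// runtime-API return value. It is an assumption, not the project's actual definition.
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define errchk(call)                                                      \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s at %s:%d: %s\n",               \
                    cudaGetErrorName(err_), __FILE__, __LINE__,           \
                    cudaGetErrorString(err_));                            \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)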
168c40f49e00cd0d0b08f76b95bc18608621d6d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./argsort.cuh" #include "./bitonic_sort.cuh" #include "megdnn/basic_types.h" #include "src/cuda/utils.cuh" #include "src/cuda/hipcub/hipcub.hpp" #include "src/cuda/cub/device/device_segmented_radix_sort.cuh" #include "src/cuda/kernel_common/diagnostic_prologue.cuh" using namespace megdnn; using namespace cuda; namespace { struct StridedOffsetIterator { int bias, stride; StridedOffsetIterator(int bias_, int stride_) : bias(bias_), stride(stride_) {} __device__ __forceinline__ int operator[](int i) const { return stride * i + bias; } }; bool use_bitonic(uint32_t /*M*/, uint32_t N) { // bitonic sort is preferred when N is small (alwyas faster than radix sort) return N <= BITONIC_SORT_MAX_LENGTH; } bool use_segmented(uint32_t M, uint32_t /*N*/) { // an empirical value: // sort(1, 1e6): 0.574ms // segsort({1,2,8,16}, 1e6): 7-8ms // sort(1, 1e7): 3.425ms // segsort({1,2,8,16}, 1e7): 71-84ms // // segsort is about 7x-10x slower than sort on small batches, so we can // expect it to be faster than sort when batch is large enough. return M >= 8; } __global__ void kern_arange(int* dst, uint32_t n, uint32_t mod) { uint32_t i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n) { dst[i] = i % mod; } } template <typename ctype> size_t get_sort_workspace(uint32_t M, uint32_t N, bool is_ascending) { if (use_bitonic(M, N)) { return 0; } return argsort::cub_sort_pairs<ctype, int>( is_ascending, NULL, 0, NULL, NULL, NULL, NULL, M, N, 0, sizeof(float) * 8, NULL); } } // anonymous namespace template <typename KeyType, typename ValueType> MEGDNN_NOINLINE size_t argsort::cub_sort_pairs( bool is_ascending, void* workspace, size_t workspace_size, const KeyType* keys_in, KeyType* keys_out, const ValueType* values_in, ValueType* values_out, uint32_t M, uint32_t N, int begin_bit, int end_bit, hipStream_t stream) { hipError_t err; if (use_segmented(M, N)) { if (is_ascending) { err = hipcub::DeviceSegmentedRadixSort::SortPairs( workspace, workspace_size, keys_in, keys_out, values_in, values_out, N * M, M, StridedOffsetIterator(0, N), StridedOffsetIterator(N, N), begin_bit, end_bit, stream); cuda_check(err); } else { err = hipcub::DeviceSegmentedRadixSort::SortPairsDescending( workspace, workspace_size, keys_in, keys_out, values_in, values_out, N * M, M, StridedOffsetIterator(0, N), StridedOffsetIterator(N, N), begin_bit, end_bit, stream); cuda_check(err); } } else { if (is_ascending) { for (size_t i = 0; i < M; ++i) { err = hipcub::DeviceRadixSort::SortPairs( workspace, workspace_size, keys_in + N * i, keys_out + N * i, values_in + N * i, values_out + N * i, N, begin_bit, end_bit, stream); cuda_check(err); if (!keys_in) { return workspace_size; } } } else { for (size_t i = 0; i < M; ++i) { err = hipcub::DeviceRadixSort::SortPairsDescending( workspace, workspace_size, keys_in + N * i, keys_out + N * i, values_in + N * i, values_out + N * i, N, begin_bit, end_bit, stream); cuda_check(err); if (!keys_in) { return workspace_size; } } } } return workspace_size; } size_t argsort::get_fwd_workspace_in_bytes( uint32_t M, uint32_t N, DType dtype, bool is_ascending, bool iptr_src_given) { size_t size = 0; switch (dtype.enumv().ev) { #define cb(ctype) \ case DTypeTrait<ctype>::enumv: \ size = get_sort_workspace<ctype>(M, N, is_ascending); \ break; ARGSORT_FOREACH_CTYPE(cb) #undef cb default: megdnn_throw("argsort only supports float, int32 and float16"); } if (!iptr_src_given) { size = DIVUP(size, sizeof(float)) * sizeof(float) + M * 
N * sizeof(int); } return size; } template <typename dtype> void argsort::forward( const dtype* sptr, dtype* dptr, int* iptr, void* workspace, uint32_t M, uint32_t N, bool is_ascending, hipStream_t stream, const int* iptr_src) { size_t wk_size = get_sort_workspace<dtype>(M, N, is_ascending); if (!iptr_src) { int* ptr = reinterpret_cast<int*>( static_cast<uint8_t*>(workspace) + DIVUP(wk_size, sizeof(float)) * sizeof(float)); hipLaunchKernelGGL(( kern_arange), dim3(DIVUP(N * M, 512)), dim3(512), 0, stream, ptr, M * N, N); iptr_src = ptr; } if (use_bitonic(M, N)) { cuda_check( bitonic_sort(M, N, sptr, iptr_src, dptr, iptr, is_ascending, stream)); } else { cub_sort_pairs( is_ascending, workspace, wk_size, sptr, dptr, iptr_src, iptr, M, N, 0, sizeof(float) * 8, stream); } } namespace megdnn { namespace cuda { #define INST_CUB_SORT(dtype) \ template MEGDNN_NOINLINE size_t argsort::cub_sort_pairs<dtype, dtype>( \ bool, void*, size_t, const dtype*, dtype*, const dtype*, dtype*, uint32_t, \ uint32_t, int, int, hipStream_t); #define INST_FORWARD(dtype) \ template void argsort::forward<dtype>( \ const dtype*, dtype*, int*, void*, uint32_t, uint32_t, bool, hipStream_t, \ const int*); ARGSORT_FOREACH_CTYPE(INST_FORWARD) INST_CUB_SORT(uint32_t) INST_CUB_SORT(uint64_t) #undef INST_CUB_SORT #undef INST_FORWARD } // namespace cuda } // namespace megdnn #include "src/cuda/kernel_common/diagnostic_epilogue.cuh" // vim: ft=cuda syntax=cuda.doxygen
168c40f49e00cd0d0b08f76b95bc18608621d6d5.cu
#include "./argsort.cuh" #include "./bitonic_sort.cuh" #include "megdnn/basic_types.h" #include "src/cuda/utils.cuh" #include "src/cuda/cub/device/device_radix_sort.cuh" #include "src/cuda/cub/device/device_segmented_radix_sort.cuh" #include "src/cuda/kernel_common/diagnostic_prologue.cuh" using namespace megdnn; using namespace cuda; namespace { struct StridedOffsetIterator { int bias, stride; StridedOffsetIterator(int bias_, int stride_) : bias(bias_), stride(stride_) {} __device__ __forceinline__ int operator[](int i) const { return stride * i + bias; } }; bool use_bitonic(uint32_t /*M*/, uint32_t N) { // bitonic sort is preferred when N is small (alwyas faster than radix sort) return N <= BITONIC_SORT_MAX_LENGTH; } bool use_segmented(uint32_t M, uint32_t /*N*/) { // an empirical value: // sort(1, 1e6): 0.574ms // segsort({1,2,8,16}, 1e6): 7-8ms // sort(1, 1e7): 3.425ms // segsort({1,2,8,16}, 1e7): 71-84ms // // segsort is about 7x-10x slower than sort on small batches, so we can // expect it to be faster than sort when batch is large enough. return M >= 8; } __global__ void kern_arange(int* dst, uint32_t n, uint32_t mod) { uint32_t i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n) { dst[i] = i % mod; } } template <typename ctype> size_t get_sort_workspace(uint32_t M, uint32_t N, bool is_ascending) { if (use_bitonic(M, N)) { return 0; } return argsort::cub_sort_pairs<ctype, int>( is_ascending, NULL, 0, NULL, NULL, NULL, NULL, M, N, 0, sizeof(float) * 8, NULL); } } // anonymous namespace template <typename KeyType, typename ValueType> MEGDNN_NOINLINE size_t argsort::cub_sort_pairs( bool is_ascending, void* workspace, size_t workspace_size, const KeyType* keys_in, KeyType* keys_out, const ValueType* values_in, ValueType* values_out, uint32_t M, uint32_t N, int begin_bit, int end_bit, cudaStream_t stream) { cudaError_t err; if (use_segmented(M, N)) { if (is_ascending) { err = cub::DeviceSegmentedRadixSort::SortPairs( workspace, workspace_size, keys_in, keys_out, values_in, values_out, N * M, M, StridedOffsetIterator(0, N), StridedOffsetIterator(N, N), begin_bit, end_bit, stream); cuda_check(err); } else { err = cub::DeviceSegmentedRadixSort::SortPairsDescending( workspace, workspace_size, keys_in, keys_out, values_in, values_out, N * M, M, StridedOffsetIterator(0, N), StridedOffsetIterator(N, N), begin_bit, end_bit, stream); cuda_check(err); } } else { if (is_ascending) { for (size_t i = 0; i < M; ++i) { err = cub::DeviceRadixSort::SortPairs( workspace, workspace_size, keys_in + N * i, keys_out + N * i, values_in + N * i, values_out + N * i, N, begin_bit, end_bit, stream); cuda_check(err); if (!keys_in) { return workspace_size; } } } else { for (size_t i = 0; i < M; ++i) { err = cub::DeviceRadixSort::SortPairsDescending( workspace, workspace_size, keys_in + N * i, keys_out + N * i, values_in + N * i, values_out + N * i, N, begin_bit, end_bit, stream); cuda_check(err); if (!keys_in) { return workspace_size; } } } } return workspace_size; } size_t argsort::get_fwd_workspace_in_bytes( uint32_t M, uint32_t N, DType dtype, bool is_ascending, bool iptr_src_given) { size_t size = 0; switch (dtype.enumv().ev) { #define cb(ctype) \ case DTypeTrait<ctype>::enumv: \ size = get_sort_workspace<ctype>(M, N, is_ascending); \ break; ARGSORT_FOREACH_CTYPE(cb) #undef cb default: megdnn_throw("argsort only supports float, int32 and float16"); } if (!iptr_src_given) { size = DIVUP(size, sizeof(float)) * sizeof(float) + M * N * sizeof(int); } return size; } template <typename dtype> void argsort::forward( 
const dtype* sptr, dtype* dptr, int* iptr, void* workspace, uint32_t M, uint32_t N, bool is_ascending, cudaStream_t stream, const int* iptr_src) { size_t wk_size = get_sort_workspace<dtype>(M, N, is_ascending); if (!iptr_src) { int* ptr = reinterpret_cast<int*>( static_cast<uint8_t*>(workspace) + DIVUP(wk_size, sizeof(float)) * sizeof(float)); kern_arange<<<DIVUP(N * M, 512), 512, 0, stream>>>(ptr, M * N, N); iptr_src = ptr; } if (use_bitonic(M, N)) { cuda_check( bitonic_sort(M, N, sptr, iptr_src, dptr, iptr, is_ascending, stream)); } else { cub_sort_pairs( is_ascending, workspace, wk_size, sptr, dptr, iptr_src, iptr, M, N, 0, sizeof(float) * 8, stream); } } namespace megdnn { namespace cuda { #define INST_CUB_SORT(dtype) \ template MEGDNN_NOINLINE size_t argsort::cub_sort_pairs<dtype, dtype>( \ bool, void*, size_t, const dtype*, dtype*, const dtype*, dtype*, uint32_t, \ uint32_t, int, int, cudaStream_t); #define INST_FORWARD(dtype) \ template void argsort::forward<dtype>( \ const dtype*, dtype*, int*, void*, uint32_t, uint32_t, bool, cudaStream_t, \ const int*); ARGSORT_FOREACH_CTYPE(INST_FORWARD) INST_CUB_SORT(uint32_t) INST_CUB_SORT(uint64_t) #undef INST_CUB_SORT #undef INST_FORWARD } // namespace cuda } // namespace megdnn #include "src/cuda/kernel_common/diagnostic_epilogue.cuh" // vim: ft=cuda syntax=cuda.doxygen
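// Hedged sketch: get_sort_workspace above sizes its workspace by calling cub_sort_pairs
// with NULL pointers, which is CUB's standard two-phase pattern. A minimal standalone use
// of cub::DeviceRadixSort::SortPairs showing that pattern; buffer names and the header
// path are illustrative, not taken from the megdnn sources.
#include <cuda_runtime.h>
#include <cub/device/device_radix_sort.cuh>

void sort_pairs_example(const float* d_keys_in, float* d_keys_out,
                        const int* d_vals_in, int* d_vals_out, int n,
                        cudaStream_t stream) {
    void*  d_temp     = nullptr;
    size_t temp_bytes = 0;

    // Phase 1: null workspace pointer -- CUB only reports the required size in temp_bytes.
    cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes,
                                    d_keys_in, d_keys_out,
                                    d_vals_in, d_vals_out,
                                    n, 0, sizeof(float) * 8, stream);

    cudaMalloc(&d_temp, temp_bytes);

    // Phase 2: same call with the allocated workspace actually performs the sort.
    cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes,
                                    d_keys_in, d_keys_out,
                                    d_vals_in, d_vals_out,
                                    n, 0, sizeof(float) * 8, stream);

    cudaFree(d_temp);
}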
43f768cfe599b760ef0004d9697d27e22747f56e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> extern "C" { void * alloc_gpu_mem( size_t N) { void*d; int size = N *sizeof(float); int err; err = hipMalloc(&d, size); if (err != 0) printf("cuda malloc error: %d\n", err); return d; }} // see kernels.cu for launch_kernel functions extern "C" { void host2gpu(float * a, void * da, size_t N) { int size = N * sizeof(float); int err; err = hipMemcpy(da, a, size, hipMemcpyHostToDevice); if (err != 0) printf("load mem: %d\n", err); }} extern "C"{ void gpu2host(float *c, void *d_c, size_t N) { hipError_t err; int size = N*sizeof(float); // copy result back err = hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost); if (err != 0) {printf("cpy mem back %d\n", err); //hipError_t hipGetLastError(void); printf("%s\n", hipGetErrorString(hipGetLastError())); } }} extern "C"{ void free_gpu_mem(void *d) { hipFree(d); }} extern "C"{ void free_mem(void *d) { free(d); }} extern "C"{ void get_cuda_info() { int count, i; const int kb = 1024; const int mb = kb*kb; hipGetDeviceCount(&count); for(i=0; i<count;i++) { hipDeviceProp_t props; hipGetDeviceProperties(&props, i); printf("\nDevice Details:\n"); printf("%d : %s : %d : %d\n", i, props.name, props.major, props.minor); printf("Number of Processors: %d\n", props.multiProcessorCount); printf("Global Memory: %f mb\n", (float) props.totalGlobalMem /mb); printf("Shared Memory: %f kb \n", (float) props.sharedMemPerBlock / kb); printf("Constant Memory: %f kb\n", (float) props.totalConstMem / kb); printf("Block registers: %d\n", props.regsPerBlock); printf("Warp size: %d\n", props.warpSize); printf("Threads per block: %d\n", props.maxThreadsPerBlock); printf("Max block dimensions: [%d, %d, %d]\n", props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]); printf("Max grid dimensions: [%d, %d, %d]\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]); printf("Clock Rate: %d\n", props.memoryClockRate); printf("Memory Bus Widths %d\n", props.memoryBusWidth); printf("\n"); } }} extern "C"{ void distance3D(float *x, float *y, float *dist, size_t nx, size_t ny, size_t T, size_t k) { int n, m, i, j; float d, d_; for (n=0; n<nx; n++){ for (m=0; m<ny; m++){ d = 0; for (i=0; i<T; i++){ d_ = 0; for (j =0; j<k; j++){ d_ += pow((x[n*T*k + i*k + j] - y[m*T*k + i*k + j]), 2); } d += sqrt(d_); } dist[n*ny + m] = d; } } }}
43f768cfe599b760ef0004d9697d27e22747f56e.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> extern "C" { void * alloc_gpu_mem( size_t N) { void*d; int size = N *sizeof(float); int err; err = cudaMalloc(&d, size); if (err != 0) printf("cuda malloc error: %d\n", err); return d; }} // see kernels.cu for launch_kernel functions extern "C" { void host2gpu(float * a, void * da, size_t N) { int size = N * sizeof(float); int err; err = cudaMemcpy(da, a, size, cudaMemcpyHostToDevice); if (err != 0) printf("load mem: %d\n", err); }} extern "C"{ void gpu2host(float *c, void *d_c, size_t N) { cudaError_t err; int size = N*sizeof(float); // copy result back err = cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); if (err != 0) {printf("cpy mem back %d\n", err); //cudaError_t cudaGetLastError(void); printf("%s\n", cudaGetErrorString(cudaGetLastError())); } }} extern "C"{ void free_gpu_mem(void *d) { cudaFree(d); }} extern "C"{ void free_mem(void *d) { free(d); }} extern "C"{ void get_cuda_info() { int count, i; const int kb = 1024; const int mb = kb*kb; cudaGetDeviceCount(&count); for(i=0; i<count;i++) { cudaDeviceProp props; cudaGetDeviceProperties(&props, i); printf("\nDevice Details:\n"); printf("%d : %s : %d : %d\n", i, props.name, props.major, props.minor); printf("Number of Processors: %d\n", props.multiProcessorCount); printf("Global Memory: %f mb\n", (float) props.totalGlobalMem /mb); printf("Shared Memory: %f kb \n", (float) props.sharedMemPerBlock / kb); printf("Constant Memory: %f kb\n", (float) props.totalConstMem / kb); printf("Block registers: %d\n", props.regsPerBlock); printf("Warp size: %d\n", props.warpSize); printf("Threads per block: %d\n", props.maxThreadsPerBlock); printf("Max block dimensions: [%d, %d, %d]\n", props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]); printf("Max grid dimensions: [%d, %d, %d]\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]); printf("Clock Rate: %d\n", props.memoryClockRate); printf("Memory Bus Widths %d\n", props.memoryBusWidth); printf("\n"); } }} extern "C"{ void distance3D(float *x, float *y, float *dist, size_t nx, size_t ny, size_t T, size_t k) { int n, m, i, j; float d, d_; for (n=0; n<nx; n++){ for (m=0; m<ny; m++){ d = 0; for (i=0; i<T; i++){ d_ = 0; for (j =0; j<k; j++){ d_ += pow((x[n*T*k + i*k + j] - y[m*T*k + i*k + j]), 2); } d += sqrt(d_); } dist[n*ny + m] = d; } } }}
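// Hedged sketch: distance3D above is the CPU reference, and the comment points to a
// kernels.cu that is not part of this pair. Below is one plausible GPU mapping -- one
// thread per (n, m) pair -- offered as a hypothetical illustration, not the project's
// actual kernel.
__global__ void distance3D_kernel(const float* x, const float* y, float* dist,
                                  size_t nx, size_t ny, size_t T, size_t k) {
    size_t n = blockIdx.y * blockDim.y + threadIdx.y;   // index into x
    size_t m = blockIdx.x * blockDim.x + threadIdx.x;   // index into y
    if (n >= nx || m >= ny) return;

    float d = 0.0f;
    for (size_t i = 0; i < T; ++i) {
        float d_ = 0.0f;
        for (size_t j = 0; j < k; ++j) {
            float diff = x[n * T * k + i * k + j] - y[m * T * k + i * k + j];
            d_ += diff * diff;
        }
        d += sqrtf(d_);
    }
    dist[n * ny + m] = d;
}

// Possible launch, mirroring the CPU loop bounds:
//   dim3 block(16, 16);
//   dim3 grid((ny + block.x - 1) / block.x, (nx + block.y - 1) / block.y);
//   distance3D_kernel<<<grid, block>>>(d_x, d_y, d_dist, nx, ny, T, k);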
d8844cf3c74ee005539426d699f32486aaae4204.hip
// !!! This is a file automatically generated by hipify!!! #include<iostream> #include<complex> #include<stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #include <math.h> #include<hiprand/hiprand.h> #include<hiprand/hiprand_kernel.h> #include "stopwatch.hpp" stopwatch<std::milli, float> sw; #define RADIUS 3 #define FRAME_SIZE 4096*8*7 #define NBPSC 2 #define NSC 64 #define NCBPS 128 typedef std::complex<double> Complex; using namespace std; __global__ void init(unsigned int seed, hiprandState_t* states) { /* we have to initialize the state */ hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ blockIdx.x*blockDim.x+threadIdx.x, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[blockIdx.x]); } /* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */ __global__ void randoms(hiprandState_t* states, int* numbers) { /* hiprand works like rand - except that it takes a state as a parameter */ numbers[blockIdx.x*blockDim.x+threadIdx.x] = hiprand(&states[blockIdx.x*blockDim.x+threadIdx.x]) % 2; } __global__ void scramble(int* numbers,int *scrambler_bits) { /* hiprand works like rand - except that it takes a state as a parameter */ numbers[blockIdx.x*blockDim.x+threadIdx.x] ^=scrambler_bits[ (blockIdx.x*blockDim.x+threadIdx.x) % 128]; } int checkResults(int startElem, int endElem, float* cudaRes, float* res) { int nDiffs=0; const float smallVal = 0.0001f; for(int i=startElem; i<endElem; i++) { if(fabs(cudaRes[i]-res[i])>smallVal) { nDiffs++; std::cout << i << std::endl; } } return nDiffs; } void initializeWeights(float* weights, int rad) { // for now hardcoded for RADIUS=3 weights[0] = 0.50f; weights[1] = 0.75f; weights[2] = 1.25f; weights[3] = 2.00f; weights[4] = 1.25f; weights[5] = 0.75f; weights[6] = 0.50f; } void initializeArray(FILE* fp,float* arr, int nElements) { for( int i=0; i<nElements; i++){ int r=fscanf(fp,"%f",&arr[i]); if(r == EOF){ rewind(fp); } } } void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in, float *out) { for (int i = sIdx; i < eIdx; i++) { out[i] = 0; //loop over all elements in the stencil for (int j = -RADIUS; j <= RADIUS; j++) { out[i] += weights[j + RADIUS] * in[i + j]; } out[i] = out[i] / (2 * RADIUS + 1); } } __global__ void interleave(int *in, int *out) { const int tid = blockIdx.x*blockDim.x + threadIdx.x; const int symbol_no = tid/NCBPS; const int k = tid%NCBPS; const int i = (NCBPS/16) * (k % 16) + ((k/16)) ; const int s =1; //for QPSK const int j = s * (i/s) + ((int)(i + NBPSC- ((16 * i)/NCBPS)) % s); out[symbol_no*NCBPS + j] = in[symbol_no*NCBPS + k]; } __global__ void modulate(int *in, hipDoubleComplex *out) { const int tid = blockIdx.x*blockDim.x + threadIdx.x; out[tid]=make_cuDoubleComplex(2*in[2*tid]-1,2*in[2*tid+1]-1); } __global__ void encode_s(int sIdx, int eIdx, int *in, int *out) { int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x; volatile __shared__ int s_in[512+6]; int offset = blockIdx.x*blockDim.x; s_in[threadIdx.x+3] = in[i] ; if(threadIdx.x<=3) { s_in[sIdx-threadIdx.x] = in [sIdx+offset-threadIdx.x]; s_in[sIdx+blockDim.x + threadIdx.x] = in [ sIdx+ offset+ blockDim.x +threadIdx.x]; } __syncthreads(); if( i < eIdx ) { int result0 = 0; int result1 = 0; result0 ^= s_in[-offset+i-2]; result0 ^= s_in[-offset+i-3]; 
result0 ^= s_in[-offset+i-5]; result0 ^= s_in[-offset+i-6]; result0 ^= s_in[-offset+i]; result1 ^= s_in[-offset+i-1]; result1 ^= s_in[-offset+i-2]; result1 ^= s_in[-offset+i-3]; result1 ^= s_in[-offset+i-6]; result1 ^= s_in[-offset+i]; out[2*i] = result0; out[2*i+1] = result1; } } __global__ void applyStencil1D(int sIdx, int eIdx, const float *weights, float *in, float *out) { int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x; if( i < eIdx ) { float result = 0.f; result += weights[0]*in[i-3]; result += weights[1]*in[i-2]; result += weights[2]*in[i-1]; result += weights[3]*in[i]; result += weights[4]*in[i+1]; result += weights[5]*in[i+2]; result += weights[6]*in[i+3]; result /=7.f; out[i] = result; } } int main(int argc, char* argv[]) { int num_frames=10; if(argc>1) num_frames = atoi ( argv[1]); float time = 0.f; float generate_time= 0, scramble_time=0, encode_time=0, encode_parallel_time=0, interleave_time=0, modulate_time = 0, fft_time=0 ; int frame_size = FRAME_SIZE; int *frame; int *scrambler_bits; int *encoded_frame; int *interleaved_frame; hipDoubleComplex *modulated_frame; hipEvent_t startevent, stopevent; hipEventCreate(&startevent); hipEventCreate(&stopevent); hipEventRecord(startevent,0); hiprandState_t* states; /* allocate space on the GPU for the random states */ hipMalloc((void**) &states, FRAME_SIZE * sizeof(hiprandState_t)); /* invoke the GPU to initialize all of the random states */ hipLaunchKernelGGL(( init), dim3(FRAME_SIZE/128), dim3(128), 0, 0, 1000, states); hipMalloc((void**) &scrambler_bits, 128 * sizeof(int)); hipLaunchKernelGGL(( randoms), dim3(1), dim3(128), 0, 0, states, scrambler_bits); /* allocate an array of unsigned ints on the CPU and GPU */ hipMalloc((void**) &frame, FRAME_SIZE * sizeof(int)); hipMalloc((void**) &encoded_frame, 2*FRAME_SIZE * sizeof(int)); hipMalloc((void**) &interleaved_frame, 2*FRAME_SIZE * sizeof(int)); hipMalloc((void**) &modulated_frame, FRAME_SIZE * sizeof(hipDoubleComplex)); /* invoke the kernel to get some random numbers */ for(int ii=0; ii<num_frames;ii++) { sw.start(); hipLaunchKernelGGL(( randoms), dim3(FRAME_SIZE/128), dim3(128), 0, 0, states, frame); hipDeviceSynchronize(); sw.stop(); generate_time += sw.count(); sw.start(); hipLaunchKernelGGL(( scramble), dim3(FRAME_SIZE/128), dim3(128), 0, 0, frame,scrambler_bits); hipDeviceSynchronize(); sw.stop(); scramble_time += sw.count(); sw.start(); hipLaunchKernelGGL(( encode_s), dim3(FRAME_SIZE/512), dim3(512), 0, 0, 6,FRAME_SIZE, frame,encoded_frame); hipDeviceSynchronize(); sw.stop(); encode_time += sw.count(); sw.start(); hipLaunchKernelGGL(( interleave), dim3(FRAME_SIZE/512), dim3(512), 0, 0, encoded_frame,interleaved_frame); hipDeviceSynchronize(); sw.stop(); interleave_time += sw.count(); sw.start(); hipLaunchKernelGGL(( modulate), dim3(FRAME_SIZE/512), dim3(512), 0, 0, interleaved_frame,modulated_frame); hipDeviceSynchronize(); sw.stop(); modulate_time += sw.count(); } cout<< " generate_time " << generate_time << endl; cout<< " scramble_time " << scramble_time << endl; cout<< " encode_time " << encode_time << endl; cout<< " interleave_time " << interleave_time << endl; cout<< " modulate_time " << modulate_time << endl; hipEventRecord(stopevent,0); //ending timing for inclusive hipEventSynchronize(stopevent); hipEventElapsedTime(&time, startevent, stopevent); cout << "Total time minus fft" << time << endl; return 0; }
d8844cf3c74ee005539426d699f32486aaae4204.cu
#include<iostream> #include<complex> #include<stdlib.h> #include <cuda.h> #include <cuComplex.h> #include <math.h> #include<curand.h> #include<curand_kernel.h> #include "stopwatch.hpp" stopwatch<std::milli, float> sw; #define RADIUS 3 #define FRAME_SIZE 4096*8*7 #define NBPSC 2 #define NSC 64 #define NCBPS 128 typedef std::complex<double> Complex; using namespace std; __global__ void init(unsigned int seed, curandState_t* states) { /* we have to initialize the state */ curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ blockIdx.x*blockDim.x+threadIdx.x, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[blockIdx.x]); } /* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */ __global__ void randoms(curandState_t* states, int* numbers) { /* curand works like rand - except that it takes a state as a parameter */ numbers[blockIdx.x*blockDim.x+threadIdx.x] = curand(&states[blockIdx.x*blockDim.x+threadIdx.x]) % 2; } __global__ void scramble(int* numbers,int *scrambler_bits) { /* curand works like rand - except that it takes a state as a parameter */ numbers[blockIdx.x*blockDim.x+threadIdx.x] ^=scrambler_bits[ (blockIdx.x*blockDim.x+threadIdx.x) % 128]; } int checkResults(int startElem, int endElem, float* cudaRes, float* res) { int nDiffs=0; const float smallVal = 0.0001f; for(int i=startElem; i<endElem; i++) { if(fabs(cudaRes[i]-res[i])>smallVal) { nDiffs++; std::cout << i << std::endl; } } return nDiffs; } void initializeWeights(float* weights, int rad) { // for now hardcoded for RADIUS=3 weights[0] = 0.50f; weights[1] = 0.75f; weights[2] = 1.25f; weights[3] = 2.00f; weights[4] = 1.25f; weights[5] = 0.75f; weights[6] = 0.50f; } void initializeArray(FILE* fp,float* arr, int nElements) { for( int i=0; i<nElements; i++){ int r=fscanf(fp,"%f",&arr[i]); if(r == EOF){ rewind(fp); } } } void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in, float *out) { for (int i = sIdx; i < eIdx; i++) { out[i] = 0; //loop over all elements in the stencil for (int j = -RADIUS; j <= RADIUS; j++) { out[i] += weights[j + RADIUS] * in[i + j]; } out[i] = out[i] / (2 * RADIUS + 1); } } __global__ void interleave(int *in, int *out) { const int tid = blockIdx.x*blockDim.x + threadIdx.x; const int symbol_no = tid/NCBPS; const int k = tid%NCBPS; const int i = (NCBPS/16) * (k % 16) + ((k/16)) ; const int s =1; //for QPSK const int j = s * (i/s) + ((int)(i + NBPSC- ((16 * i)/NCBPS)) % s); out[symbol_no*NCBPS + j] = in[symbol_no*NCBPS + k]; } __global__ void modulate(int *in, cuDoubleComplex *out) { const int tid = blockIdx.x*blockDim.x + threadIdx.x; out[tid]=make_cuDoubleComplex(2*in[2*tid]-1,2*in[2*tid+1]-1); } __global__ void encode_s(int sIdx, int eIdx, int *in, int *out) { int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x; volatile __shared__ int s_in[512+6]; int offset = blockIdx.x*blockDim.x; s_in[threadIdx.x+3] = in[i] ; if(threadIdx.x<=3) { s_in[sIdx-threadIdx.x] = in [sIdx+offset-threadIdx.x]; s_in[sIdx+blockDim.x + threadIdx.x] = in [ sIdx+ offset+ blockDim.x +threadIdx.x]; } __syncthreads(); if( i < eIdx ) { int result0 = 0; int result1 = 0; result0 ^= s_in[-offset+i-2]; result0 ^= s_in[-offset+i-3]; result0 ^= s_in[-offset+i-5]; result0 ^= s_in[-offset+i-6]; result0 ^= s_in[-offset+i]; result1 ^= 
s_in[-offset+i-1]; result1 ^= s_in[-offset+i-2]; result1 ^= s_in[-offset+i-3]; result1 ^= s_in[-offset+i-6]; result1 ^= s_in[-offset+i]; out[2*i] = result0; out[2*i+1] = result1; } } __global__ void applyStencil1D(int sIdx, int eIdx, const float *weights, float *in, float *out) { int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x; if( i < eIdx ) { float result = 0.f; result += weights[0]*in[i-3]; result += weights[1]*in[i-2]; result += weights[2]*in[i-1]; result += weights[3]*in[i]; result += weights[4]*in[i+1]; result += weights[5]*in[i+2]; result += weights[6]*in[i+3]; result /=7.f; out[i] = result; } } int main(int argc, char* argv[]) { int num_frames=10; if(argc>1) num_frames = atoi ( argv[1]); float time = 0.f; float generate_time= 0, scramble_time=0, encode_time=0, encode_parallel_time=0, interleave_time=0, modulate_time = 0, fft_time=0 ; int frame_size = FRAME_SIZE; int *frame; int *scrambler_bits; int *encoded_frame; int *interleaved_frame; cuDoubleComplex *modulated_frame; cudaEvent_t startevent, stopevent; cudaEventCreate(&startevent); cudaEventCreate(&stopevent); cudaEventRecord(startevent,0); curandState_t* states; /* allocate space on the GPU for the random states */ cudaMalloc((void**) &states, FRAME_SIZE * sizeof(curandState_t)); /* invoke the GPU to initialize all of the random states */ init<<<FRAME_SIZE/128, 128>>>(1000, states); cudaMalloc((void**) &scrambler_bits, 128 * sizeof(int)); randoms<<<1, 128>>>(states, scrambler_bits); /* allocate an array of unsigned ints on the CPU and GPU */ cudaMalloc((void**) &frame, FRAME_SIZE * sizeof(int)); cudaMalloc((void**) &encoded_frame, 2*FRAME_SIZE * sizeof(int)); cudaMalloc((void**) &interleaved_frame, 2*FRAME_SIZE * sizeof(int)); cudaMalloc((void**) &modulated_frame, FRAME_SIZE * sizeof(cuDoubleComplex)); /* invoke the kernel to get some random numbers */ for(int ii=0; ii<num_frames;ii++) { sw.start(); randoms<<<FRAME_SIZE/128, 128>>>(states, frame); cudaDeviceSynchronize(); sw.stop(); generate_time += sw.count(); sw.start(); scramble<<<FRAME_SIZE/128, 128>>>( frame,scrambler_bits); cudaDeviceSynchronize(); sw.stop(); scramble_time += sw.count(); sw.start(); encode_s<<<FRAME_SIZE/512, 512>>>(6,FRAME_SIZE, frame,encoded_frame); cudaDeviceSynchronize(); sw.stop(); encode_time += sw.count(); sw.start(); interleave<<<FRAME_SIZE/512, 512>>> (encoded_frame,interleaved_frame); cudaDeviceSynchronize(); sw.stop(); interleave_time += sw.count(); sw.start(); modulate<<<FRAME_SIZE/512, 512>>> (interleaved_frame,modulated_frame); cudaDeviceSynchronize(); sw.stop(); modulate_time += sw.count(); } cout<< " generate_time " << generate_time << endl; cout<< " scramble_time " << scramble_time << endl; cout<< " encode_time " << encode_time << endl; cout<< " interleave_time " << interleave_time << endl; cout<< " modulate_time " << modulate_time << endl; cudaEventRecord(stopevent,0); //ending timing for inclusive cudaEventSynchronize(stopevent); cudaEventElapsedTime(&time, startevent, stopevent); cout << "Total time minus fft" << time << endl; return 0; }
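// Hedged sketch: the pipeline above stops after modulation and reports "Total time minus
// fft", so the transform stage is not implemented. One way that stage could look with
// cuFFT -- batched NSC-point double-precision transforms over the modulated symbols. The
// batch size, the in-place transform, and the CUFFT_INVERSE direction are assumptions,
// not taken from the pair above.
#include <cuComplex.h>
#include <cufft.h>

void run_ifft(cuDoubleComplex* modulated_frame, int total_samples) {
    const int nsc   = 64;                     // NSC in the pair above
    const int batch = total_samples / nsc;    // number of OFDM symbols

    cufftHandle plan;
    cufftPlan1d(&plan, nsc, CUFFT_Z2Z, batch);

    // cuDoubleComplex and cufftDoubleComplex share the same memory layout.
    cufftExecZ2Z(plan,
                 reinterpret_cast<cufftDoubleComplex*>(modulated_frame),
                 reinterpret_cast<cufftDoubleComplex*>(modulated_frame),
                 CUFFT_INVERSE);

    cufftDestroy(plan);
}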
62a7c612f709cc59ff67dfade587ebb4c0fb72fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void bp_maxpool(float* d_preact, float* preact, float* p_output, float* nd_output, const int kernel_size, const int size, const int n_size, const int in_channel, bool SAME) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int totalPos = blockDim.x * gridDim.x; const int N = kernel_size * kernel_size * n_size * n_size * in_channel; // total number of connections in this convolution const int padding = (kernel_size - 1) / 2; // number of padding for both ends int input_row, input_col; // distribute certain number of connections to each thread regardless of detailed position and shape for(int n = N * pos / totalPos; n < N * (pos+1) / totalPos; n++){ int idx = n; const int i_kernel_row = ((idx /= 1 ) % kernel_size); const int i_kernel_col = ((idx /= kernel_size ) % kernel_size); const int i_channel = ((idx /= kernel_size ) % in_channel); const int i_row = ((idx /= in_channel ) % n_size); const int i_col = ((idx /= n_size ) % n_size); float maxidx = (float)-1; idx = 0; // corresponding position of the input matrix if (SAME){ // SAME padding scheme implemented input_row = i_kernel_row + i_row - padding; input_col = i_kernel_col + i_col - padding; } else{ input_row = i_kernel_row + i_row; input_col = i_kernel_col + i_col; } if(input_row >= 0 && input_row < size && input_col >=0 && input_col < size){ if (p_output[((i_channel % in_channel) * size + input_col) * size + input_row] > maxidx) { maxidx = p_output[((i_channel % in_channel) * size + input_col) * size + input_row] ; idx = ((i_channel % in_channel) * size + input_col) * size + input_row; } } d_preact[idx] = nd_output[((i_channel % in_channel) * n_size + i_col) * n_size + i_row]; } }
62a7c612f709cc59ff67dfade587ebb4c0fb72fd.cu
#include "includes.h" __global__ void bp_maxpool(float* d_preact, float* preact, float* p_output, float* nd_output, const int kernel_size, const int size, const int n_size, const int in_channel, bool SAME) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int totalPos = blockDim.x * gridDim.x; const int N = kernel_size * kernel_size * n_size * n_size * in_channel; // total number of connections in this convolution const int padding = (kernel_size - 1) / 2; // number of padding for both ends int input_row, input_col; // distribute certain number of connections to each thread regardless of detailed position and shape for(int n = N * pos / totalPos; n < N * (pos+1) / totalPos; n++){ int idx = n; const int i_kernel_row = ((idx /= 1 ) % kernel_size); const int i_kernel_col = ((idx /= kernel_size ) % kernel_size); const int i_channel = ((idx /= kernel_size ) % in_channel); const int i_row = ((idx /= in_channel ) % n_size); const int i_col = ((idx /= n_size ) % n_size); float maxidx = (float)-1; idx = 0; // corresponding position of the input matrix if (SAME){ // SAME padding scheme implemented input_row = i_kernel_row + i_row - padding; input_col = i_kernel_col + i_col - padding; } else{ input_row = i_kernel_row + i_row; input_col = i_kernel_col + i_col; } if(input_row >= 0 && input_row < size && input_col >=0 && input_col < size){ if (p_output[((i_channel % in_channel) * size + input_col) * size + input_row] > maxidx) { maxidx = p_output[((i_channel % in_channel) * size + input_col) * size + input_row] ; idx = ((i_channel % in_channel) * size + input_col) * size + input_row; } } d_preact[idx] = nd_output[((i_channel % in_channel) * n_size + i_col) * n_size + i_row]; } }
fb88d3e7f26b194f88a934abd1df19cd9e609b1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void saxpy(int n, float a, float* x, float* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a * x[i] + y[i]; } int main(void) { int N = 1 << 20; float* x, * y, * d_x, * d_y; x = (float*)malloc(N * sizeof(float)); y = (float*)malloc(N * sizeof(float)); hipMalloc(&d_x, N * sizeof(float)); hipMalloc(&d_y, N * sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N * sizeof(float), hipMemcpyHostToDevice); // Perform SAXPY on 1M elements saxpy << <(N + 255) / 256, 256 >> > (N, 2.0f, d_x, d_y); hipMemcpy(y, d_y, N * sizeof(float), hipMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i] - 4.0f)); printf("Max error: %f\n", maxError); hipFree(d_x); hipFree(d_y); free(x); free(y); }
fb88d3e7f26b194f88a934abd1df19cd9e609b1e.cu
#include <stdio.h> __global__ void saxpy(int n, float a, float* x, float* y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a * x[i] + y[i]; } int main(void) { int N = 1 << 20; float* x, * y, * d_x, * d_y; x = (float*)malloc(N * sizeof(float)); y = (float*)malloc(N * sizeof(float)); cudaMalloc(&d_x, N * sizeof(float)); cudaMalloc(&d_y, N * sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice); // Perform SAXPY on 1M elements saxpy << <(N + 255) / 256, 256 >> > (N, 2.0f, d_x, d_y); cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i] - 4.0f)); printf("Max error: %f\n", maxError); cudaFree(d_x); cudaFree(d_y); free(x); free(y); }
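// Hedged sketch: the saxpy above uses a one-thread-per-element launch with
// (N + 255) / 256 blocks. A grid-stride variant of the same kernel is correct for any
// grid size because each thread strides over the array; this mirrors the pattern already
// used by the managed-memory saxpy pair earlier in this corpus.
__global__ void saxpy_grid_stride(int n, float a, const float* x, float* y) {
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        y[i] = a * x[i] + y[i];
}

// Example launch: any block count works; 256 threads per block is a common choice.
//   saxpy_grid_stride<<<128, 256>>>(N, 2.0f, d_x, d_y);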
526783aa0ebad9996054d3e157359e451b3d744a.hip
// !!! This is a file automatically generated by hipify!!! /* * (C) Copyright 1996-2016 ECMWF. * * This software is licensed under the terms of the Apache Licence Version 2.0 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. * In applying this licence, ECMWF does not waive the privileges and immunities * granted to it by virtue of its status as an intergovernmental organisation nor * does it submit to any jurisdiction. */ #include <hip/hip_runtime.h> #include "tests/AtlasTestEnvironment.h" #include "atlas/array.h" #include "atlas/array/MakeView.h" #include "atlas/runtime/Log.h" using namespace atlas::array; namespace atlas { namespace test { template<typename Value, int RANK> __global__ void kernel_ex(array::ArrayView<Value, RANK> dv) { dv(3, 3, 3) += dv.data_view().template length<0>() * dv.data_view().template length<1>() * dv.data_view().template length<2>(); } template<typename Value, int RANK> __global__ void loop_kernel_ex(array::ArrayView<Value, RANK> dv) { for(int i=0; i < dv.data_view().template length<0>(); i++) { for(int j=0; j < dv.data_view().template length<1>(); j++) { for(int k=0; k < dv.data_view().template length<2>(); k++) { dv(i,j,k) += i*10+j*100+k*1000; } } } } CASE( "test_array" ) { constexpr unsigned int dx = 5; constexpr unsigned int dy = 6; constexpr unsigned int dz = 7; Array* ds = Array::create<double>(dx, dy, dz); auto hv = make_host_view<double, 3>(*ds); hv(3, 3, 3) = 4.5; ds->updateDevice(); auto cv = make_device_view<double, 3>(*ds); hipLaunchKernelGGL(( kernel_ex), dim3(1),dim3(1), 0, 0, cv); hipDeviceSynchronize(); ds->updateHost(); ds->reactivateHostWriteViews(); EXPECT( hv(3, 3, 3) == 4.5 + dx*dy*dz ); delete ds; } CASE( "test_array_loop" ) { constexpr unsigned int dx = 5; constexpr unsigned int dy = 6; constexpr unsigned int dz = 7; Array* ds = Array::create<double>(dx, dy, dz); array::ArrayView<double,3> hv = make_host_view<double, 3>(*ds); for(int i=0; i < dx; i++) { for(int j=0; j < dy; j++) { for(int k=0; k < dz; k++) { hv(i,j,k) = 0; } } } ds->updateDevice(); auto cv = make_device_view<double, 3>(*ds); hipLaunchKernelGGL(( loop_kernel_ex), dim3(1),dim3(1), 0, 0, cv); hipDeviceSynchronize(); ds->updateHost(); ds->reactivateHostWriteViews(); for(int i=0; i < dx; i++) { for(int j=0; j < dy; j++) { for(int k=0; k < dz; k++) { EXPECT( hv(i,j,k) == i*10+j*100+k*1000 ); } } } delete ds; } } } int main(int argc, char **argv) { return atlas::test::run( argc, argv ); }
526783aa0ebad9996054d3e157359e451b3d744a.cu
/*
 * (C) Copyright 1996-2016 ECMWF.
 *
 * This software is licensed under the terms of the Apache Licence Version 2.0
 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
 * In applying this licence, ECMWF does not waive the privileges and immunities
 * granted to it by virtue of its status as an intergovernmental organisation nor
 * does it submit to any jurisdiction.
 */

#include <cuda_runtime.h>
#include "tests/AtlasTestEnvironment.h"
#include "atlas/array.h"
#include "atlas/array/MakeView.h"
#include "atlas/runtime/Log.h"

using namespace atlas::array;

namespace atlas {
namespace test {

template<typename Value, int RANK>
__global__
void kernel_ex(array::ArrayView<Value, RANK> dv)
{
    dv(3, 3, 3) += dv.data_view().template length<0>() * dv.data_view().template length<1>() * dv.data_view().template length<2>();
}

template<typename Value, int RANK>
__global__
void loop_kernel_ex(array::ArrayView<Value, RANK> dv)
{
    for(int i=0; i < dv.data_view().template length<0>(); i++) {
        for(int j=0; j < dv.data_view().template length<1>(); j++) {
            for(int k=0; k < dv.data_view().template length<2>(); k++) {
                dv(i,j,k) += i*10+j*100+k*1000;
            }
        }
    }
}

CASE( "test_array" )
{
    constexpr unsigned int dx = 5;
    constexpr unsigned int dy = 6;
    constexpr unsigned int dz = 7;

    Array* ds = Array::create<double>(dx, dy, dz);
    auto hv = make_host_view<double, 3>(*ds);
    hv(3, 3, 3) = 4.5;

    ds->updateDevice();

    auto cv = make_device_view<double, 3>(*ds);

    kernel_ex<<<1,1>>>(cv);
    cudaDeviceSynchronize();

    ds->updateHost();
    ds->reactivateHostWriteViews();

    EXPECT( hv(3, 3, 3) == 4.5 + dx*dy*dz );

    delete ds;
}

CASE( "test_array_loop" )
{
    constexpr unsigned int dx = 5;
    constexpr unsigned int dy = 6;
    constexpr unsigned int dz = 7;

    Array* ds = Array::create<double>(dx, dy, dz);
    array::ArrayView<double,3> hv = make_host_view<double, 3>(*ds);
    for(int i=0; i < dx; i++) {
        for(int j=0; j < dy; j++) {
            for(int k=0; k < dz; k++) {
                hv(i,j,k) = 0;
            }
        }
    }

    ds->updateDevice();

    auto cv = make_device_view<double, 3>(*ds);

    loop_kernel_ex<<<1,1>>>(cv);
    cudaDeviceSynchronize();

    ds->updateHost();
    ds->reactivateHostWriteViews();

    for(int i=0; i < dx; i++) {
        for(int j=0; j < dy; j++) {
            for(int k=0; k < dz; k++) {
                EXPECT( hv(i,j,k) == i*10+j*100+k*1000 );
            }
        }
    }

    delete ds;
}

}
}

int main(int argc, char **argv) {
    return atlas::test::run( argc, argv );
}
207d1a99bed514fbadb9d456d28d22ac04ad8692.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/mv_op.h"
#include "paddle/fluid/platform/gpu_launch_config.h"

namespace paddle {
namespace operators {

template <typename T>
__global__ void MVGradDxCUDAKernel(const int m, const int n, const T *dout,
                                   const T *vec, T *dx) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  for (; idx < m * n; idx += blockDim.x * gridDim.x) {
    int i = idx / n;
    int j = idx % n;
    dx[idx] = dout[i] * vec[j];
  }
}

// Using dimensional constraints on matrix multiplication, it is
// straight-forward to check the following table for when X and Y
// are both matrices.
//
// dX = | dOut Vec^T
// dVec = | X^T dOut

template <typename T>
class MVGradKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *x = context.Input<framework::Tensor>("X");
    auto *vec = context.Input<framework::Tensor>("Vec");
    auto *dout =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto *dx = context.Output<framework::Tensor>(framework::GradVarName("X"));
    auto *dvec =
        context.Output<framework::Tensor>(framework::GradVarName("Vec"));

    auto dim_x = x->dims();
    int m = dim_x[0];
    int n = dim_x[1];

    // get data ptr
    const T *x_data = x->data<T>();
    const T *vec_data = vec->data<T>();
    const T *dout_data = dout->data<T>();

    auto &dev_ctx =
        context.template device_context<platform::CUDADeviceContext>();
    auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
    auto stream = context.cuda_device_context().stream();
    auto config = GetGpuLaunchConfig1D(dev_ctx, m * n);

    if (dx) {
      T *dx_data = dx->mutable_data<T>(context.GetPlace());

      hipLaunchKernelGGL(( MVGradDxCUDAKernel<
          T>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, stream,
          m, n, dout_data, vec_data, dx_data);
    }

    if (dvec) {
      T *dvec_data = dvec->mutable_data<T>(context.GetPlace());

      blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data,
                static_cast<T>(0), dvec_data);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(
    mv, ops::MVKernel<paddle::platform::CUDADeviceContext, float>,
    ops::MVKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    mv_grad, ops::MVGradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::MVGradKernel<paddle::platform::CUDADeviceContext, double>);
207d1a99bed514fbadb9d456d28d22ac04ad8692.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/mv_op.h"
#include "paddle/fluid/platform/gpu_launch_config.h"

namespace paddle {
namespace operators {

template <typename T>
__global__ void MVGradDxCUDAKernel(const int m, const int n, const T *dout,
                                   const T *vec, T *dx) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  for (; idx < m * n; idx += blockDim.x * gridDim.x) {
    int i = idx / n;
    int j = idx % n;
    dx[idx] = dout[i] * vec[j];
  }
}

// Using dimensional constraints on matrix multiplication, it is
// straight-forward to check the following table for when X and Y
// are both matrices.
//
// dX = | dOut Vec^T
// dVec = | X^T dOut

template <typename T>
class MVGradKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *x = context.Input<framework::Tensor>("X");
    auto *vec = context.Input<framework::Tensor>("Vec");
    auto *dout =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto *dx = context.Output<framework::Tensor>(framework::GradVarName("X"));
    auto *dvec =
        context.Output<framework::Tensor>(framework::GradVarName("Vec"));

    auto dim_x = x->dims();
    int m = dim_x[0];
    int n = dim_x[1];

    // get data ptr
    const T *x_data = x->data<T>();
    const T *vec_data = vec->data<T>();
    const T *dout_data = dout->data<T>();

    auto &dev_ctx =
        context.template device_context<platform::CUDADeviceContext>();
    auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx);
    auto stream = context.cuda_device_context().stream();
    auto config = GetGpuLaunchConfig1D(dev_ctx, m * n);

    if (dx) {
      T *dx_data = dx->mutable_data<T>(context.GetPlace());

      MVGradDxCUDAKernel<
          T><<<config.block_per_grid.x, config.thread_per_block.x, 0, stream>>>(
          m, n, dout_data, vec_data, dx_data);
    }

    if (dvec) {
      T *dvec_data = dvec->mutable_data<T>(context.GetPlace());

      blas.GEMV(true, dim_x[0], dim_x[1], static_cast<T>(1), x_data, dout_data,
                static_cast<T>(0), dvec_data);
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(
    mv, ops::MVKernel<paddle::platform::CUDADeviceContext, float>,
    ops::MVKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
    mv_grad, ops::MVGradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::MVGradKernel<paddle::platform::CUDADeviceContext, double>);
7adf076be48f49d02139779ed23d621b71790251.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This exercise is for student to get familiarized with passing data between host and device
#include <stdio.h>

__global__ void vector_add(int *d_c, int *d_a, int *d_b, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    d_c[i] = d_a[i] + d_b[i];
    //printf("GPU[%d] done!\n", i);
}

int main(void){
    int N = 4;
    int a[N] = {22, 13, 16, 5};
    int b[N] = { 5, 22, 17, 37};
    int c[N];

    int *d_a, *d_b, *d_c;
    hipMalloc((void**)&d_a, sizeof(int)*N);
    hipMalloc((void**)&d_b, sizeof(int)*N);
    hipMalloc((void**)&d_c, sizeof(int)*N);

    hipMemcpy(d_a, a, sizeof(int)*N, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, sizeof(int)*N, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( vector_add), dim3(N),dim3(1), 0, 0, d_c, d_a, d_b, N); // N (4) threads

    hipMemcpy(c, d_c, sizeof(int)*N, hipMemcpyDeviceToHost);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    printf("A = [%2d %2d %2d %2d]\n", a[0], a[1], a[2], a[3]);
    printf("B = [%2d %2d %2d %2d]\n", b[0], b[1], b[2], b[3]);
    printf("C = [%2d %2d %2d %2d]\n", c[0], c[1], c[2], c[3]);

    return 0;
}
7adf076be48f49d02139779ed23d621b71790251.cu
// This exercise is for student to get familiarized with passing data between host and device
#include <stdio.h>

__global__ void vector_add(int *d_c, int *d_a, int *d_b, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    d_c[i] = d_a[i] + d_b[i];
    //printf("GPU[%d] done!\n", i);
}

int main(void){
    int N = 4;
    int a[N] = {22, 13, 16, 5};
    int b[N] = { 5, 22, 17, 37};
    int c[N];

    int *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, sizeof(int)*N);
    cudaMalloc((void**)&d_b, sizeof(int)*N);
    cudaMalloc((void**)&d_c, sizeof(int)*N);

    cudaMemcpy(d_a, a, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(int)*N, cudaMemcpyHostToDevice);

    vector_add<<<N,1>>>(d_c, d_a, d_b, N); // N (4) threads

    cudaMemcpy(c, d_c, sizeof(int)*N, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    printf("A = [%2d %2d %2d %2d]\n", a[0], a[1], a[2], a[3]);
    printf("B = [%2d %2d %2d %2d]\n", b[0], b[1], b[2], b[3]);
    printf("C = [%2d %2d %2d %2d]\n", c[0], c[1], c[2], c[3]);

    return 0;
}
ef80c6c3657bae7163d414c7041e1a13c82ac48e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"

using namespace std;

/* Utility function, use to do error checking.

   Use this function like this:

   checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));

   And to check the result of a kernel invocation:

   checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result) {
    if (result != hipSuccess) {
        cerr << "cuda error: " << hipGetErrorString(result) << endl;
        exit(1);
    }
}

__global__ void vectorAddKernel(float* A, float* B, float* Result) {
    // Get the thread id, which we can use as itterator in the array of results.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Perform the action.
    Result[i] = A[i] + B[i];
}

void vectorAddCuda(int n, float* a, float* b, float* result) {
    int threadBlockSize = 256;

    // allocate the vectors on the GPU
    float* deviceA = NULL;
    checkCudaCall(hipMalloc((void **) &deviceA, n * sizeof(float)));
    if (deviceA == NULL) {
        cout << "could not allocate memory!" << endl;
        return;
    }
    float* deviceB = NULL;
    checkCudaCall(hipMalloc((void **) &deviceB, n * sizeof(float)));
    if (deviceB == NULL) {
        checkCudaCall(hipFree(deviceA));
        cout << "could not allocate memory!" << endl;
        return;
    }
    float* deviceResult = NULL;
    checkCudaCall(hipMalloc((void **) &deviceResult, n * sizeof(float)));
    if (deviceResult == NULL) {
        checkCudaCall(hipFree(deviceA));
        checkCudaCall(hipFree(deviceB));
        cout << "could not allocate memory!" << endl;
        return;
    }

    timer kernelTime1 = timer("kernelTime1");
    timer memoryTime = timer("memoryTime");

    // copy the original vectors to the GPU
    memoryTime.start();
    checkCudaCall(hipMemcpy(deviceA, a, n*sizeof(float), hipMemcpyHostToDevice));
    checkCudaCall(hipMemcpy(deviceB, b, n*sizeof(float), hipMemcpyHostToDevice));
    memoryTime.stop();

    // execute kernel
    kernelTime1.start();
    hipLaunchKernelGGL(( vectorAddKernel), dim3(n/threadBlockSize), dim3(threadBlockSize), 0, 0, deviceA, deviceB, deviceResult);
    hipDeviceSynchronize();
    kernelTime1.stop();

    // check whether the kernel invocation was successful
    checkCudaCall(hipGetLastError());

    // copy result back
    memoryTime.start();
    checkCudaCall(hipMemcpy(result, deviceResult, n * sizeof(float), hipMemcpyDeviceToHost));
    checkCudaCall(hipMemcpy(b, deviceB, n * sizeof(float), hipMemcpyDeviceToHost));
    memoryTime.stop();

    checkCudaCall(hipFree(deviceA));
    checkCudaCall(hipFree(deviceB));
    checkCudaCall(hipFree(deviceResult));

    cout << "vector-add (kernel): \t\t" << kernelTime1 << endl;
    cout << "vector-add (memory): \t\t" << memoryTime << endl;
}

int vectorAddSeq(int n, float* a, float* b, float* result) {
    int i;

    timer sequentialTime = timer("Sequential");

    sequentialTime.start();
    for (i=0; i<n; i++) {
        result[i] = a[i]+b[i];
    }
    sequentialTime.stop();

    cout << "vector-add (sequential): \t\t" << sequentialTime << endl;
}

int main(int argc, char* argv[]) {
    int n = 655360;
    float* a = new float[n];
    float* b = new float[n];
    float* result = new float[n];
    float* result_s = new float[n];

    if (argc > 1) n = atoi(argv[1]);

    cout << "Adding two vectors of " << n << " integer elements." << endl;

    // initialize the vectors.
    for(int i=0; i<n; i++) {
        a[i] = i;
        b[i] = i;
    }

    vectorAddSeq(n, a, b, result_s);
    vectorAddCuda(n, a, b, result);

    // verify the resuls
    for(int i=0; i<n; i++) {
        // if(result[i] != n /*2*i*/) {
        if (result[i]!=result_s[i]) {
            cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << result_s[i] << endl;
            exit(1);
        }
    }
    cout << "results OK!" << endl;

    delete[] a;
    delete[] b;
    delete[] result;

    return 0;
}
ef80c6c3657bae7163d414c7041e1a13c82ac48e.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
#include "timer.h"

using namespace std;

/* Utility function, use to do error checking.

   Use this function like this:

   checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));

   And to check the result of a kernel invocation:

   checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result) {
    if (result != cudaSuccess) {
        cerr << "cuda error: " << cudaGetErrorString(result) << endl;
        exit(1);
    }
}

__global__ void vectorAddKernel(float* A, float* B, float* Result) {
    // Get the thread id, which we can use as itterator in the array of results.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    // Perform the action.
    Result[i] = A[i] + B[i];
}

void vectorAddCuda(int n, float* a, float* b, float* result) {
    int threadBlockSize = 256;

    // allocate the vectors on the GPU
    float* deviceA = NULL;
    checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(float)));
    if (deviceA == NULL) {
        cout << "could not allocate memory!" << endl;
        return;
    }
    float* deviceB = NULL;
    checkCudaCall(cudaMalloc((void **) &deviceB, n * sizeof(float)));
    if (deviceB == NULL) {
        checkCudaCall(cudaFree(deviceA));
        cout << "could not allocate memory!" << endl;
        return;
    }
    float* deviceResult = NULL;
    checkCudaCall(cudaMalloc((void **) &deviceResult, n * sizeof(float)));
    if (deviceResult == NULL) {
        checkCudaCall(cudaFree(deviceA));
        checkCudaCall(cudaFree(deviceB));
        cout << "could not allocate memory!" << endl;
        return;
    }

    timer kernelTime1 = timer("kernelTime1");
    timer memoryTime = timer("memoryTime");

    // copy the original vectors to the GPU
    memoryTime.start();
    checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(float), cudaMemcpyHostToDevice));
    checkCudaCall(cudaMemcpy(deviceB, b, n*sizeof(float), cudaMemcpyHostToDevice));
    memoryTime.stop();

    // execute kernel
    kernelTime1.start();
    vectorAddKernel<<<n/threadBlockSize, threadBlockSize>>>(deviceA, deviceB, deviceResult);
    cudaDeviceSynchronize();
    kernelTime1.stop();

    // check whether the kernel invocation was successful
    checkCudaCall(cudaGetLastError());

    // copy result back
    memoryTime.start();
    checkCudaCall(cudaMemcpy(result, deviceResult, n * sizeof(float), cudaMemcpyDeviceToHost));
    checkCudaCall(cudaMemcpy(b, deviceB, n * sizeof(float), cudaMemcpyDeviceToHost));
    memoryTime.stop();

    checkCudaCall(cudaFree(deviceA));
    checkCudaCall(cudaFree(deviceB));
    checkCudaCall(cudaFree(deviceResult));

    cout << "vector-add (kernel): \t\t" << kernelTime1 << endl;
    cout << "vector-add (memory): \t\t" << memoryTime << endl;
}

int vectorAddSeq(int n, float* a, float* b, float* result) {
    int i;

    timer sequentialTime = timer("Sequential");

    sequentialTime.start();
    for (i=0; i<n; i++) {
        result[i] = a[i]+b[i];
    }
    sequentialTime.stop();

    cout << "vector-add (sequential): \t\t" << sequentialTime << endl;
}

int main(int argc, char* argv[]) {
    int n = 655360;
    float* a = new float[n];
    float* b = new float[n];
    float* result = new float[n];
    float* result_s = new float[n];

    if (argc > 1) n = atoi(argv[1]);

    cout << "Adding two vectors of " << n << " integer elements." << endl;

    // initialize the vectors.
    for(int i=0; i<n; i++) {
        a[i] = i;
        b[i] = i;
    }

    vectorAddSeq(n, a, b, result_s);
    vectorAddCuda(n, a, b, result);

    // verify the resuls
    for(int i=0; i<n; i++) {
        // if(result[i] != n /*2*i*/) {
        if (result[i]!=result_s[i]) {
            cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << result_s[i] << endl;
            exit(1);
        }
    }
    cout << "results OK!" << endl;

    delete[] a;
    delete[] b;
    delete[] result;

    return 0;
}
d020a380e7a887ade564a828df1797c94f7ca7e5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modified from
// https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>

#include "cc_attention_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"

void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f, Tensor weight) {
  AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");

  auto n = t.size(0);
  auto c = t.size(1);
  auto h = t.size(2);
  auto w = t.size(3);

  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = h + w - 1;
  dim3 blocks(d1, d2, d3 * n);

  AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] {
    hipLaunchKernelGGL(( ca_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
        t.contiguous().data_ptr<scalar_t>(),
        f.contiguous().data_ptr<scalar_t>(),
        weight.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(hipGetLastError());
}

void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t,
                                  const Tensor f, Tensor dt, Tensor df) {
  AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");

  auto n = t.size(0);
  auto c = t.size(1);
  auto h = t.size(2);
  auto w = t.size(3);

  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = c * n;
  dim3 blocks(d1, d2, d3);

  AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] {
    hipLaunchKernelGGL(( ca_backward_kernel_t<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
        dw.contiguous().data_ptr<scalar_t>(),
        t.contiguous().data_ptr<scalar_t>(),
        f.contiguous().data_ptr<scalar_t>(),
        dt.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });

  AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] {
    hipLaunchKernelGGL(( ca_backward_kernel_f<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
        dw.contiguous().data_ptr<scalar_t>(),
        t.contiguous().data_ptr<scalar_t>(),
        f.contiguous().data_ptr<scalar_t>(),
        df.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(hipGetLastError());
}

void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g,
                                    Tensor out) {
  AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");

  auto n = g.size(0);
  auto c = g.size(1);
  auto h = g.size(2);
  auto w = g.size(3);

  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = c * n;
  dim3 blocks(d1, d2, d3);

  AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] {
    hipLaunchKernelGGL(( ca_map_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
        weight.contiguous().data_ptr<scalar_t>(),
        g.contiguous().data_ptr<scalar_t>(),
        out.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(hipGetLastError());
}

void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight,
                                     const Tensor g, Tensor dw, Tensor dg) {
  AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");

  auto n = dout.size(0);
  auto c = dout.size(1);
  auto h = dout.size(2);
  auto w = dout.size(3);

  hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = h + w - 1;
  dim3 blocks(d1, d2, d3 * n);

  AT_DISPATCH_FLOATING_TYPES(
      weight.scalar_type(), "ca_map_backward_kernel_w", [&] {
        hipLaunchKernelGGL(( ca_map_backward_kernel_w<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
            dout.contiguous().data_ptr<scalar_t>(),
            weight.contiguous().data_ptr<scalar_t>(),
            g.contiguous().data_ptr<scalar_t>(),
            dw.contiguous().data_ptr<scalar_t>(), n, c, h, w);
      });

  d3 = c * n;
  blocks = dim3(d1, d2, d3);
  AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] {
    hipLaunchKernelGGL(( ca_map_backward_kernel_g<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
        dout.contiguous().data_ptr<scalar_t>(),
        weight.contiguous().data_ptr<scalar_t>(),
        g.contiguous().data_ptr<scalar_t>(),
        dg.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(hipGetLastError());
}
d020a380e7a887ade564a828df1797c94f7ca7e5.cu
// Modified from
// https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>

#include "cc_attention_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"

void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f, Tensor weight) {
  AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");

  auto n = t.size(0);
  auto c = t.size(1);
  auto h = t.size(2);
  auto w = t.size(3);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = h + w - 1;
  dim3 blocks(d1, d2, d3 * n);

  AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] {
    ca_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
        t.contiguous().data_ptr<scalar_t>(),
        f.contiguous().data_ptr<scalar_t>(),
        weight.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(cudaGetLastError());
}

void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t,
                                  const Tensor f, Tensor dt, Tensor df) {
  AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor");

  auto n = t.size(0);
  auto c = t.size(1);
  auto h = t.size(2);
  auto w = t.size(3);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = c * n;
  dim3 blocks(d1, d2, d3);

  AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] {
    ca_backward_kernel_t<scalar_t><<<blocks, threads, 0, stream>>>(
        dw.contiguous().data_ptr<scalar_t>(),
        t.contiguous().data_ptr<scalar_t>(),
        f.contiguous().data_ptr<scalar_t>(),
        dt.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });

  AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] {
    ca_backward_kernel_f<scalar_t><<<blocks, threads, 0, stream>>>(
        dw.contiguous().data_ptr<scalar_t>(),
        t.contiguous().data_ptr<scalar_t>(),
        f.contiguous().data_ptr<scalar_t>(),
        df.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(cudaGetLastError());
}

void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g,
                                    Tensor out) {
  AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");

  auto n = g.size(0);
  auto c = g.size(1);
  auto h = g.size(2);
  auto w = g.size(3);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = c * n;
  dim3 blocks(d1, d2, d3);

  AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] {
    ca_map_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
        weight.contiguous().data_ptr<scalar_t>(),
        g.contiguous().data_ptr<scalar_t>(),
        out.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(cudaGetLastError());
}

void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight,
                                     const Tensor g, Tensor dw, Tensor dg) {
  AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor");
  AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor");

  auto n = dout.size(0);
  auto c = dout.size(1);
  auto h = dout.size(2);
  auto w = dout.size(3);

  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // Run kernel
  dim3 threads(32, 32);
  int d1 = (w + threads.x - 1) / threads.x;
  int d2 = (h + threads.y - 1) / threads.y;
  int d3 = h + w - 1;
  dim3 blocks(d1, d2, d3 * n);

  AT_DISPATCH_FLOATING_TYPES(
      weight.scalar_type(), "ca_map_backward_kernel_w", [&] {
        ca_map_backward_kernel_w<scalar_t><<<blocks, threads, 0, stream>>>(
            dout.contiguous().data_ptr<scalar_t>(),
            weight.contiguous().data_ptr<scalar_t>(),
            g.contiguous().data_ptr<scalar_t>(),
            dw.contiguous().data_ptr<scalar_t>(), n, c, h, w);
      });

  d3 = c * n;
  blocks = dim3(d1, d2, d3);
  AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] {
    ca_map_backward_kernel_g<scalar_t><<<blocks, threads, 0, stream>>>(
        dout.contiguous().data_ptr<scalar_t>(),
        weight.contiguous().data_ptr<scalar_t>(),
        g.contiguous().data_ptr<scalar_t>(),
        dg.contiguous().data_ptr<scalar_t>(), n, c, h, w);
  });
  THCudaCheck(cudaGetLastError());
}
0d2fcaeb3d9b279a4f1493a509762cf27484f9ee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void kernelA(int N){
  int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;

  // Conditional statement to exit if index (globalThreadId) is out of bounds
  if(globalThreadId >= N)
  {
    return;
  }

  //Insert code here
  printf("Hello from block %d, threadInd x %d,threadInd y %d,threadInd z %d ,blockDim x %d, blockDim y %d,blockDim z %d \n", blockIdx.x, threadIdx.x,threadIdx.y,threadIdx.z,blockDim.x,blockDim.y,blockDim.z );
}

int main()
{
  // More realistic GPU problem size
  int problemSize = 4; // try with 1000 or 100000000

  //set the device on which the host execute files
  hipSetDevice(0);

  // On average a good thread count, the best thread count varies based on the situation
  int threadCount = 2; // try with 256 which is the averagely good size

  // Simple way to ensure enough threads are launched
  // may result in launching more threads than needed though
  int blockCount = ceil(problemSize/threadCount);

  hipLaunchKernelGGL(( kernelA) , dim3(blockCount), dim3(threadCount), 0, 0, problemSize);

  hipDeviceSynchronize();
  hipDeviceReset();
  return 0;
}
0d2fcaeb3d9b279a4f1493a509762cf27484f9ee.cu
#include <stdio.h>

__global__ void kernelA(int N){
  int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;

  // Conditional statement to exit if index (globalThreadId) is out of bounds
  if(globalThreadId >= N)
  {
    return;
  }

  //Insert code here
  printf("Hello from block %d, threadInd x %d,threadInd y %d,threadInd z %d ,blockDim x %d, blockDim y %d,blockDim z %d \n", blockIdx.x, threadIdx.x,threadIdx.y,threadIdx.z,blockDim.x,blockDim.y,blockDim.z );
}

int main()
{
  // More realistic GPU problem size
  int problemSize = 4; // try with 1000 or 100000000

  //set the device on which the host execute files
  cudaSetDevice(0);

  // On average a good thread count, the best thread count varies based on the situation
  int threadCount = 2; // try with 256 which is the averagely good size

  // Simple way to ensure enough threads are launched
  // may result in launching more threads than needed though
  int blockCount = ceil(problemSize/threadCount);

  kernelA <<<blockCount, threadCount>>>(problemSize);

  cudaDeviceSynchronize();
  cudaDeviceReset();
  return 0;
}
c7ca18d736f2dc1b4cef4d25345f3309ea04fc72.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdlib> #include <cmath> #include <ctime> #include <cfloat> #include <algorithm> #include <chrono> #include <iomanip> #include <iostream> #include <map> #include <memory> #include <random> #include <sstream> #include <string> #include <vector> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <rocblas.h> #include <cudnn.h> #include "readubyte.h" /////////////////////////////////////////////////////////////////////////////////////////// // Definitions and helper utilities // Block width for CUDA kernels #define BW 128 #ifdef USE_GFLAGS #include <gflags/gflags.h> #ifndef _WIN32 #define gflags google #endif #else // Constant versions of gflags #define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value) #define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value) #define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value) #define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value) #define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value)) #endif /** * Computes ceil(x / y) for integral nonnegative values. */ static inline unsigned int RoundUp(unsigned int nominator, unsigned int denominator) { return (nominator + denominator - 1) / denominator; } /** * Saves a PGM grayscale image out of unsigned 8-bit data */ void SavePGMFile(const unsigned char *data, size_t width, size_t height, const char *filename) { FILE *fp = fopen(filename, "wb"); if (fp) { fprintf(fp, "P5\n%lu %lu\n255\n", width, height); fwrite(data, sizeof(unsigned char), width * height, fp); fclose(fp); } } #define FatalError(s) do { \ std::stringstream _where, _message; \ _where << __FILE__ << ':' << __LINE__; \ _message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \ std::cerr << _message.str() << "\nAborting...\n"; \ hipDeviceReset(); \ exit(1); \ } while(0) #define checkCUDNN(status) do { \ std::stringstream _error; \ if (status != CUDNN_STATUS_SUCCESS) { \ _error << "CUDNN failure: " << cudnnGetErrorString(status); \ FatalError(_error.str()); \ } \ } while(0) #define checkCudaErrors(status) do { \ std::stringstream _error; \ if (status != 0) { \ _error << "Cuda failure: " << status; \ FatalError(_error.str()); \ } \ } while(0) /////////////////////////////////////////////////////////////////////////////////////////// // Command-line flags // Application parameters DEFINE_int32(gpu, 0, "The GPU ID to use"); DEFINE_int32(iterations, 1000, "Number of iterations for training"); DEFINE_int32(random_seed, -1, "Override random seed (default uses std::random_device)"); DEFINE_int32(classify, -1, "Number of images to classify to compute error rate (default uses entire test set)"); // Batch parameters DEFINE_uint64(batch_size, 64, "Batch size for training"); // Filenames DEFINE_bool(pretrained, false, "Use the pretrained CUDNN model as input"); DEFINE_bool(save_data, false, "Save pretrained weights to file"); DEFINE_string(train_images, "train-images-idx3-ubyte", "Training images filename"); DEFINE_string(train_labels, "train-labels-idx1-ubyte", "Training labels filename"); DEFINE_string(test_images, "t10k-images-idx3-ubyte", "Test images filename"); DEFINE_string(test_labels, "t10k-labels-idx1-ubyte", "Test labels filename"); // Solver parameters DEFINE_double(learning_rate, 0.01, "Base learning rate"); 
DEFINE_double(lr_gamma, 0.0001, "Learning rate policy gamma"); DEFINE_double(lr_power, 0.75, "Learning rate policy power"); /////////////////////////////////////////////////////////////////////////////////////////// // Layer representations /** * Represents a convolutional layer with bias. */ struct ConvBiasLayer { int in_channels, out_channels, kernel_size; int in_width, in_height, out_width, out_height; std::vector<float> pconv, pbias; ConvBiasLayer(int in_channels_, int out_channels_, int kernel_size_, int in_w_, int in_h_) : pconv(in_channels_ * kernel_size_ * kernel_size_ * out_channels_), pbias(out_channels_) { in_channels = in_channels_; out_channels = out_channels_; kernel_size = kernel_size_; in_width = in_w_; in_height = in_h_; out_width = in_w_ - kernel_size_ + 1; out_height = in_h_ - kernel_size_ + 1; } bool FromFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Read weights file FILE *fp = fopen(ssf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); return false; } fread(&pconv[0], sizeof(float), in_channels * out_channels * kernel_size * kernel_size, fp); fclose(fp); // Read bias file fp = fopen(ssbf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); return false; } fread(&pbias[0], sizeof(float), out_channels, fp); fclose(fp); return true; } void ToFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Write weights file FILE *fp = fopen(ssf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); exit(2); } fwrite(&pconv[0], sizeof(float), in_channels * out_channels * kernel_size * kernel_size, fp); fclose(fp); // Write bias file fp = fopen(ssbf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); exit(2); } fwrite(&pbias[0], sizeof(float), out_channels, fp); fclose(fp); } }; /** * Represents a max-pooling layer. */ struct MaxPoolLayer { int size, stride; MaxPoolLayer(int size_, int stride_) : size(size_), stride(stride_) {} }; /** * Represents a fully-connected neural network layer with bias. 
*/ struct FullyConnectedLayer { int inputs, outputs; std::vector<float> pneurons, pbias; FullyConnectedLayer(int inputs_, int outputs_) : outputs(outputs_), inputs(inputs_), pneurons(inputs_ * outputs_), pbias(outputs_) {} bool FromFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Read weights file FILE *fp = fopen(ssf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); return false; } fread(&pneurons[0], sizeof(float), inputs * outputs, fp); fclose(fp); // Read bias file fp = fopen(ssbf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); return false; } fread(&pbias[0], sizeof(float), outputs, fp); fclose(fp); return true; } void ToFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Write weights file FILE *fp = fopen(ssf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); exit(2); } fwrite(&pneurons[0], sizeof(float), inputs * outputs, fp); fclose(fp); // Write bias file fp = fopen(ssbf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); exit(2); } fwrite(&pbias[0], sizeof(float), outputs, fp); fclose(fp); } }; /////////////////////////////////////////////////////////////////////////////////////////// // GPU Kernels /** * Fills a floating-point array with ones. * * @param vec The array to fill. * @param size The number of elements in the array. */ __global__ void FillOnes(float *vec, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; vec[idx] = 1.0f; } /** * Computes the backpropagation results of the Softmax loss for each result in a batch. * Uses the softmax values obtained from forward propagation to compute the difference. * * @param label The training batch label values. * @param num_labels The number of possible labels. * @param batch_size The size of the trained batch. * @param diff The resulting gradient. 
*/ __global__ void SoftmaxLossBackprop(const float *label, int num_labels, int batch_size, float *diff) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; const int label_value = static_cast<int>(label[idx]); // For each item in the batch, decrease the result of the label's value by 1 diff[idx * num_labels + label_value] -= 1.0f; } /////////////////////////////////////////////////////////////////////////////////////////// // CUDNN/CUBLAS training context struct TrainingContext { cudnnHandle_t cudnnHandle; hipblasHandle_t cublasHandle; cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor, pool1Tensor, conv2Tensor, conv2BiasTensor, pool2Tensor, fc1Tensor, fc2Tensor; cudnnFilterDescriptor_t conv1filterDesc, conv2filterDesc; cudnnConvolutionDescriptor_t conv1Desc, conv2Desc; cudnnConvolutionFwdAlgo_t conv1algo, conv2algo; cudnnConvolutionBwdFilterAlgo_t conv1bwfalgo, conv2bwfalgo; cudnnConvolutionBwdDataAlgo_t conv2bwdalgo; cudnnPoolingDescriptor_t poolDesc; cudnnActivationDescriptor_t fc1Activation; int m_gpuid; int m_batchSize; size_t m_workspaceSize; FullyConnectedLayer& ref_fc1, &ref_fc2; // Disable copying TrainingContext& operator=(const TrainingContext&) = delete; TrainingContext(const TrainingContext&) = delete; TrainingContext(int gpuid, int batch_size, ConvBiasLayer& conv1, MaxPoolLayer& pool1, ConvBiasLayer& conv2, MaxPoolLayer& pool2, FullyConnectedLayer& fc1, FullyConnectedLayer& fc2) : ref_fc1(fc1), ref_fc2(fc2), m_gpuid(gpuid) { m_batchSize = batch_size; // Create CUBLAS and CUDNN handles checkCudaErrors(hipSetDevice(gpuid)); checkCudaErrors(hipblasCreate(&cublasHandle)); checkCUDNN(cudnnCreate(&cudnnHandle)); // Create tensor descriptors checkCUDNN(cudnnCreateTensorDescriptor(&dataTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1BiasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&pool1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv2Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv2BiasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&pool2Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&fc1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&fc2Tensor)); checkCUDNN(cudnnCreateActivationDescriptor(&fc1Activation)); checkCUDNN(cudnnCreateFilterDescriptor(&conv1filterDesc)); checkCUDNN(cudnnCreateFilterDescriptor(&conv2filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv2Desc)); checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc)); // Set tensor descriptor sizes checkCUDNN(cudnnSetTensor4dDescriptor(conv1BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, conv1.out_channels, 1, 1)); checkCUDNN(cudnnSetTensor4dDescriptor(conv2BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, conv2.out_channels, 1, 1)); checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, pool1.size, pool1.size, 0, 0, pool1.stride, pool1.stride)); checkCUDNN(cudnnSetTensor4dDescriptor(pool2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, conv2.out_channels, conv2.out_height / pool2.stride, conv2.out_width / pool2.stride)); checkCUDNN(cudnnSetTensor4dDescriptor(fc1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, fc1.outputs, 1, 1)); checkCUDNN(cudnnSetTensor4dDescriptor(fc2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, fc2.outputs, 1, 1)); checkCUDNN(cudnnSetActivationDescriptor(fc1Activation, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); // Set convolution tensor 
sizes and compute workspace size size_t workspace = 0; workspace = ::max(workspace, SetFwdConvolutionTensors(conv1, dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, conv1algo)); workspace = ::max(workspace, SetBwdConvolutionTensors(dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, &conv1bwfalgo, nullptr)); workspace = ::max(workspace, SetFwdConvolutionTensors(conv2, pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, conv2algo)); workspace = ::max(workspace, SetBwdConvolutionTensors(pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, &conv2bwfalgo, &conv2bwdalgo)); // The workspace is allocated later (if necessary) m_workspaceSize = workspace; } ~TrainingContext() { checkCudaErrors(hipSetDevice(m_gpuid)); checkCudaErrors(hipblasDestroy(cublasHandle)); checkCUDNN(cudnnDestroy(cudnnHandle)); checkCUDNN(cudnnDestroyTensorDescriptor(dataTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv1BiasTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(pool1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv2Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv2BiasTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(pool2Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(fc1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(fc2Tensor)); checkCUDNN(cudnnDestroyActivationDescriptor(fc1Activation)); checkCUDNN(cudnnDestroyFilterDescriptor(conv1filterDesc)); checkCUDNN(cudnnDestroyFilterDescriptor(conv2filterDesc)); checkCUDNN(cudnnDestroyConvolutionDescriptor(conv1Desc)); checkCUDNN(cudnnDestroyConvolutionDescriptor(conv2Desc)); checkCUDNN(cudnnDestroyPoolingDescriptor(poolDesc)); } size_t SetFwdConvolutionTensors(ConvBiasLayer& conv, cudnnTensorDescriptor_t& srcTensorDesc, cudnnTensorDescriptor_t& dstTensorDesc, cudnnFilterDescriptor_t& filterDesc, cudnnConvolutionDescriptor_t& convDesc, cudnnConvolutionFwdAlgo_t& algo) { size_t sizeInBytes = 0; int n = m_batchSize; int c = conv.in_channels; int h = conv.in_height; int w = conv.in_width; checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, conv.out_channels, conv.in_channels, conv.kernel_size, conv.kernel_size)); #if CUDNN_MAJOR > 5 checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); #else checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); #endif // Find dimension of convolution output checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc, srcTensorDesc, filterDesc, &n, &c, &h, &w)); checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, algo, &sizeInBytes)); return sizeInBytes; } void ForwardPropagation(float *data, float *conv1, float *pool1, float *conv2, float *pool2, float *fc1, float *fc1relu, float *fc2, float *result, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, void *workspace, float *onevec) { float alpha = 1.0f, beta = 0.0f; checkCudaErrors(hipSetDevice(m_gpuid)); // Conv1 layer checkCUDNN(cudnnConvolutionForward(cudnnHandle, 
&alpha, dataTensor, data, conv1filterDesc, pconv1, conv1Desc, conv1algo, workspace, m_workspaceSize, &beta, conv1Tensor, conv1)); checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv1BiasTensor, pconv1bias, &alpha, conv1Tensor, conv1)); // Pool1 layer checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv1Tensor, conv1, &beta, pool1Tensor, pool1)); // Conv2 layer checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, pool1Tensor, pool1, conv2filterDesc, pconv2, conv2Desc, conv2algo, workspace, m_workspaceSize, &beta, conv2Tensor, conv2)); checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv2BiasTensor, pconv2bias, &alpha, conv2Tensor, conv2)); // Pool2 layer checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv2Tensor, conv2, &beta, pool2Tensor, pool2)); // FC1 layer // Forward propagate neurons using weights (fc1 = pfc1'*pool2) checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, ref_fc1.outputs, m_batchSize, ref_fc1.inputs, &alpha, pfc1, ref_fc1.inputs, pool2, ref_fc1.inputs, &beta, fc1, ref_fc1.outputs)); // Add bias using GEMM's "beta" (fc1 += pfc1bias*1_vec') checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ref_fc1.outputs, m_batchSize, 1, &alpha, pfc1bias, ref_fc1.outputs, onevec, 1, &alpha, fc1, ref_fc1.outputs)); // ReLU activation checkCUDNN(cudnnActivationForward(cudnnHandle, fc1Activation, &alpha, fc1Tensor, fc1, &beta, fc1Tensor, fc1relu)); // FC2 layer // Forward propagate neurons using weights (fc2 = pfc2'*fc1relu) checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, ref_fc2.outputs, m_batchSize, ref_fc2.inputs, &alpha, pfc2, ref_fc2.inputs, fc1relu, ref_fc2.inputs, &beta, fc2, ref_fc2.outputs)); // Add bias using GEMM's "beta" (fc2 += pfc2bias*1_vec') checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ref_fc2.outputs, m_batchSize, 1, &alpha, pfc2bias, ref_fc2.outputs, onevec, 1, &alpha, fc2, ref_fc2.outputs)); // Softmax loss checkCUDNN(cudnnSoftmaxForward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, fc2Tensor, fc2, &beta, fc2Tensor, result)); } size_t SetBwdConvolutionTensors(cudnnTensorDescriptor_t& srcTensorDesc, cudnnTensorDescriptor_t& dstTensorDesc, cudnnFilterDescriptor_t& filterDesc, cudnnConvolutionDescriptor_t& convDesc, cudnnConvolutionBwdFilterAlgo_t *falgo, cudnnConvolutionBwdDataAlgo_t *dalgo) { size_t sizeInBytes = 0, tmpsize = 0; // If backprop filter algorithm was requested if (falgo) { checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, falgo)); checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc, *falgo, &tmpsize)); sizeInBytes = ::max(sizeInBytes, tmpsize); } // If backprop data algorithm was requested if (dalgo) { checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, dalgo)); checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc, *dalgo, &tmpsize)); sizeInBytes = ::max(sizeInBytes, tmpsize); } return sizeInBytes; } void Backpropagation(ConvBiasLayer& layer_conv1, MaxPoolLayer& layer_pool1, ConvBiasLayer& layer_conv2, MaxPoolLayer& layer_pool2, float *data, float *labels, float *conv1, float *pool1, float *conv2, float *pool2, float *fc1, float *fc1relu, float *fc2, 
float *fc2smax, float *dloss_data, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, float *gconv1, float *gconv1bias, float *dpool1, float *gconv2, float *gconv2bias, float *dconv2, float *dpool2, float *gfc1, float *gfc1bias, float *dfc1, float *dfc1relu, float *gfc2, float *gfc2bias, float *dfc2, void *workspace, float *onevec) { float alpha = 1.0f, beta = 0.0f; float scalVal = 1.0f / static_cast<float>(m_batchSize); checkCudaErrors(hipSetDevice(m_gpuid)); // Initialization (using the training error function) checkCudaErrors(hipMemcpyAsync(dloss_data, fc2smax, sizeof(float) * m_batchSize * ref_fc2.outputs, hipMemcpyDeviceToDevice)); // Softmax layer hipLaunchKernelGGL(( SoftmaxLossBackprop), dim3(RoundUp(m_batchSize, BW)), dim3(BW), 0, 0, labels, ref_fc2.outputs, m_batchSize, dloss_data); // Accounting for batch size in SGD checkCudaErrors(hipblasSscal(cublasHandle, ref_fc2.outputs * m_batchSize, &scalVal, dloss_data, 1)); // FC2 layer // Compute derivative with respect to weights: gfc2 = (fc1relu * dfc2smax') checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, ref_fc2.inputs, ref_fc2.outputs, m_batchSize, &alpha, fc1relu, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, gfc2, ref_fc2.inputs)); // Compute derivative with respect to bias: gfc2bias = dfc2smax * 1_vec checkCudaErrors(hipblasSgemv(cublasHandle, HIPBLAS_OP_N, ref_fc2.outputs, m_batchSize, &alpha, dloss_data, ref_fc2.outputs, onevec, 1, &beta, gfc2bias, 1)); // Compute derivative with respect to data (for previous layer): pfc2*dfc2smax (500x10*10xN) checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ref_fc2.inputs, m_batchSize, ref_fc2.outputs, &alpha, pfc2, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, dfc2, ref_fc2.inputs)); // ReLU activation checkCUDNN(cudnnActivationBackward(cudnnHandle, fc1Activation, &alpha, fc1Tensor, fc1relu, fc1Tensor, dfc2, fc1Tensor, fc1, &beta, fc1Tensor, dfc1relu)); // FC1 layer // Compute derivative with respect to weights: gfc1 = (pool2 * dfc1relu') checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, ref_fc1.inputs, ref_fc1.outputs, m_batchSize, &alpha, pool2, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, gfc1, ref_fc1.inputs)); // Compute derivative with respect to bias: gfc1bias = dfc1relu * 1_vec checkCudaErrors(hipblasSgemv(cublasHandle, HIPBLAS_OP_N, ref_fc1.outputs, m_batchSize, &alpha, dfc1relu, ref_fc1.outputs, onevec, 1, &beta, gfc1bias, 1)); // Compute derivative with respect to data (for previous layer): pfc1*dfc1relu (800x500*500xN) checkCudaErrors(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ref_fc1.inputs, m_batchSize, ref_fc1.outputs, &alpha, pfc1, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, dfc1, ref_fc1.inputs)); // Pool2 layer checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha, pool2Tensor, pool2, pool2Tensor, dfc1, conv2Tensor, conv2, &beta, conv2Tensor, dpool2)); // Conv2 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv2Tensor, dpool2, &beta, conv2BiasTensor, gconv2bias)); checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, pool1Tensor, pool1, conv2Tensor, dpool2, conv2Desc, conv2bwfalgo, workspace, m_workspaceSize, &beta, conv2filterDesc, gconv2)); checkCUDNN(cudnnConvolutionBackwardData(cudnnHandle, &alpha, conv2filterDesc, pconv2, conv2Tensor, dpool2, conv2Desc, conv2bwdalgo, workspace, m_workspaceSize, &beta, pool1Tensor, dconv2)); // Pool1 layer 
checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha, pool1Tensor, pool1, pool1Tensor, dconv2, conv1Tensor, conv1, &beta, conv1Tensor, dpool1)); // Conv1 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv1Tensor, dpool1, &beta, conv1BiasTensor, gconv1bias)); checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, dataTensor, data, conv1Tensor, dpool1, conv1Desc, conv1bwfalgo, workspace, m_workspaceSize, &beta, conv1filterDesc, gconv1)); // No need for convBackwardData because there are no more layers below } void UpdateWeights(float learning_rate, ConvBiasLayer& conv1, ConvBiasLayer& conv2, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, float *gconv1, float *gconv1bias, float *gconv2, float *gconv2bias, float *gfc1, float *gfc1bias, float *gfc2, float *gfc2bias) { float alpha = -learning_rate; checkCudaErrors(hipSetDevice(m_gpuid)); // Conv1 checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv1.pconv.size()), &alpha, gconv1, 1, pconv1, 1)); checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv1.pbias.size()), &alpha, gconv1bias, 1, pconv1bias, 1)); // Conv2 checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv2.pconv.size()), &alpha, gconv2, 1, pconv2, 1)); checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(conv2.pbias.size()), &alpha, gconv2bias, 1, pconv2bias, 1)); // Fully connected 1 checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pneurons.size()), &alpha, gfc1, 1, pfc1, 1)); checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pbias.size()), &alpha, gfc1bias, 1, pfc1bias, 1)); // Fully connected 2 checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pneurons.size()), &alpha, gfc2, 1, pfc2, 1)); checkCudaErrors(hipblasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pbias.size()), &alpha, gfc2bias, 1, pfc2bias, 1)); } }; /////////////////////////////////////////////////////////////////////////////////////////// // Main function int main(int argc, char **argv) { #ifdef USE_GFLAGS gflags::ParseCommandLineFlags(&argc, &argv, true); #endif size_t width, height, channels = 1; // Open input data printf("Reading input data\n"); // Read dataset sizes size_t train_size = ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), nullptr, nullptr, width, height); size_t test_size = ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), nullptr, nullptr, width, height); if (train_size == 0) return 1; std::vector<uint8_t> train_images(train_size * width * height * channels), train_labels(train_size); std::vector<uint8_t> test_images(test_size * width * height * channels), test_labels(test_size); // Read data from datasets if (ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), &train_images[0], &train_labels[0], width, height) != train_size) return 2; if (ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), &test_images[0], &test_labels[0], width, height) != test_size) return 3; printf("Done. 
Training dataset size: %d, Test dataset size: %d\n", (int)train_size, (int)test_size); printf("Batch size: %lld, iterations: %d\n", FLAGS_batch_size, FLAGS_iterations); // This code snippet saves a random image and its label /* std::random_device rd_image; int random_image = rd_image() % train_size; std::stringstream ss; ss << "image-" << (int)train_labels[random_image] << ".pgm"; SavePGMFile(&train_images[0] + random_image * width*height*channels, width, height, ss.str().c_str()); */ // Choose GPU int num_gpus; checkCudaErrors(hipGetDeviceCount(&num_gpus)); if (FLAGS_gpu < 0 || FLAGS_gpu >= num_gpus) { printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n", FLAGS_gpu, num_gpus); return 4; } // Create the LeNet network architecture ConvBiasLayer conv1((int)channels, 20, 5, (int)width, (int)height); MaxPoolLayer pool1(2, 2); ConvBiasLayer conv2(conv1.out_channels, 50, 5, conv1.out_width / pool1.stride, conv1.out_height / pool1.stride); MaxPoolLayer pool2(2, 2); FullyConnectedLayer fc1((conv2.out_channels*conv2.out_width*conv2.out_height) / (pool2.stride * pool2.stride), 500); FullyConnectedLayer fc2(fc1.outputs, 10); // Initialize CUDNN/CUBLAS training context TrainingContext context(FLAGS_gpu, FLAGS_batch_size, conv1, pool1, conv2, pool2, fc1, fc2); // Determine initial network structure bool bRet = true; if (FLAGS_pretrained) { bRet = conv1.FromFile("conv1"); bRet &= conv2.FromFile("conv2"); bRet &= fc1.FromFile("ip1"); bRet &= fc2.FromFile("ip2"); } if (!bRet || !FLAGS_pretrained) { // Create random network std::random_device rd; std::mt19937 gen(FLAGS_random_seed < 0 ? rd() : static_cast<unsigned int>(FLAGS_random_seed)); // Xavier weight filling float wconv1 = sqrt(3.0f / (conv1.kernel_size * conv1.kernel_size * conv1.in_channels)); std::uniform_real_distribution<> dconv1(-wconv1, wconv1); float wconv2 = sqrt(3.0f / (conv2.kernel_size * conv2.kernel_size * conv2.in_channels)); std::uniform_real_distribution<> dconv2(-wconv2, wconv2); float wfc1 = sqrt(3.0f / (fc1.inputs * fc1.outputs)); std::uniform_real_distribution<> dfc1(-wfc1, wfc1); float wfc2 = sqrt(3.0f / (fc2.inputs * fc2.outputs)); std::uniform_real_distribution<> dfc2(-wfc2, wfc2); // Randomize network for (auto&& iter : conv1.pconv) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv1.pbias) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv2.pconv) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : conv2.pbias) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : fc1.pneurons) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc1.pbias) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc2.pneurons) iter = static_cast<float>(dfc2(gen)); for (auto&& iter : fc2.pbias) iter = static_cast<float>(dfc2(gen)); } ///////////////////////////////////////////////////////////////////////////// // Create GPU data structures // Forward propagation data float *d_data, *d_labels, *d_conv1, *d_pool1, *d_conv2, *d_pool2, *d_fc1, *d_fc1relu, *d_fc2, *d_fc2smax; // Buffer | Element | N | C | H | W //----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(hipMalloc(&d_data, sizeof(float) * context.m_batchSize * channels * height * width)); checkCudaErrors(hipMalloc(&d_labels, sizeof(float) * context.m_batchSize * 1 * 1 * 1)); checkCudaErrors(hipMalloc(&d_conv1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); 
checkCudaErrors(hipMalloc(&d_pool1, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(hipMalloc(&d_conv2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(hipMalloc(&d_pool2, sizeof(float) * context.m_batchSize * conv2.out_channels * (conv2.out_height / pool2.stride) * (conv2.out_width / pool2.stride))); checkCudaErrors(hipMalloc(&d_fc1, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(hipMalloc(&d_fc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(hipMalloc(&d_fc2, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(hipMalloc(&d_fc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); // Network parameters float *d_pconv1, *d_pconv1bias, *d_pconv2, *d_pconv2bias; float *d_pfc1, *d_pfc1bias, *d_pfc2, *d_pfc2bias; checkCudaErrors(hipMalloc(&d_pconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(hipMalloc(&d_pconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(hipMalloc(&d_pconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(hipMalloc(&d_pconv2bias, sizeof(float) * conv2.pbias.size())); checkCudaErrors(hipMalloc(&d_pfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(hipMalloc(&d_pfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(hipMalloc(&d_pfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(hipMalloc(&d_pfc2bias, sizeof(float) * fc2.pbias.size())); // Network parameter gradients float *d_gconv1, *d_gconv1bias, *d_gconv2, *d_gconv2bias; float *d_gfc1, *d_gfc1bias, *d_gfc2, *d_gfc2bias; checkCudaErrors(hipMalloc(&d_gconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(hipMalloc(&d_gconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(hipMalloc(&d_gconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(hipMalloc(&d_gconv2bias, sizeof(float) * conv2.pbias.size())); checkCudaErrors(hipMalloc(&d_gfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(hipMalloc(&d_gfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(hipMalloc(&d_gfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(hipMalloc(&d_gfc2bias, sizeof(float) * fc2.pbias.size())); // Differentials w.r.t. 
data float *d_dpool1, *d_dpool2, *d_dconv2, *d_dfc1, *d_dfc1relu, *d_dfc2, *d_dfc2smax, *d_dlossdata; // Buffer | Element | N | C | H | W //----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(hipMalloc(&d_dpool1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); checkCudaErrors(hipMalloc(&d_dpool2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(hipMalloc(&d_dconv2, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(hipMalloc(&d_dfc1, sizeof(float) * context.m_batchSize * fc1.inputs)); checkCudaErrors(hipMalloc(&d_dfc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(hipMalloc(&d_dfc2, sizeof(float) * context.m_batchSize * fc2.inputs)); checkCudaErrors(hipMalloc(&d_dfc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(hipMalloc(&d_dlossdata,sizeof(float) * context.m_batchSize * fc2.outputs)); // Temporary buffers and workspaces float *d_onevec; void *d_cudnn_workspace = nullptr; checkCudaErrors(hipMalloc(&d_onevec, sizeof(float)* context.m_batchSize)); if (context.m_workspaceSize > 0) checkCudaErrors(hipMalloc(&d_cudnn_workspace, context.m_workspaceSize)); ///////////////////////////////////////////////////////////////////////////// // Copy initial network to device checkCudaErrors(hipMemcpyAsync(d_pconv1, &conv1.pconv[0], sizeof(float) * conv1.pconv.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv1bias, &conv1.pbias[0], sizeof(float) * conv1.pbias.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv2, &conv2.pconv[0], sizeof(float) * conv2.pconv.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pconv2bias, &conv2.pbias[0], sizeof(float) * conv2.pbias.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc1, &fc1.pneurons[0], sizeof(float) * fc1.pneurons.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc1bias, &fc1.pbias[0], sizeof(float) * fc1.pbias.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc2, &fc2.pneurons[0], sizeof(float) * fc2.pneurons.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyAsync(d_pfc2bias, &fc2.pbias[0], sizeof(float) * fc2.pbias.size(), hipMemcpyHostToDevice)); // Fill one-vector with ones hipLaunchKernelGGL(( FillOnes), dim3(RoundUp(context.m_batchSize, BW)), dim3(BW), 0, 0, d_onevec, context.m_batchSize); printf("Preparing dataset\n"); // Normalize training set to be in [0,1] std::vector<float> train_images_float(train_images.size()), train_labels_float(train_size); for (size_t i = 0; i < train_size * channels * width * height; ++i) train_images_float[i] = (float)train_images[i] / 255.0f; for (size_t i = 0; i < train_size; ++i) train_labels_float[i] = (float)train_labels[i]; printf("Training...\n"); // Use SGD to train the network checkCudaErrors(hipDeviceSynchronize()); auto t1 = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < FLAGS_iterations; ++iter) { // Train int imageid = iter % (train_size / context.m_batchSize); // Prepare current batch on device checkCudaErrors(hipMemcpyAsync(d_data, &train_images_float[imageid * context.m_batchSize * width*height*channels], sizeof(float) * context.m_batchSize * channels * width * height, hipMemcpyHostToDevice)); 
checkCudaErrors(hipMemcpyAsync(d_labels, &train_labels_float[imageid * context.m_batchSize], sizeof(float) * context.m_batchSize, hipMemcpyHostToDevice)); // Forward propagation context.ForwardPropagation(d_data, d_conv1, d_pool1, d_conv2, d_pool2, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Backward propagation context.Backpropagation(conv1, pool1, conv2, pool2, d_data, d_labels, d_conv1, d_pool1, d_conv2, d_pool2, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_dlossdata, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_dpool1, d_gconv2, d_gconv2bias, d_dconv2, d_dpool2, d_gfc1, d_gfc1bias, d_dfc1, d_dfc1relu, d_gfc2, d_gfc2bias, d_dfc2, d_cudnn_workspace, d_onevec); // Compute learning rate float learningRate = static_cast<float>(FLAGS_learning_rate * pow((1.0 + FLAGS_lr_gamma * iter), (-FLAGS_lr_power))); // Update weights context.UpdateWeights(learningRate, conv1, conv2, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_gconv2, d_gconv2bias, d_gfc1, d_gfc1bias, d_gfc2, d_gfc2bias); } checkCudaErrors(hipDeviceSynchronize()); auto t2 = std::chrono::high_resolution_clock::now(); printf("Iteration time: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f / FLAGS_iterations); if (FLAGS_save_data) { // Copy trained weights from GPU to CPU checkCudaErrors(hipMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&conv1.pbias[0], d_pconv1bias, sizeof(float) * conv1.pbias.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&conv2.pconv[0], d_pconv2, sizeof(float) * conv2.pconv.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&conv2.pbias[0], d_pconv2bias, sizeof(float) * conv2.pbias.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc1.pneurons[0], d_pfc1, sizeof(float) * fc1.pneurons.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc1.pbias[0], d_pfc1bias, sizeof(float) * fc1.pbias.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc2.pneurons[0], d_pfc2, sizeof(float) * fc2.pneurons.size(), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(&fc2.pbias[0], d_pfc2bias, sizeof(float) * fc2.pbias.size(), hipMemcpyDeviceToHost)); // Now save data printf("Saving data to file\n"); conv1.ToFile("conv1"); conv2.ToFile("conv2"); fc1.ToFile("ip1"); fc2.ToFile("ip2"); } float classification_error = 1.0f; int classifications = FLAGS_classify; if (classifications < 0) classifications = (int)test_size; // Test the resulting neural network's classification if (classifications > 0) { // Initialize a TrainingContext structure for testing (different batch size) TrainingContext test_context(FLAGS_gpu, 1, conv1, pool1, conv2, pool2, fc1, fc2); // Ensure correct workspaceSize is allocated for testing if (context.m_workspaceSize < test_context.m_workspaceSize) { checkCudaErrors(hipFree(d_cudnn_workspace)); checkCudaErrors(hipMalloc(&d_cudnn_workspace, test_context.m_workspaceSize)); } int num_errors = 0; for (int i = 0; i < classifications; ++i) { std::vector<float> data(width * height); // Normalize image to be in [0,1] for (int j = 0; j < width * height; ++j) data[j] = (float)test_images[i * width*height*channels + j] / 255.0f; checkCudaErrors(hipMemcpyAsync(d_data, &data[0], sizeof(float) * width * height, hipMemcpyHostToDevice)); 
// Forward propagate test image test_context.ForwardPropagation(d_data, d_conv1, d_pool1, d_conv2, d_pool2, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Perform classification std::vector<float> class_vec(10); // Copy back result checkCudaErrors(hipMemcpy(&class_vec[0], d_fc2smax, sizeof(float) * 10, hipMemcpyDeviceToHost)); // Determine classification according to maximal response int chosen = 0; for (int id = 1; id < 10; ++id) { if (class_vec[chosen] < class_vec[id]) chosen = id; } if (chosen != test_labels[i]) ++num_errors; } classification_error = (float)num_errors / (float)classifications; printf("Classification result: %.2f%% error (used %d images)\n", classification_error * 100.0f, (int)classifications); } // Free data structures checkCudaErrors(hipFree(d_data)); checkCudaErrors(hipFree(d_conv1)); checkCudaErrors(hipFree(d_pool1)); checkCudaErrors(hipFree(d_conv2)); checkCudaErrors(hipFree(d_pool2)); checkCudaErrors(hipFree(d_fc1)); checkCudaErrors(hipFree(d_fc2)); checkCudaErrors(hipFree(d_pconv1)); checkCudaErrors(hipFree(d_pconv1bias)); checkCudaErrors(hipFree(d_pconv2)); checkCudaErrors(hipFree(d_pconv2bias)); checkCudaErrors(hipFree(d_pfc1)); checkCudaErrors(hipFree(d_pfc1bias)); checkCudaErrors(hipFree(d_pfc2)); checkCudaErrors(hipFree(d_pfc2bias)); checkCudaErrors(hipFree(d_gconv1)); checkCudaErrors(hipFree(d_gconv1bias)); checkCudaErrors(hipFree(d_gconv2)); checkCudaErrors(hipFree(d_gconv2bias)); checkCudaErrors(hipFree(d_gfc1)); checkCudaErrors(hipFree(d_gfc1bias)); checkCudaErrors(hipFree(d_dfc1)); checkCudaErrors(hipFree(d_gfc2)); checkCudaErrors(hipFree(d_gfc2bias)); checkCudaErrors(hipFree(d_dfc2)); checkCudaErrors(hipFree(d_dpool1)); checkCudaErrors(hipFree(d_dconv2)); checkCudaErrors(hipFree(d_dpool2)); checkCudaErrors(hipFree(d_labels)); checkCudaErrors(hipFree(d_dlossdata)); checkCudaErrors(hipFree(d_onevec)); if (d_cudnn_workspace != nullptr) checkCudaErrors(hipFree(d_cudnn_workspace)); return 0; }
c7ca18d736f2dc1b4cef4d25345f3309ea04fc72.cu
#include <cstdio> #include <cstdlib> #include <cmath> #include <ctime> #include <cfloat> #include <algorithm> #include <chrono> #include <iomanip> #include <iostream> #include <map> #include <memory> #include <random> #include <sstream> #include <string> #include <vector> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cublas_v2.h> #include <cudnn.h> #include "readubyte.h" /////////////////////////////////////////////////////////////////////////////////////////// // Definitions and helper utilities // Block width for CUDA kernels #define BW 128 #ifdef USE_GFLAGS #include <gflags/gflags.h> #ifndef _WIN32 #define gflags google #endif #else // Constant versions of gflags #define DEFINE_int32(flag, default_value, description) const int FLAGS_##flag = (default_value) #define DEFINE_uint64(flag, default_value, description) const unsigned long long FLAGS_##flag = (default_value) #define DEFINE_bool(flag, default_value, description) const bool FLAGS_##flag = (default_value) #define DEFINE_double(flag, default_value, description) const double FLAGS_##flag = (default_value) #define DEFINE_string(flag, default_value, description) const std::string FLAGS_##flag ((default_value)) #endif /** * Computes ceil(x / y) for integral nonnegative values. */ static inline unsigned int RoundUp(unsigned int nominator, unsigned int denominator) { return (nominator + denominator - 1) / denominator; } /** * Saves a PGM grayscale image out of unsigned 8-bit data */ void SavePGMFile(const unsigned char *data, size_t width, size_t height, const char *filename) { FILE *fp = fopen(filename, "wb"); if (fp) { fprintf(fp, "P5\n%lu %lu\n255\n", width, height); fwrite(data, sizeof(unsigned char), width * height, fp); fclose(fp); } } #define FatalError(s) do { \ std::stringstream _where, _message; \ _where << __FILE__ << ':' << __LINE__; \ _message << std::string(s) + "\n" << __FILE__ << ':' << __LINE__; \ std::cerr << _message.str() << "\nAborting...\n"; \ cudaDeviceReset(); \ exit(1); \ } while(0) #define checkCUDNN(status) do { \ std::stringstream _error; \ if (status != CUDNN_STATUS_SUCCESS) { \ _error << "CUDNN failure: " << cudnnGetErrorString(status); \ FatalError(_error.str()); \ } \ } while(0) #define checkCudaErrors(status) do { \ std::stringstream _error; \ if (status != 0) { \ _error << "Cuda failure: " << status; \ FatalError(_error.str()); \ } \ } while(0) /////////////////////////////////////////////////////////////////////////////////////////// // Command-line flags // Application parameters DEFINE_int32(gpu, 0, "The GPU ID to use"); DEFINE_int32(iterations, 1000, "Number of iterations for training"); DEFINE_int32(random_seed, -1, "Override random seed (default uses std::random_device)"); DEFINE_int32(classify, -1, "Number of images to classify to compute error rate (default uses entire test set)"); // Batch parameters DEFINE_uint64(batch_size, 64, "Batch size for training"); // Filenames DEFINE_bool(pretrained, false, "Use the pretrained CUDNN model as input"); DEFINE_bool(save_data, false, "Save pretrained weights to file"); DEFINE_string(train_images, "train-images-idx3-ubyte", "Training images filename"); DEFINE_string(train_labels, "train-labels-idx1-ubyte", "Training labels filename"); DEFINE_string(test_images, "t10k-images-idx3-ubyte", "Test images filename"); DEFINE_string(test_labels, "t10k-labels-idx1-ubyte", "Test labels filename"); // Solver parameters DEFINE_double(learning_rate, 0.01, "Base learning rate"); DEFINE_double(lr_gamma, 0.0001, "Learning rate policy gamma"); 
DEFINE_double(lr_power, 0.75, "Learning rate policy power"); /////////////////////////////////////////////////////////////////////////////////////////// // Layer representations /** * Represents a convolutional layer with bias. */ struct ConvBiasLayer { int in_channels, out_channels, kernel_size; int in_width, in_height, out_width, out_height; std::vector<float> pconv, pbias; ConvBiasLayer(int in_channels_, int out_channels_, int kernel_size_, int in_w_, int in_h_) : pconv(in_channels_ * kernel_size_ * kernel_size_ * out_channels_), pbias(out_channels_) { in_channels = in_channels_; out_channels = out_channels_; kernel_size = kernel_size_; in_width = in_w_; in_height = in_h_; out_width = in_w_ - kernel_size_ + 1; out_height = in_h_ - kernel_size_ + 1; } bool FromFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Read weights file FILE *fp = fopen(ssf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); return false; } fread(&pconv[0], sizeof(float), in_channels * out_channels * kernel_size * kernel_size, fp); fclose(fp); // Read bias file fp = fopen(ssbf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); return false; } fread(&pbias[0], sizeof(float), out_channels, fp); fclose(fp); return true; } void ToFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Write weights file FILE *fp = fopen(ssf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); exit(2); } fwrite(&pconv[0], sizeof(float), in_channels * out_channels * kernel_size * kernel_size, fp); fclose(fp); // Write bias file fp = fopen(ssbf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); exit(2); } fwrite(&pbias[0], sizeof(float), out_channels, fp); fclose(fp); } }; /** * Represents a max-pooling layer. */ struct MaxPoolLayer { int size, stride; MaxPoolLayer(int size_, int stride_) : size(size_), stride(stride_) {} }; /** * Represents a fully-connected neural network layer with bias. 
*/ struct FullyConnectedLayer { int inputs, outputs; std::vector<float> pneurons, pbias; FullyConnectedLayer(int inputs_, int outputs_) : outputs(outputs_), inputs(inputs_), pneurons(inputs_ * outputs_), pbias(outputs_) {} bool FromFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Read weights file FILE *fp = fopen(ssf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); return false; } fread(&pneurons[0], sizeof(float), inputs * outputs, fp); fclose(fp); // Read bias file fp = fopen(ssbf.str().c_str(), "rb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); return false; } fread(&pbias[0], sizeof(float), outputs, fp); fclose(fp); return true; } void ToFile(const char *fileprefix) { std::stringstream ssf, ssbf; ssf << fileprefix << ".bin"; ssbf << fileprefix << ".bias.bin"; // Write weights file FILE *fp = fopen(ssf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssf.str().c_str()); exit(2); } fwrite(&pneurons[0], sizeof(float), inputs * outputs, fp); fclose(fp); // Write bias file fp = fopen(ssbf.str().c_str(), "wb"); if (!fp) { printf("ERROR: Cannot open file %s\n", ssbf.str().c_str()); exit(2); } fwrite(&pbias[0], sizeof(float), outputs, fp); fclose(fp); } }; /////////////////////////////////////////////////////////////////////////////////////////// // GPU Kernels /** * Fills a floating-point array with ones. * * @param vec The array to fill. * @param size The number of elements in the array. */ __global__ void FillOnes(float *vec, int size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= size) return; vec[idx] = 1.0f; } /** * Computes the backpropagation results of the Softmax loss for each result in a batch. * Uses the softmax values obtained from forward propagation to compute the difference. * * @param label The training batch label values. * @param num_labels The number of possible labels. * @param batch_size The size of the trained batch. * @param diff The resulting gradient. 
*/ __global__ void SoftmaxLossBackprop(const float *label, int num_labels, int batch_size, float *diff) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; const int label_value = static_cast<int>(label[idx]); // For each item in the batch, decrease the result of the label's value by 1 diff[idx * num_labels + label_value] -= 1.0f; } /////////////////////////////////////////////////////////////////////////////////////////// // CUDNN/CUBLAS training context struct TrainingContext { cudnnHandle_t cudnnHandle; cublasHandle_t cublasHandle; cudnnTensorDescriptor_t dataTensor, conv1Tensor, conv1BiasTensor, pool1Tensor, conv2Tensor, conv2BiasTensor, pool2Tensor, fc1Tensor, fc2Tensor; cudnnFilterDescriptor_t conv1filterDesc, conv2filterDesc; cudnnConvolutionDescriptor_t conv1Desc, conv2Desc; cudnnConvolutionFwdAlgo_t conv1algo, conv2algo; cudnnConvolutionBwdFilterAlgo_t conv1bwfalgo, conv2bwfalgo; cudnnConvolutionBwdDataAlgo_t conv2bwdalgo; cudnnPoolingDescriptor_t poolDesc; cudnnActivationDescriptor_t fc1Activation; int m_gpuid; int m_batchSize; size_t m_workspaceSize; FullyConnectedLayer& ref_fc1, &ref_fc2; // Disable copying TrainingContext& operator=(const TrainingContext&) = delete; TrainingContext(const TrainingContext&) = delete; TrainingContext(int gpuid, int batch_size, ConvBiasLayer& conv1, MaxPoolLayer& pool1, ConvBiasLayer& conv2, MaxPoolLayer& pool2, FullyConnectedLayer& fc1, FullyConnectedLayer& fc2) : ref_fc1(fc1), ref_fc2(fc2), m_gpuid(gpuid) { m_batchSize = batch_size; // Create CUBLAS and CUDNN handles checkCudaErrors(cudaSetDevice(gpuid)); checkCudaErrors(cublasCreate(&cublasHandle)); checkCUDNN(cudnnCreate(&cudnnHandle)); // Create tensor descriptors checkCUDNN(cudnnCreateTensorDescriptor(&dataTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv1BiasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&pool1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv2Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&conv2BiasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&pool2Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&fc1Tensor)); checkCUDNN(cudnnCreateTensorDescriptor(&fc2Tensor)); checkCUDNN(cudnnCreateActivationDescriptor(&fc1Activation)); checkCUDNN(cudnnCreateFilterDescriptor(&conv1filterDesc)); checkCUDNN(cudnnCreateFilterDescriptor(&conv2filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv1Desc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&conv2Desc)); checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc)); // Set tensor descriptor sizes checkCUDNN(cudnnSetTensor4dDescriptor(conv1BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, conv1.out_channels, 1, 1)); checkCUDNN(cudnnSetTensor4dDescriptor(conv2BiasTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, conv2.out_channels, 1, 1)); checkCUDNN(cudnnSetPooling2dDescriptor(poolDesc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, pool1.size, pool1.size, 0, 0, pool1.stride, pool1.stride)); checkCUDNN(cudnnSetTensor4dDescriptor(pool2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, conv2.out_channels, conv2.out_height / pool2.stride, conv2.out_width / pool2.stride)); checkCUDNN(cudnnSetTensor4dDescriptor(fc1Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, fc1.outputs, 1, 1)); checkCUDNN(cudnnSetTensor4dDescriptor(fc2Tensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, fc2.outputs, 1, 1)); checkCUDNN(cudnnSetActivationDescriptor(fc1Activation, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0.0)); // Set convolution tensor sizes 
and compute workspace size size_t workspace = 0; workspace = std::max(workspace, SetFwdConvolutionTensors(conv1, dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, conv1algo)); workspace = std::max(workspace, SetBwdConvolutionTensors(dataTensor, conv1Tensor, conv1filterDesc, conv1Desc, &conv1bwfalgo, nullptr)); workspace = std::max(workspace, SetFwdConvolutionTensors(conv2, pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, conv2algo)); workspace = std::max(workspace, SetBwdConvolutionTensors(pool1Tensor, conv2Tensor, conv2filterDesc, conv2Desc, &conv2bwfalgo, &conv2bwdalgo)); // The workspace is allocated later (if necessary) m_workspaceSize = workspace; } ~TrainingContext() { checkCudaErrors(cudaSetDevice(m_gpuid)); checkCudaErrors(cublasDestroy(cublasHandle)); checkCUDNN(cudnnDestroy(cudnnHandle)); checkCUDNN(cudnnDestroyTensorDescriptor(dataTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv1BiasTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(pool1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv2Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(conv2BiasTensor)); checkCUDNN(cudnnDestroyTensorDescriptor(pool2Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(fc1Tensor)); checkCUDNN(cudnnDestroyTensorDescriptor(fc2Tensor)); checkCUDNN(cudnnDestroyActivationDescriptor(fc1Activation)); checkCUDNN(cudnnDestroyFilterDescriptor(conv1filterDesc)); checkCUDNN(cudnnDestroyFilterDescriptor(conv2filterDesc)); checkCUDNN(cudnnDestroyConvolutionDescriptor(conv1Desc)); checkCUDNN(cudnnDestroyConvolutionDescriptor(conv2Desc)); checkCUDNN(cudnnDestroyPoolingDescriptor(poolDesc)); } size_t SetFwdConvolutionTensors(ConvBiasLayer& conv, cudnnTensorDescriptor_t& srcTensorDesc, cudnnTensorDescriptor_t& dstTensorDesc, cudnnFilterDescriptor_t& filterDesc, cudnnConvolutionDescriptor_t& convDesc, cudnnConvolutionFwdAlgo_t& algo) { size_t sizeInBytes = 0; int n = m_batchSize; int c = conv.in_channels; int h = conv.in_height; int w = conv.in_width; checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); checkCUDNN(cudnnSetFilter4dDescriptor(filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, conv.out_channels, conv.in_channels, conv.kernel_size, conv.kernel_size)); #if CUDNN_MAJOR > 5 checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); #else checkCUDNN(cudnnSetConvolution2dDescriptor(convDesc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); #endif // Find dimension of convolution output checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convDesc, srcTensorDesc, filterDesc, &n, &c, &h, &w)); checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, n, c, h, w)); checkCUDNN(cudnnGetConvolutionForwardAlgorithm(cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnnHandle, srcTensorDesc, filterDesc, convDesc, dstTensorDesc, algo, &sizeInBytes)); return sizeInBytes; } void ForwardPropagation(float *data, float *conv1, float *pool1, float *conv2, float *pool2, float *fc1, float *fc1relu, float *fc2, float *result, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, void *workspace, float *onevec) { float alpha = 1.0f, beta = 0.0f; checkCudaErrors(cudaSetDevice(m_gpuid)); // Conv1 layer 
checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, dataTensor, data, conv1filterDesc, pconv1, conv1Desc, conv1algo, workspace, m_workspaceSize, &beta, conv1Tensor, conv1)); checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv1BiasTensor, pconv1bias, &alpha, conv1Tensor, conv1)); // Pool1 layer checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv1Tensor, conv1, &beta, pool1Tensor, pool1)); // Conv2 layer checkCUDNN(cudnnConvolutionForward(cudnnHandle, &alpha, pool1Tensor, pool1, conv2filterDesc, pconv2, conv2Desc, conv2algo, workspace, m_workspaceSize, &beta, conv2Tensor, conv2)); checkCUDNN(cudnnAddTensor(cudnnHandle, &alpha, conv2BiasTensor, pconv2bias, &alpha, conv2Tensor, conv2)); // Pool2 layer checkCUDNN(cudnnPoolingForward(cudnnHandle, poolDesc, &alpha, conv2Tensor, conv2, &beta, pool2Tensor, pool2)); // FC1 layer // Forward propagate neurons using weights (fc1 = pfc1'*pool2) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, ref_fc1.outputs, m_batchSize, ref_fc1.inputs, &alpha, pfc1, ref_fc1.inputs, pool2, ref_fc1.inputs, &beta, fc1, ref_fc1.outputs)); // Add bias using GEMM's "beta" (fc1 += pfc1bias*1_vec') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc1.outputs, m_batchSize, 1, &alpha, pfc1bias, ref_fc1.outputs, onevec, 1, &alpha, fc1, ref_fc1.outputs)); // ReLU activation checkCUDNN(cudnnActivationForward(cudnnHandle, fc1Activation, &alpha, fc1Tensor, fc1, &beta, fc1Tensor, fc1relu)); // FC2 layer // Forward propagate neurons using weights (fc2 = pfc2'*fc1relu) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, ref_fc2.outputs, m_batchSize, ref_fc2.inputs, &alpha, pfc2, ref_fc2.inputs, fc1relu, ref_fc2.inputs, &beta, fc2, ref_fc2.outputs)); // Add bias using GEMM's "beta" (fc2 += pfc2bias*1_vec') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc2.outputs, m_batchSize, 1, &alpha, pfc2bias, ref_fc2.outputs, onevec, 1, &alpha, fc2, ref_fc2.outputs)); // Softmax loss checkCUDNN(cudnnSoftmaxForward(cudnnHandle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, fc2Tensor, fc2, &beta, fc2Tensor, result)); } size_t SetBwdConvolutionTensors(cudnnTensorDescriptor_t& srcTensorDesc, cudnnTensorDescriptor_t& dstTensorDesc, cudnnFilterDescriptor_t& filterDesc, cudnnConvolutionDescriptor_t& convDesc, cudnnConvolutionBwdFilterAlgo_t *falgo, cudnnConvolutionBwdDataAlgo_t *dalgo) { size_t sizeInBytes = 0, tmpsize = 0; // If backprop filter algorithm was requested if (falgo) { checkCUDNN(cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, falgo)); checkCUDNN(cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle, srcTensorDesc, dstTensorDesc, convDesc, filterDesc, *falgo, &tmpsize)); sizeInBytes = std::max(sizeInBytes, tmpsize); } // If backprop data algorithm was requested if (dalgo) { checkCUDNN(cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, dalgo)); checkCUDNN(cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle, filterDesc, dstTensorDesc, convDesc, srcTensorDesc, *dalgo, &tmpsize)); sizeInBytes = std::max(sizeInBytes, tmpsize); } return sizeInBytes; } void Backpropagation(ConvBiasLayer& layer_conv1, MaxPoolLayer& layer_pool1, ConvBiasLayer& layer_conv2, MaxPoolLayer& layer_pool2, float *data, float *labels, float *conv1, float *pool1, float *conv2, float *pool2, 
float *fc1, float *fc1relu, float *fc2, float *fc2smax, float *dloss_data, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, float *gconv1, float *gconv1bias, float *dpool1, float *gconv2, float *gconv2bias, float *dconv2, float *dpool2, float *gfc1, float *gfc1bias, float *dfc1, float *dfc1relu, float *gfc2, float *gfc2bias, float *dfc2, void *workspace, float *onevec) { float alpha = 1.0f, beta = 0.0f; float scalVal = 1.0f / static_cast<float>(m_batchSize); checkCudaErrors(cudaSetDevice(m_gpuid)); // Initialization (using the training error function) checkCudaErrors(cudaMemcpyAsync(dloss_data, fc2smax, sizeof(float) * m_batchSize * ref_fc2.outputs, cudaMemcpyDeviceToDevice)); // Softmax layer SoftmaxLossBackprop<<<RoundUp(m_batchSize, BW), BW>>>(labels, ref_fc2.outputs, m_batchSize, dloss_data); // Accounting for batch size in SGD checkCudaErrors(cublasSscal(cublasHandle, ref_fc2.outputs * m_batchSize, &scalVal, dloss_data, 1)); // FC2 layer // Compute derivative with respect to weights: gfc2 = (fc1relu * dfc2smax') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, ref_fc2.inputs, ref_fc2.outputs, m_batchSize, &alpha, fc1relu, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, gfc2, ref_fc2.inputs)); // Compute derivative with respect to bias: gfc2bias = dfc2smax * 1_vec checkCudaErrors(cublasSgemv(cublasHandle, CUBLAS_OP_N, ref_fc2.outputs, m_batchSize, &alpha, dloss_data, ref_fc2.outputs, onevec, 1, &beta, gfc2bias, 1)); // Compute derivative with respect to data (for previous layer): pfc2*dfc2smax (500x10*10xN) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc2.inputs, m_batchSize, ref_fc2.outputs, &alpha, pfc2, ref_fc2.inputs, dloss_data, ref_fc2.outputs, &beta, dfc2, ref_fc2.inputs)); // ReLU activation checkCUDNN(cudnnActivationBackward(cudnnHandle, fc1Activation, &alpha, fc1Tensor, fc1relu, fc1Tensor, dfc2, fc1Tensor, fc1, &beta, fc1Tensor, dfc1relu)); // FC1 layer // Compute derivative with respect to weights: gfc1 = (pool2 * dfc1relu') checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, ref_fc1.inputs, ref_fc1.outputs, m_batchSize, &alpha, pool2, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, gfc1, ref_fc1.inputs)); // Compute derivative with respect to bias: gfc1bias = dfc1relu * 1_vec checkCudaErrors(cublasSgemv(cublasHandle, CUBLAS_OP_N, ref_fc1.outputs, m_batchSize, &alpha, dfc1relu, ref_fc1.outputs, onevec, 1, &beta, gfc1bias, 1)); // Compute derivative with respect to data (for previous layer): pfc1*dfc1relu (800x500*500xN) checkCudaErrors(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ref_fc1.inputs, m_batchSize, ref_fc1.outputs, &alpha, pfc1, ref_fc1.inputs, dfc1relu, ref_fc1.outputs, &beta, dfc1, ref_fc1.inputs)); // Pool2 layer checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha, pool2Tensor, pool2, pool2Tensor, dfc1, conv2Tensor, conv2, &beta, conv2Tensor, dpool2)); // Conv2 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv2Tensor, dpool2, &beta, conv2BiasTensor, gconv2bias)); checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, pool1Tensor, pool1, conv2Tensor, dpool2, conv2Desc, conv2bwfalgo, workspace, m_workspaceSize, &beta, conv2filterDesc, gconv2)); checkCUDNN(cudnnConvolutionBackwardData(cudnnHandle, &alpha, conv2filterDesc, pconv2, conv2Tensor, dpool2, conv2Desc, conv2bwdalgo, workspace, m_workspaceSize, &beta, pool1Tensor, dconv2)); // Pool1 layer 
checkCUDNN(cudnnPoolingBackward(cudnnHandle, poolDesc, &alpha, pool1Tensor, pool1, pool1Tensor, dconv2, conv1Tensor, conv1, &beta, conv1Tensor, dpool1)); // Conv1 layer checkCUDNN(cudnnConvolutionBackwardBias(cudnnHandle, &alpha, conv1Tensor, dpool1, &beta, conv1BiasTensor, gconv1bias)); checkCUDNN(cudnnConvolutionBackwardFilter(cudnnHandle, &alpha, dataTensor, data, conv1Tensor, dpool1, conv1Desc, conv1bwfalgo, workspace, m_workspaceSize, &beta, conv1filterDesc, gconv1)); // No need for convBackwardData because there are no more layers below } void UpdateWeights(float learning_rate, ConvBiasLayer& conv1, ConvBiasLayer& conv2, float *pconv1, float *pconv1bias, float *pconv2, float *pconv2bias, float *pfc1, float *pfc1bias, float *pfc2, float *pfc2bias, float *gconv1, float *gconv1bias, float *gconv2, float *gconv2bias, float *gfc1, float *gfc1bias, float *gfc2, float *gfc2bias) { float alpha = -learning_rate; checkCudaErrors(cudaSetDevice(m_gpuid)); // Conv1 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv1.pconv.size()), &alpha, gconv1, 1, pconv1, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv1.pbias.size()), &alpha, gconv1bias, 1, pconv1bias, 1)); // Conv2 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv2.pconv.size()), &alpha, gconv2, 1, pconv2, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(conv2.pbias.size()), &alpha, gconv2bias, 1, pconv2bias, 1)); // Fully connected 1 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pneurons.size()), &alpha, gfc1, 1, pfc1, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc1.pbias.size()), &alpha, gfc1bias, 1, pfc1bias, 1)); // Fully connected 2 checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pneurons.size()), &alpha, gfc2, 1, pfc2, 1)); checkCudaErrors(cublasSaxpy(cublasHandle, static_cast<int>(ref_fc2.pbias.size()), &alpha, gfc2bias, 1, pfc2bias, 1)); } }; /////////////////////////////////////////////////////////////////////////////////////////// // Main function int main(int argc, char **argv) { #ifdef USE_GFLAGS gflags::ParseCommandLineFlags(&argc, &argv, true); #endif size_t width, height, channels = 1; // Open input data printf("Reading input data\n"); // Read dataset sizes size_t train_size = ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), nullptr, nullptr, width, height); size_t test_size = ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), nullptr, nullptr, width, height); if (train_size == 0) return 1; std::vector<uint8_t> train_images(train_size * width * height * channels), train_labels(train_size); std::vector<uint8_t> test_images(test_size * width * height * channels), test_labels(test_size); // Read data from datasets if (ReadUByteDataset(FLAGS_train_images.c_str(), FLAGS_train_labels.c_str(), &train_images[0], &train_labels[0], width, height) != train_size) return 2; if (ReadUByteDataset(FLAGS_test_images.c_str(), FLAGS_test_labels.c_str(), &test_images[0], &test_labels[0], width, height) != test_size) return 3; printf("Done. 
Training dataset size: %d, Test dataset size: %d\n", (int)train_size, (int)test_size); printf("Batch size: %lld, iterations: %d\n", FLAGS_batch_size, FLAGS_iterations); // This code snippet saves a random image and its label /* std::random_device rd_image; int random_image = rd_image() % train_size; std::stringstream ss; ss << "image-" << (int)train_labels[random_image] << ".pgm"; SavePGMFile(&train_images[0] + random_image * width*height*channels, width, height, ss.str().c_str()); */ // Choose GPU int num_gpus; checkCudaErrors(cudaGetDeviceCount(&num_gpus)); if (FLAGS_gpu < 0 || FLAGS_gpu >= num_gpus) { printf("ERROR: Invalid GPU ID %d (There are %d GPUs on this machine)\n", FLAGS_gpu, num_gpus); return 4; } // Create the LeNet network architecture ConvBiasLayer conv1((int)channels, 20, 5, (int)width, (int)height); MaxPoolLayer pool1(2, 2); ConvBiasLayer conv2(conv1.out_channels, 50, 5, conv1.out_width / pool1.stride, conv1.out_height / pool1.stride); MaxPoolLayer pool2(2, 2); FullyConnectedLayer fc1((conv2.out_channels*conv2.out_width*conv2.out_height) / (pool2.stride * pool2.stride), 500); FullyConnectedLayer fc2(fc1.outputs, 10); // Initialize CUDNN/CUBLAS training context TrainingContext context(FLAGS_gpu, FLAGS_batch_size, conv1, pool1, conv2, pool2, fc1, fc2); // Determine initial network structure bool bRet = true; if (FLAGS_pretrained) { bRet = conv1.FromFile("conv1"); bRet &= conv2.FromFile("conv2"); bRet &= fc1.FromFile("ip1"); bRet &= fc2.FromFile("ip2"); } if (!bRet || !FLAGS_pretrained) { // Create random network std::random_device rd; std::mt19937 gen(FLAGS_random_seed < 0 ? rd() : static_cast<unsigned int>(FLAGS_random_seed)); // Xavier weight filling float wconv1 = sqrt(3.0f / (conv1.kernel_size * conv1.kernel_size * conv1.in_channels)); std::uniform_real_distribution<> dconv1(-wconv1, wconv1); float wconv2 = sqrt(3.0f / (conv2.kernel_size * conv2.kernel_size * conv2.in_channels)); std::uniform_real_distribution<> dconv2(-wconv2, wconv2); float wfc1 = sqrt(3.0f / (fc1.inputs * fc1.outputs)); std::uniform_real_distribution<> dfc1(-wfc1, wfc1); float wfc2 = sqrt(3.0f / (fc2.inputs * fc2.outputs)); std::uniform_real_distribution<> dfc2(-wfc2, wfc2); // Randomize network for (auto&& iter : conv1.pconv) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv1.pbias) iter = static_cast<float>(dconv1(gen)); for (auto&& iter : conv2.pconv) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : conv2.pbias) iter = static_cast<float>(dconv2(gen)); for (auto&& iter : fc1.pneurons) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc1.pbias) iter = static_cast<float>(dfc1(gen)); for (auto&& iter : fc2.pneurons) iter = static_cast<float>(dfc2(gen)); for (auto&& iter : fc2.pbias) iter = static_cast<float>(dfc2(gen)); } ///////////////////////////////////////////////////////////////////////////// // Create GPU data structures // Forward propagation data float *d_data, *d_labels, *d_conv1, *d_pool1, *d_conv2, *d_pool2, *d_fc1, *d_fc1relu, *d_fc2, *d_fc2smax; // Buffer | Element | N | C | H | W //----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(cudaMalloc(&d_data, sizeof(float) * context.m_batchSize * channels * height * width)); checkCudaErrors(cudaMalloc(&d_labels, sizeof(float) * context.m_batchSize * 1 * 1 * 1)); checkCudaErrors(cudaMalloc(&d_conv1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); 
checkCudaErrors(cudaMalloc(&d_pool1, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(cudaMalloc(&d_conv2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(cudaMalloc(&d_pool2, sizeof(float) * context.m_batchSize * conv2.out_channels * (conv2.out_height / pool2.stride) * (conv2.out_width / pool2.stride))); checkCudaErrors(cudaMalloc(&d_fc1, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(cudaMalloc(&d_fc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(cudaMalloc(&d_fc2, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(cudaMalloc(&d_fc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); // Network parameters float *d_pconv1, *d_pconv1bias, *d_pconv2, *d_pconv2bias; float *d_pfc1, *d_pfc1bias, *d_pfc2, *d_pfc2bias; checkCudaErrors(cudaMalloc(&d_pconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(cudaMalloc(&d_pconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(cudaMalloc(&d_pconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(cudaMalloc(&d_pconv2bias, sizeof(float) * conv2.pbias.size())); checkCudaErrors(cudaMalloc(&d_pfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(cudaMalloc(&d_pfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(cudaMalloc(&d_pfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(cudaMalloc(&d_pfc2bias, sizeof(float) * fc2.pbias.size())); // Network parameter gradients float *d_gconv1, *d_gconv1bias, *d_gconv2, *d_gconv2bias; float *d_gfc1, *d_gfc1bias, *d_gfc2, *d_gfc2bias; checkCudaErrors(cudaMalloc(&d_gconv1, sizeof(float) * conv1.pconv.size())); checkCudaErrors(cudaMalloc(&d_gconv1bias, sizeof(float) * conv1.pbias.size())); checkCudaErrors(cudaMalloc(&d_gconv2, sizeof(float) * conv2.pconv.size())); checkCudaErrors(cudaMalloc(&d_gconv2bias, sizeof(float) * conv2.pbias.size())); checkCudaErrors(cudaMalloc(&d_gfc1, sizeof(float) * fc1.pneurons.size())); checkCudaErrors(cudaMalloc(&d_gfc1bias, sizeof(float) * fc1.pbias.size())); checkCudaErrors(cudaMalloc(&d_gfc2, sizeof(float) * fc2.pneurons.size())); checkCudaErrors(cudaMalloc(&d_gfc2bias, sizeof(float) * fc2.pbias.size())); // Differentials w.r.t. 
data float *d_dpool1, *d_dpool2, *d_dconv2, *d_dfc1, *d_dfc1relu, *d_dfc2, *d_dfc2smax, *d_dlossdata; // Buffer | Element | N | C | H | W //----------------------------------------------------------------------------------------------------------------------------------------- checkCudaErrors(cudaMalloc(&d_dpool1, sizeof(float) * context.m_batchSize * conv1.out_channels * conv1.out_height * conv1.out_width)); checkCudaErrors(cudaMalloc(&d_dpool2, sizeof(float) * context.m_batchSize * conv2.out_channels * conv2.out_height * conv2.out_width)); checkCudaErrors(cudaMalloc(&d_dconv2, sizeof(float) * context.m_batchSize * conv1.out_channels * (conv1.out_height / pool1.stride) * (conv1.out_width / pool1.stride))); checkCudaErrors(cudaMalloc(&d_dfc1, sizeof(float) * context.m_batchSize * fc1.inputs)); checkCudaErrors(cudaMalloc(&d_dfc1relu, sizeof(float) * context.m_batchSize * fc1.outputs)); checkCudaErrors(cudaMalloc(&d_dfc2, sizeof(float) * context.m_batchSize * fc2.inputs)); checkCudaErrors(cudaMalloc(&d_dfc2smax, sizeof(float) * context.m_batchSize * fc2.outputs)); checkCudaErrors(cudaMalloc(&d_dlossdata,sizeof(float) * context.m_batchSize * fc2.outputs)); // Temporary buffers and workspaces float *d_onevec; void *d_cudnn_workspace = nullptr; checkCudaErrors(cudaMalloc(&d_onevec, sizeof(float)* context.m_batchSize)); if (context.m_workspaceSize > 0) checkCudaErrors(cudaMalloc(&d_cudnn_workspace, context.m_workspaceSize)); ///////////////////////////////////////////////////////////////////////////// // Copy initial network to device checkCudaErrors(cudaMemcpyAsync(d_pconv1, &conv1.pconv[0], sizeof(float) * conv1.pconv.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv1bias, &conv1.pbias[0], sizeof(float) * conv1.pbias.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv2, &conv2.pconv[0], sizeof(float) * conv2.pconv.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pconv2bias, &conv2.pbias[0], sizeof(float) * conv2.pbias.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc1, &fc1.pneurons[0], sizeof(float) * fc1.pneurons.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc1bias, &fc1.pbias[0], sizeof(float) * fc1.pbias.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc2, &fc2.pneurons[0], sizeof(float) * fc2.pneurons.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyAsync(d_pfc2bias, &fc2.pbias[0], sizeof(float) * fc2.pbias.size(), cudaMemcpyHostToDevice)); // Fill one-vector with ones FillOnes<<<RoundUp(context.m_batchSize, BW), BW>>>(d_onevec, context.m_batchSize); printf("Preparing dataset\n"); // Normalize training set to be in [0,1] std::vector<float> train_images_float(train_images.size()), train_labels_float(train_size); for (size_t i = 0; i < train_size * channels * width * height; ++i) train_images_float[i] = (float)train_images[i] / 255.0f; for (size_t i = 0; i < train_size; ++i) train_labels_float[i] = (float)train_labels[i]; printf("Training...\n"); // Use SGD to train the network checkCudaErrors(cudaDeviceSynchronize()); auto t1 = std::chrono::high_resolution_clock::now(); for (int iter = 0; iter < FLAGS_iterations; ++iter) { // Train int imageid = iter % (train_size / context.m_batchSize); // Prepare current batch on device checkCudaErrors(cudaMemcpyAsync(d_data, &train_images_float[imageid * context.m_batchSize * width*height*channels], sizeof(float) * context.m_batchSize * channels * width * height, cudaMemcpyHostToDevice)); 
checkCudaErrors(cudaMemcpyAsync(d_labels, &train_labels_float[imageid * context.m_batchSize], sizeof(float) * context.m_batchSize, cudaMemcpyHostToDevice)); // Forward propagation context.ForwardPropagation(d_data, d_conv1, d_pool1, d_conv2, d_pool2, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Backward propagation context.Backpropagation(conv1, pool1, conv2, pool2, d_data, d_labels, d_conv1, d_pool1, d_conv2, d_pool2, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_dlossdata, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_dpool1, d_gconv2, d_gconv2bias, d_dconv2, d_dpool2, d_gfc1, d_gfc1bias, d_dfc1, d_dfc1relu, d_gfc2, d_gfc2bias, d_dfc2, d_cudnn_workspace, d_onevec); // Compute learning rate float learningRate = static_cast<float>(FLAGS_learning_rate * pow((1.0 + FLAGS_lr_gamma * iter), (-FLAGS_lr_power))); // Update weights context.UpdateWeights(learningRate, conv1, conv2, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_gconv1, d_gconv1bias, d_gconv2, d_gconv2bias, d_gfc1, d_gfc1bias, d_gfc2, d_gfc2bias); } checkCudaErrors(cudaDeviceSynchronize()); auto t2 = std::chrono::high_resolution_clock::now(); printf("Iteration time: %f ms\n", std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count() / 1000.0f / FLAGS_iterations); if (FLAGS_save_data) { // Copy trained weights from GPU to CPU checkCudaErrors(cudaMemcpy(&conv1.pconv[0], d_pconv1, sizeof(float) * conv1.pconv.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv1.pbias[0], d_pconv1bias, sizeof(float) * conv1.pbias.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv2.pconv[0], d_pconv2, sizeof(float) * conv2.pconv.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&conv2.pbias[0], d_pconv2bias, sizeof(float) * conv2.pbias.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc1.pneurons[0], d_pfc1, sizeof(float) * fc1.pneurons.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc1.pbias[0], d_pfc1bias, sizeof(float) * fc1.pbias.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc2.pneurons[0], d_pfc2, sizeof(float) * fc2.pneurons.size(), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(&fc2.pbias[0], d_pfc2bias, sizeof(float) * fc2.pbias.size(), cudaMemcpyDeviceToHost)); // Now save data printf("Saving data to file\n"); conv1.ToFile("conv1"); conv2.ToFile("conv2"); fc1.ToFile("ip1"); fc2.ToFile("ip2"); } float classification_error = 1.0f; int classifications = FLAGS_classify; if (classifications < 0) classifications = (int)test_size; // Test the resulting neural network's classification if (classifications > 0) { // Initialize a TrainingContext structure for testing (different batch size) TrainingContext test_context(FLAGS_gpu, 1, conv1, pool1, conv2, pool2, fc1, fc2); // Ensure correct workspaceSize is allocated for testing if (context.m_workspaceSize < test_context.m_workspaceSize) { checkCudaErrors(cudaFree(d_cudnn_workspace)); checkCudaErrors(cudaMalloc(&d_cudnn_workspace, test_context.m_workspaceSize)); } int num_errors = 0; for (int i = 0; i < classifications; ++i) { std::vector<float> data(width * height); // Normalize image to be in [0,1] for (int j = 0; j < width * height; ++j) data[j] = (float)test_images[i * width*height*channels + j] / 255.0f; checkCudaErrors(cudaMemcpyAsync(d_data, &data[0], sizeof(float) * width * height, 
cudaMemcpyHostToDevice)); // Forward propagate test image test_context.ForwardPropagation(d_data, d_conv1, d_pool1, d_conv2, d_pool2, d_fc1, d_fc1relu, d_fc2, d_fc2smax, d_pconv1, d_pconv1bias, d_pconv2, d_pconv2bias, d_pfc1, d_pfc1bias, d_pfc2, d_pfc2bias, d_cudnn_workspace, d_onevec); // Perform classification std::vector<float> class_vec(10); // Copy back result checkCudaErrors(cudaMemcpy(&class_vec[0], d_fc2smax, sizeof(float) * 10, cudaMemcpyDeviceToHost)); // Determine classification according to maximal response int chosen = 0; for (int id = 1; id < 10; ++id) { if (class_vec[chosen] < class_vec[id]) chosen = id; } if (chosen != test_labels[i]) ++num_errors; } classification_error = (float)num_errors / (float)classifications; printf("Classification result: %.2f%% error (used %d images)\n", classification_error * 100.0f, (int)classifications); } // Free data structures checkCudaErrors(cudaFree(d_data)); checkCudaErrors(cudaFree(d_conv1)); checkCudaErrors(cudaFree(d_pool1)); checkCudaErrors(cudaFree(d_conv2)); checkCudaErrors(cudaFree(d_pool2)); checkCudaErrors(cudaFree(d_fc1)); checkCudaErrors(cudaFree(d_fc2)); checkCudaErrors(cudaFree(d_pconv1)); checkCudaErrors(cudaFree(d_pconv1bias)); checkCudaErrors(cudaFree(d_pconv2)); checkCudaErrors(cudaFree(d_pconv2bias)); checkCudaErrors(cudaFree(d_pfc1)); checkCudaErrors(cudaFree(d_pfc1bias)); checkCudaErrors(cudaFree(d_pfc2)); checkCudaErrors(cudaFree(d_pfc2bias)); checkCudaErrors(cudaFree(d_gconv1)); checkCudaErrors(cudaFree(d_gconv1bias)); checkCudaErrors(cudaFree(d_gconv2)); checkCudaErrors(cudaFree(d_gconv2bias)); checkCudaErrors(cudaFree(d_gfc1)); checkCudaErrors(cudaFree(d_gfc1bias)); checkCudaErrors(cudaFree(d_dfc1)); checkCudaErrors(cudaFree(d_gfc2)); checkCudaErrors(cudaFree(d_gfc2bias)); checkCudaErrors(cudaFree(d_dfc2)); checkCudaErrors(cudaFree(d_dpool1)); checkCudaErrors(cudaFree(d_dconv2)); checkCudaErrors(cudaFree(d_dpool2)); checkCudaErrors(cudaFree(d_labels)); checkCudaErrors(cudaFree(d_dlossdata)); checkCudaErrors(cudaFree(d_onevec)); if (d_cudnn_workspace != nullptr) checkCudaErrors(cudaFree(d_cudnn_workspace)); return 0; }
98e6f67992f1d5458f86bd025c83c8d17fb34d13.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Column-wise accumulation: thread i sums x[k*groups + i] over k = 0..n-1 into sum[i].
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
    int k;
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= groups) return;
    sum[i] = 0;
    for(k = 0; k < n; ++k){
        sum[i] += x[k*groups + i];
    }
}
98e6f67992f1d5458f86bd025c83c8d17fb34d13.cu
#include "includes.h" __global__ void accumulate_kernel(float *x, int n, int groups, float *sum) { int k; int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= groups) return; sum[i] = 0; for(k = 0; k < n; ++k){ sum[i] += x[k*groups + i]; } }
4bf9ed8003e9b35661c09218188909f422fb6695.hip
// !!! This is a file automatically generated by hipify!!!
// From PTT
// (single block, multiple threads)
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

////////////////////////////////////////
// Vector addition compute kernel (GPU)
// A function marked __global__ is a kernel
// A kernel can only return void
////////////////////////////////////////
__global__ void gpu_add(float* c, float* a, float* b, int n){
    for (int k=threadIdx.x; k<n; k+=blockDim.x) {
        c[k] = a[k]+b[k];
    }
}

////////////////////////////////////////
// Ordinary vector addition function (Host)
////////////////////////////////////////
void host_add(float* c, float* a, float* b, int n){
    for (int k=0; k<n; k++) {
        c[k] = a[k]+b[k];
    }
}

////////////////////////////////////////
// Function for computing the error
////////////////////////////////////////
double diff(float* a, float* b, int n){
    double s=0, r=0;
    for (int k=0; k<n; k++) {
        double w=a[k]-b[k];
        s += w*w;
        r += a[k]*a[k];
    }
    return sqrt(s/r); // relative error
}

////////////////////////////////////////
// Timing function (returns milliseconds)
////////////////////////////////////////
double ms_time(){
    return (double)clock()/CLOCKS_PER_SEC*1000.0;
}

////////////////////////////////////////
// Main program
////////////////////////////////////////
int main(){
    // Set the vector size
    int n = 1024*1024;
    int size = n*sizeof(float);

    // Grid and block settings
    int grid = 1;    // gridDim (number of blocks per grid)
    int block = 512; // blockDim (number of threads per block)

    // Number of calls (to measure average performance)
    int loop=100;

    // Allocate host memory
    float *a,*b,*c,*d;
    a = (float*)malloc(size);
    b = (float*)malloc(size);
    c = (float*)malloc(size);
    d = (float*)malloc(size);

    // Fill the input vectors with random numbers
    srand(time(0));
    for (int k=0; k<n; k++) {
        a[k] = (float)rand()/RAND_MAX*2-1;
        b[k] = (float)rand()/RAND_MAX*2-1;
    }

    // Allocate device (GPU) memory
    float *ga,*gb,*gc;
    hipMalloc((void**)&ga, size);
    hipMalloc((void**)&gb, size);
    hipMalloc((void**)&gc, size);

    // Copy vectors a,b into device memory
    hipMemcpy(ga, a, size, hipMemcpyHostToDevice);
    hipMemcpy(gb, b, size, hipMemcpyHostToDevice);

    ////////////////////////////////////////
    // part 1 : measure accuracy
    ////////////////////////////////////////
    // Call the kernel to compute (GPU)
    hipLaunchKernelGGL(( gpu_add), dim3(grid), dim3(block), 0, 0, gc, ga, gb, n);

    // Call the ordinary function to compute (Host)
    host_add(d, a, b, n);

    // Copy the result back to the host
    hipMemcpy(c, gc, size, hipMemcpyDeviceToHost);

    // Compare the difference between the two
    printf("vector add N(%d) elements, diff = %g\n", n, diff(c,d,n));

    ////////////////////////////////////////
    // part 2 : measure performance
    ////////////////////////////////////////
    // Measure GPU kernel performance
    double gpu_dt = ms_time();
    for (int w=0; w<loop; w++) {
        hipLaunchKernelGGL(( gpu_add), dim3(grid), dim3(block), 0, 0, gc, ga, gb, n);
        hipDeviceSynchronize(); // make sure the kernel has finished
    }
    gpu_dt = (ms_time()-gpu_dt)/loop; // average time

    // Measure Host function performance
    double host_dt = ms_time();
    for (int w=0; w<loop; w++) {
        host_add(d, a, b, n);
    }
    host_dt = (ms_time()-host_dt)/loop; // average time

    // Print the average execution times
    printf("host time: %g ms\n", host_dt);
    printf("gpu time: %g ms\n", gpu_dt);

    // Free host memory
    free(a); free(b); free(c); free(d);

    // Free device memory
    hipFree(ga); hipFree(gb); hipFree(gc);

    return 0;
}

// vector add N(1048576) elements, diff = 0
// host time: 3.41618 ms
// gpu time: 1.32161 ms
4bf9ed8003e9b35661c09218188909f422fb6695.cu
// From PTT
// (single block, multiple threads)
#include <cuda.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

////////////////////////////////////////
// Vector addition compute kernel (GPU)
// A function marked __global__ is a kernel
// A kernel can only return void
////////////////////////////////////////
__global__ void gpu_add(float* c, float* a, float* b, int n){
    for (int k=threadIdx.x; k<n; k+=blockDim.x) {
        c[k] = a[k]+b[k];
    }
}

////////////////////////////////////////
// Ordinary vector addition function (Host)
////////////////////////////////////////
void host_add(float* c, float* a, float* b, int n){
    for (int k=0; k<n; k++) {
        c[k] = a[k]+b[k];
    }
}

////////////////////////////////////////
// Function for computing the error
////////////////////////////////////////
double diff(float* a, float* b, int n){
    double s=0, r=0;
    for (int k=0; k<n; k++) {
        double w=a[k]-b[k];
        s += w*w;
        r += a[k]*a[k];
    }
    return sqrt(s/r); // relative error
}

////////////////////////////////////////
// Timing function (returns milliseconds)
////////////////////////////////////////
double ms_time(){
    return (double)clock()/CLOCKS_PER_SEC*1000.0;
}

////////////////////////////////////////
// Main program
////////////////////////////////////////
int main(){
    // Set the vector size
    int n = 1024*1024;
    int size = n*sizeof(float);

    // Grid and block settings
    int grid = 1;    // gridDim (number of blocks per grid)
    int block = 512; // blockDim (number of threads per block)

    // Number of calls (to measure average performance)
    int loop=100;

    // Allocate host memory
    float *a,*b,*c,*d;
    a = (float*)malloc(size);
    b = (float*)malloc(size);
    c = (float*)malloc(size);
    d = (float*)malloc(size);

    // Fill the input vectors with random numbers
    srand(time(0));
    for (int k=0; k<n; k++) {
        a[k] = (float)rand()/RAND_MAX*2-1;
        b[k] = (float)rand()/RAND_MAX*2-1;
    }

    // Allocate device (GPU) memory
    float *ga,*gb,*gc;
    cudaMalloc((void**)&ga, size);
    cudaMalloc((void**)&gb, size);
    cudaMalloc((void**)&gc, size);

    // Copy vectors a,b into device memory
    cudaMemcpy(ga, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(gb, b, size, cudaMemcpyHostToDevice);

    ////////////////////////////////////////
    // part 1 : measure accuracy
    ////////////////////////////////////////
    // Call the kernel to compute (GPU)
    gpu_add<<<grid, block>>>(gc, ga, gb, n);

    // Call the ordinary function to compute (Host)
    host_add(d, a, b, n);

    // Copy the result back to the host
    cudaMemcpy(c, gc, size, cudaMemcpyDeviceToHost);

    // Compare the difference between the two
    printf("vector add N(%d) elements, diff = %g\n", n, diff(c,d,n));

    ////////////////////////////////////////
    // part 2 : measure performance
    ////////////////////////////////////////
    // Measure GPU kernel performance
    double gpu_dt = ms_time();
    for (int w=0; w<loop; w++) {
        gpu_add<<<grid, block>>>(gc, ga, gb, n);
        cudaThreadSynchronize(); // make sure the kernel has finished
    }
    gpu_dt = (ms_time()-gpu_dt)/loop; // average time

    // Measure Host function performance
    double host_dt = ms_time();
    for (int w=0; w<loop; w++) {
        host_add(d, a, b, n);
    }
    host_dt = (ms_time()-host_dt)/loop; // average time

    // Print the average execution times
    printf("host time: %g ms\n", host_dt);
    printf("gpu time: %g ms\n", gpu_dt);

    // Free host memory
    free(a); free(b); free(c); free(d);

    // Free device memory
    cudaFree(ga); cudaFree(gb); cudaFree(gc);

    return 0;
}

// vector add N(1048576) elements, diff = 0
// host time: 3.41618 ms
// gpu time: 1.32161 ms
288e26c827da19a8d1249faf248300e45a341112.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernel(float *array, int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { array[index] += 1.f; if (index == 0) printf("### Array size: %d\n", size); } }
288e26c827da19a8d1249faf248300e45a341112.cu
#include "includes.h" __global__ void kernel(float *array, int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { array[index] += 1.f; if (index == 0) printf("### Array size: %d\n", size); } }
454673db31159599ea4001c2f62d8b62a989167a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nbody.h" #include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> // drand48 #include <sys/time.h> enum Initializer initializer = RANDOM_INITIALIZER; #ifdef DUMP FILE *output; #endif __global__ void UpdateParticle(const int nParticles, struct ParticleArray *const particle, const float dt) { extern __shared__ float3 local_data[]; int i; int stride = blockDim.x * gridDim.x; int block_id; int block_tid; // Loop over particles that experience force for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nParticles; i += stride) { // Components of the gravity force on particle i float Fx = 0, Fy = 0, Fz = 0; float3 position; position.x = particle->x[i]; position.y = particle->y[i]; position.z = particle->z[i]; for (block_id = 0; block_id * blockDim.x < nParticles; block_id++) { // Read global memory float3 other; other.x = particle->x[block_id * blockDim.x + threadIdx.x]; other.y = particle->y[block_id * blockDim.x + threadIdx.x]; other.z = particle->z[block_id * blockDim.x + threadIdx.x]; // Write shared memory local_data[threadIdx.x] = other; __syncthreads(); // Loop over particles that exert force for (block_tid = 0; block_tid < blockDim.x; block_tid++) { if (i != block_id * blockDim.x + block_tid) { // Avoid singularity and interaction with self const float softening = 1e-20; // Newton's law of universal gravity const float dx = local_data[block_tid].x - position.x; const float dy = local_data[block_tid].y - position.y; const float dz = local_data[block_tid].z - position.z; const float drSquared = dx * dx + dy * dy + dz * dz + softening; #ifdef OPTIMIZE_POW const float drPower32 = sqrtf(drSquared * drSquared * drSquared); #else const float drPower32 = pow(drSquared, 3.0 / 2.0); #endif // Calculate the net force Fx += dx / drPower32; Fy += dy / drPower32; Fz += dz / drPower32; } } __syncthreads(); } // Accelerate particles in response to the gravitational force particle->vx[i] += dt * Fx; particle->vy[i] += dt * Fy; particle->vz[i] += dt * Fz; } } void MoveParticles(const int nParticles, struct ParticleArray *const particle, const float dt) { struct ParticleArray gpu_particle_tmp; hipMalloc(&gpu_particle_tmp.x, sizeof(float) * nParticles); hipMalloc(&gpu_particle_tmp.y, sizeof(float) * nParticles); hipMalloc(&gpu_particle_tmp.z, sizeof(float) * nParticles); hipMalloc(&gpu_particle_tmp.vx, sizeof(float) * nParticles); hipMalloc(&gpu_particle_tmp.vy, sizeof(float) * nParticles); hipMalloc(&gpu_particle_tmp.vz, sizeof(float) * nParticles); struct ParticleArray *gpu_particle; hipMalloc(&gpu_particle, sizeof(struct ParticleArray)); hipMemcpy(gpu_particle, &gpu_particle_tmp, sizeof(struct ParticleArray), hipMemcpyHostToDevice); hipMemcpy(gpu_particle_tmp.x, particle->x, sizeof(float) * nParticles, hipMemcpyHostToDevice); hipMemcpy(gpu_particle_tmp.y, particle->y, sizeof(float) * nParticles, hipMemcpyHostToDevice); hipMemcpy(gpu_particle_tmp.z, particle->z, sizeof(float) * nParticles, hipMemcpyHostToDevice); hipMemcpy(gpu_particle_tmp.vx, particle->vx, sizeof(float) * nParticles, hipMemcpyHostToDevice); hipMemcpy(gpu_particle_tmp.vy, particle->vy, sizeof(float) * nParticles, hipMemcpyHostToDevice); hipMemcpy(gpu_particle_tmp.vz, particle->vz, sizeof(float) * nParticles, hipMemcpyHostToDevice); hipLaunchKernelGGL(( UpdateParticle), dim3((nParticles + 511) / 512), dim3(512), sizeof(float3) * 512, 0, nParticles, gpu_particle, dt); hipMemcpy(particle->x, gpu_particle_tmp.x, sizeof(float) * 
nParticles, hipMemcpyDeviceToHost); hipMemcpy(particle->y, gpu_particle_tmp.y, sizeof(float) * nParticles, hipMemcpyDeviceToHost); hipMemcpy(particle->z, gpu_particle_tmp.z, sizeof(float) * nParticles, hipMemcpyDeviceToHost); hipMemcpy(particle->vx, gpu_particle_tmp.vx, sizeof(float) * nParticles, hipMemcpyDeviceToHost); hipMemcpy(particle->vy, gpu_particle_tmp.vy, sizeof(float) * nParticles, hipMemcpyDeviceToHost); hipMemcpy(particle->vz, gpu_particle_tmp.vz, sizeof(float) * nParticles, hipMemcpyDeviceToHost); // Move particles according to their velocities // O(N) work, so using a serial loop for (int i = 0; i < nParticles; i++) { particle->x[i] += particle->vx[i] * dt; particle->y[i] += particle->vy[i] * dt; particle->z[i] += particle->vz[i] * dt; } } void dump(int iter, int nParticles, struct ParticleArray *particle) { int i; for (i = 0; i < nParticles; i++) { fwrite(&particle->x[i], sizeof(float), 1, output); fwrite(&particle->y[i], sizeof(float), 1, output); fwrite(&particle->z[i], sizeof(float), 1, output); fwrite(&particle->vx[i], sizeof(float), 1, output); fwrite(&particle->vy[i], sizeof(float), 1, output); fwrite(&particle->vz[i], sizeof(float), 1, output); } } int main(const int argc, const char **argv) { // Problem size and other parameters const int nParticles = (argc > 1 ? atoi(argv[1]) : 16384); // Duration of test const int nSteps = (argc > 2) ? atoi(argv[2]) : 10; // Particle propagation time step const float dt = 0.0005f; struct ParticleArray *particle = (struct ParticleArray *)malloc(sizeof(struct ParticleArray)); particle->x = (float *)malloc(nParticles * sizeof(float)); particle->y = (float *)malloc(nParticles * sizeof(float)); particle->z = (float *)malloc(nParticles * sizeof(float)); particle->vx = (float *)malloc(nParticles * sizeof(float)); particle->vy = (float *)malloc(nParticles * sizeof(float)); particle->vz = (float *)malloc(nParticles * sizeof(float)); // Initialize random number generator and particles srand48(0x2020); #ifdef DUMP char filename[64]; snprintf(filename, 64, "data/nbody/%s.nbody", VERSION); output = fopen(filename, "wb"); fwrite(&initializer, sizeof(enum Initializer), 1, output); fwrite(&nParticles, sizeof(int), 1, output); fwrite(&nSteps, sizeof(int), 1, output); #endif int i; for (i = 0; i < nParticles; i++) { particle->x[i] = 2.0 * drand48() - 1.0; particle->y[i] = 2.0 * drand48() - 1.0; particle->z[i] = 2.0 * drand48() - 1.0; particle->vx[i] = 2.0 * drand48() - 1.0; particle->vy[i] = 2.0 * drand48() - 1.0; particle->vz[i] = 2.0 * drand48() - 1.0; } // Perform benchmark printf("\nPropagating %d particles using 1 thread...\n\n", nParticles); double rate = 0, dRate = 0; // Benchmarking data const int skipSteps = 3; // Skip first iteration (warm-up) printf("\033[1m%5s %10s %10s %8s\033[0m\n", "Step", "Time, s", "Interact/s", "GFLOP/s"); fflush(stdout); for (int step = 1; step <= nSteps; step++) { struct timeval tv; gettimeofday(&tv, NULL); const double tStart = tv.tv_sec + tv.tv_usec / 1000000.0; // Start timing MoveParticles(nParticles, particle, dt); gettimeofday(&tv, NULL); const double tEnd = tv.tv_sec + tv.tv_usec / 1000000.0; // End timing const float HztoInts = ((float)nParticles) * ((float)(nParticles - 1)); const float HztoGFLOPs = 20.0 * 1e-9 * ((float)(nParticles)) * ((float)(nParticles - 1)); if (step > skipSteps) { // Collect statistics rate += HztoGFLOPs / (tEnd - tStart); dRate += HztoGFLOPs * HztoGFLOPs / ((tEnd - tStart) * (tEnd - tStart)); } printf("%5d %10.3e %10.3e %8.1f %s\n", step, (tEnd - tStart), HztoInts / (tEnd - 
tStart), HztoGFLOPs / (tEnd - tStart), (step <= skipSteps ? "*" : "")); fflush(stdout); #ifdef DUMP dump(step, nParticles, particle); #endif } rate /= (double)(nSteps - skipSteps); dRate = sqrt(dRate / (double)(nSteps - skipSteps) - rate * rate); printf("-----------------------------------------------------\n"); printf("\033[1m%s %4s \033[42m%10.1f +- %.1f GFLOP/s\033[0m\n", "Average performance:", "", rate, dRate); printf("-----------------------------------------------------\n"); printf("* - warm-up, not included in average\n\n"); free(particle); #ifdef DUMP fclose(output); #endif return EXIT_SUCCESS; }
454673db31159599ea4001c2f62d8b62a989167a.cu
#include "nbody.h" #include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> // drand48 #include <sys/time.h> enum Initializer initializer = RANDOM_INITIALIZER; #ifdef DUMP FILE *output; #endif __global__ void UpdateParticle(const int nParticles, struct ParticleArray *const particle, const float dt) { extern __shared__ float3 local_data[]; int i; int stride = blockDim.x * gridDim.x; int block_id; int block_tid; // Loop over particles that experience force for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nParticles; i += stride) { // Components of the gravity force on particle i float Fx = 0, Fy = 0, Fz = 0; float3 position; position.x = particle->x[i]; position.y = particle->y[i]; position.z = particle->z[i]; for (block_id = 0; block_id * blockDim.x < nParticles; block_id++) { // Read global memory float3 other; other.x = particle->x[block_id * blockDim.x + threadIdx.x]; other.y = particle->y[block_id * blockDim.x + threadIdx.x]; other.z = particle->z[block_id * blockDim.x + threadIdx.x]; // Write shared memory local_data[threadIdx.x] = other; __syncthreads(); // Loop over particles that exert force for (block_tid = 0; block_tid < blockDim.x; block_tid++) { if (i != block_id * blockDim.x + block_tid) { // Avoid singularity and interaction with self const float softening = 1e-20; // Newton's law of universal gravity const float dx = local_data[block_tid].x - position.x; const float dy = local_data[block_tid].y - position.y; const float dz = local_data[block_tid].z - position.z; const float drSquared = dx * dx + dy * dy + dz * dz + softening; #ifdef OPTIMIZE_POW const float drPower32 = sqrtf(drSquared * drSquared * drSquared); #else const float drPower32 = pow(drSquared, 3.0 / 2.0); #endif // Calculate the net force Fx += dx / drPower32; Fy += dy / drPower32; Fz += dz / drPower32; } } __syncthreads(); } // Accelerate particles in response to the gravitational force particle->vx[i] += dt * Fx; particle->vy[i] += dt * Fy; particle->vz[i] += dt * Fz; } } void MoveParticles(const int nParticles, struct ParticleArray *const particle, const float dt) { struct ParticleArray gpu_particle_tmp; cudaMalloc(&gpu_particle_tmp.x, sizeof(float) * nParticles); cudaMalloc(&gpu_particle_tmp.y, sizeof(float) * nParticles); cudaMalloc(&gpu_particle_tmp.z, sizeof(float) * nParticles); cudaMalloc(&gpu_particle_tmp.vx, sizeof(float) * nParticles); cudaMalloc(&gpu_particle_tmp.vy, sizeof(float) * nParticles); cudaMalloc(&gpu_particle_tmp.vz, sizeof(float) * nParticles); struct ParticleArray *gpu_particle; cudaMalloc(&gpu_particle, sizeof(struct ParticleArray)); cudaMemcpy(gpu_particle, &gpu_particle_tmp, sizeof(struct ParticleArray), cudaMemcpyHostToDevice); cudaMemcpy(gpu_particle_tmp.x, particle->x, sizeof(float) * nParticles, cudaMemcpyHostToDevice); cudaMemcpy(gpu_particle_tmp.y, particle->y, sizeof(float) * nParticles, cudaMemcpyHostToDevice); cudaMemcpy(gpu_particle_tmp.z, particle->z, sizeof(float) * nParticles, cudaMemcpyHostToDevice); cudaMemcpy(gpu_particle_tmp.vx, particle->vx, sizeof(float) * nParticles, cudaMemcpyHostToDevice); cudaMemcpy(gpu_particle_tmp.vy, particle->vy, sizeof(float) * nParticles, cudaMemcpyHostToDevice); cudaMemcpy(gpu_particle_tmp.vz, particle->vz, sizeof(float) * nParticles, cudaMemcpyHostToDevice); UpdateParticle<<<(nParticles + 511) / 512, 512, sizeof(float3) * 512>>>( nParticles, gpu_particle, dt); cudaMemcpy(particle->x, gpu_particle_tmp.x, sizeof(float) * nParticles, cudaMemcpyDeviceToHost); cudaMemcpy(particle->y, gpu_particle_tmp.y, sizeof(float) * 
nParticles, cudaMemcpyDeviceToHost); cudaMemcpy(particle->z, gpu_particle_tmp.z, sizeof(float) * nParticles, cudaMemcpyDeviceToHost); cudaMemcpy(particle->vx, gpu_particle_tmp.vx, sizeof(float) * nParticles, cudaMemcpyDeviceToHost); cudaMemcpy(particle->vy, gpu_particle_tmp.vy, sizeof(float) * nParticles, cudaMemcpyDeviceToHost); cudaMemcpy(particle->vz, gpu_particle_tmp.vz, sizeof(float) * nParticles, cudaMemcpyDeviceToHost); // Move particles according to their velocities // O(N) work, so using a serial loop for (int i = 0; i < nParticles; i++) { particle->x[i] += particle->vx[i] * dt; particle->y[i] += particle->vy[i] * dt; particle->z[i] += particle->vz[i] * dt; } } void dump(int iter, int nParticles, struct ParticleArray *particle) { int i; for (i = 0; i < nParticles; i++) { fwrite(&particle->x[i], sizeof(float), 1, output); fwrite(&particle->y[i], sizeof(float), 1, output); fwrite(&particle->z[i], sizeof(float), 1, output); fwrite(&particle->vx[i], sizeof(float), 1, output); fwrite(&particle->vy[i], sizeof(float), 1, output); fwrite(&particle->vz[i], sizeof(float), 1, output); } } int main(const int argc, const char **argv) { // Problem size and other parameters const int nParticles = (argc > 1 ? atoi(argv[1]) : 16384); // Duration of test const int nSteps = (argc > 2) ? atoi(argv[2]) : 10; // Particle propagation time step const float dt = 0.0005f; struct ParticleArray *particle = (struct ParticleArray *)malloc(sizeof(struct ParticleArray)); particle->x = (float *)malloc(nParticles * sizeof(float)); particle->y = (float *)malloc(nParticles * sizeof(float)); particle->z = (float *)malloc(nParticles * sizeof(float)); particle->vx = (float *)malloc(nParticles * sizeof(float)); particle->vy = (float *)malloc(nParticles * sizeof(float)); particle->vz = (float *)malloc(nParticles * sizeof(float)); // Initialize random number generator and particles srand48(0x2020); #ifdef DUMP char filename[64]; snprintf(filename, 64, "data/nbody/%s.nbody", VERSION); output = fopen(filename, "wb"); fwrite(&initializer, sizeof(enum Initializer), 1, output); fwrite(&nParticles, sizeof(int), 1, output); fwrite(&nSteps, sizeof(int), 1, output); #endif int i; for (i = 0; i < nParticles; i++) { particle->x[i] = 2.0 * drand48() - 1.0; particle->y[i] = 2.0 * drand48() - 1.0; particle->z[i] = 2.0 * drand48() - 1.0; particle->vx[i] = 2.0 * drand48() - 1.0; particle->vy[i] = 2.0 * drand48() - 1.0; particle->vz[i] = 2.0 * drand48() - 1.0; } // Perform benchmark printf("\nPropagating %d particles using 1 thread...\n\n", nParticles); double rate = 0, dRate = 0; // Benchmarking data const int skipSteps = 3; // Skip first iteration (warm-up) printf("\033[1m%5s %10s %10s %8s\033[0m\n", "Step", "Time, s", "Interact/s", "GFLOP/s"); fflush(stdout); for (int step = 1; step <= nSteps; step++) { struct timeval tv; gettimeofday(&tv, NULL); const double tStart = tv.tv_sec + tv.tv_usec / 1000000.0; // Start timing MoveParticles(nParticles, particle, dt); gettimeofday(&tv, NULL); const double tEnd = tv.tv_sec + tv.tv_usec / 1000000.0; // End timing const float HztoInts = ((float)nParticles) * ((float)(nParticles - 1)); const float HztoGFLOPs = 20.0 * 1e-9 * ((float)(nParticles)) * ((float)(nParticles - 1)); if (step > skipSteps) { // Collect statistics rate += HztoGFLOPs / (tEnd - tStart); dRate += HztoGFLOPs * HztoGFLOPs / ((tEnd - tStart) * (tEnd - tStart)); } printf("%5d %10.3e %10.3e %8.1f %s\n", step, (tEnd - tStart), HztoInts / (tEnd - tStart), HztoGFLOPs / (tEnd - tStart), (step <= skipSteps ? 
"*" : "")); fflush(stdout); #ifdef DUMP dump(step, nParticles, particle); #endif } rate /= (double)(nSteps - skipSteps); dRate = sqrt(dRate / (double)(nSteps - skipSteps) - rate * rate); printf("-----------------------------------------------------\n"); printf("\033[1m%s %4s \033[42m%10.1f +- %.1f GFLOP/s\033[0m\n", "Average performance:", "", rate, dRate); printf("-----------------------------------------------------\n"); printf("* - warm-up, not included in average\n\n"); free(particle); #ifdef DUMP fclose(output); #endif return EXIT_SUCCESS; }
a28f4db34f27cc4e55e32247738f6379708a08a7.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2018 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "ops.h" #include "cuda_helper.h" Model::Model(bool training) : isTraining(training) { //checkCUDA(hipSetDevice(0)); checkCUDNN(cudnnCreate(&dnn)); checkCUDA(hipblasCreate(&blas)); workSpaceSize = WORK_SPACE_SIZE; global_unique_id = 100; checkCUDA(hipMalloc(&workSpace, workSpaceSize)); //printf("handle.workSpace = 0x%p\n", workSpace); // create all descriptors checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&scaleTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc)); checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc)); // allocate tensors for measuring performance checkCUDA(hipMalloc(&inputPtr, MAX_TENSOR_SIZE)); checkCUDA(hipMalloc(&biasPtr, MAX_TENSOR_SIZE)); checkCUDA(hipMalloc(&outputPtr, MAX_TENSOR_SIZE)); checkCUDA(hipMalloc(&filterPtr, MAX_TENSOR_SIZE)); // create tensors for batch norm checkCUDA(hipMalloc(&scalePtr, MAX_TENSOR_SIZE)); checkCUDA(hipMalloc(&runningMean, MAX_TENSOR_SIZE)); checkCUDA(hipMalloc(&runningVar, MAX_TENSOR_SIZE)); checkCUDA(hipMalloc(&saveMean, MAX_TENSOR_SIZE)); checkCUDA(hipMalloc(&saveVar, MAX_TENSOR_SIZE)); // create cuda events checkCUDA(hipEventCreate(&startEvent)); checkCUDA(hipEventCreate(&endEvent)); } float Model::measure_oplist_runtime(const std::vector<OpBase*>& opBaseList) { const int num_runs = 100; // warmup for (int i = 0; i < opBaseList.size(); i++) opBaseList[i]->forward(); // measure runtime checkCUDA(hipDeviceSynchronize()); checkCUDA(hipEventRecord(startEvent)); for (int times = 0; times < num_runs; times++) { for (int i = 0; i < opBaseList.size(); i++) opBaseList[i]->forward(); } checkCUDA(hipEventRecord(endEvent)); checkCUDA(hipEventSynchronize(endEvent)); float milliseconds; hipEventElapsedTime(&milliseconds, startEvent, endEvent); double runtime=milliseconds/num_runs; double power_time=measure_time/ (runtime); start_check_power(); double t=get_current_time(); for (int times = 0; times < power_time; times++) { for (int i = 0; i < opBaseList.size(); i++) opBaseList[i]->forward(); } t=get_current_time()-t; double power=finish_check_power(); double new_run_time=t/power_time; power=power_no_idle(power); printf("\n Measured power=%f energy=%f\n",power,new_run_time*power); return new_run_time; } void* Model::allocate_memory(size_t size) { void* ptr; checkCUDA(hipMalloc(&ptr, size)); return ptr; }
a28f4db34f27cc4e55e32247738f6379708a08a7.cu
/* Copyright 2018 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "ops.h" #include "cuda_helper.h" Model::Model(bool training) : isTraining(training) { //checkCUDA(cudaSetDevice(0)); checkCUDNN(cudnnCreate(&dnn)); checkCUDA(cublasCreate(&blas)); workSpaceSize = WORK_SPACE_SIZE; global_unique_id = 100; checkCUDA(cudaMalloc(&workSpace, workSpaceSize)); //printf("handle.workSpace = 0x%p\n", workSpace); // create all descriptors checkCUDNN(cudnnCreateTensorDescriptor(&inputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&biasTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); checkCUDNN(cudnnCreateTensorDescriptor(&scaleTensor)); checkCUDNN(cudnnCreateFilterDescriptor(&filterDesc)); checkCUDNN(cudnnCreateConvolutionDescriptor(&convDesc)); checkCUDNN(cudnnCreatePoolingDescriptor(&poolDesc)); checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnCreateOpTensorDescriptor(&opDesc)); // allocate tensors for measuring performance checkCUDA(cudaMalloc(&inputPtr, MAX_TENSOR_SIZE)); checkCUDA(cudaMalloc(&biasPtr, MAX_TENSOR_SIZE)); checkCUDA(cudaMalloc(&outputPtr, MAX_TENSOR_SIZE)); checkCUDA(cudaMalloc(&filterPtr, MAX_TENSOR_SIZE)); // create tensors for batch norm checkCUDA(cudaMalloc(&scalePtr, MAX_TENSOR_SIZE)); checkCUDA(cudaMalloc(&runningMean, MAX_TENSOR_SIZE)); checkCUDA(cudaMalloc(&runningVar, MAX_TENSOR_SIZE)); checkCUDA(cudaMalloc(&saveMean, MAX_TENSOR_SIZE)); checkCUDA(cudaMalloc(&saveVar, MAX_TENSOR_SIZE)); // create cuda events checkCUDA(cudaEventCreate(&startEvent)); checkCUDA(cudaEventCreate(&endEvent)); } float Model::measure_oplist_runtime(const std::vector<OpBase*>& opBaseList) { const int num_runs = 100; // warmup for (int i = 0; i < opBaseList.size(); i++) opBaseList[i]->forward(); // measure runtime checkCUDA(cudaDeviceSynchronize()); checkCUDA(cudaEventRecord(startEvent)); for (int times = 0; times < num_runs; times++) { for (int i = 0; i < opBaseList.size(); i++) opBaseList[i]->forward(); } checkCUDA(cudaEventRecord(endEvent)); checkCUDA(cudaEventSynchronize(endEvent)); float milliseconds; cudaEventElapsedTime(&milliseconds, startEvent, endEvent); double runtime=milliseconds/num_runs; double power_time=measure_time/ (runtime); start_check_power(); double t=get_current_time(); for (int times = 0; times < power_time; times++) { for (int i = 0; i < opBaseList.size(); i++) opBaseList[i]->forward(); } t=get_current_time()-t; double power=finish_check_power(); double new_run_time=t/power_time; power=power_no_idle(power); printf("\n Measured power=%f energy=%f\n",power,new_run_time*power); return new_run_time; } void* Model::allocate_memory(size_t size) { void* ptr; checkCUDA(cudaMalloc(&ptr, size)); return ptr; }
6bbd14aa77b75627692322f3491f4510e3a1ed7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/LinearAlgebra.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <c10/core/Scalar.h> namespace at { namespace native { namespace { void addr_kernel_cuda(TensorIterator &iter, const Scalar& beta, const Scalar& alpha) { if (iter.dtype() == ScalarType::Bool) { using scalar_t = bool; auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); // when beta is false, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == false) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val && vec1_val && vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); } ); } return; } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, iter.dtype(), "addr_cuda", [&] { auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); scalar_t zero_val(0); // when beta==0, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == zero_val) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val * vec1_val * vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return beta_val * self_val + alpha_val * vec1_val * vec2_val; } ); } }); } template <int n_threads, int n_elems_per_thread, typename func_t> C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread) __global__ void _elementwise_kernel(int total_n_elems, func_t f) { constexpr int total_work_block = n_threads * n_elems_per_thread; int idx = total_work_block * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < n_elems_per_thread; ++i) { if (idx < total_n_elems) { f(idx); idx += n_threads; } } } template <int n_threads, int n_elems_per_thread, typename func_t> static void _launch_kernel(int total_n_elems, func_t f) { TORCH_INTERNAL_ASSERT( total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max() ); dim3 block(n_threads); constexpr int total_work_block = n_threads * n_elems_per_thread; dim3 grid((total_n_elems + total_work_block - 1) / total_work_block); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( _elementwise_kernel<n_threads, n_elems_per_thread, func_t>) , dim3(grid), dim3(block), 0, stream, total_n_elems, f); C10_HIP_KERNEL_LAUNCH_CHECK(); } void _unpack_pivots_internal_kernel( TensorIterator& iter, int64_t dim_size ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _unpack_pivots_internal_kernel(sub_iter, dim_size); } return; } auto offset_calculator = make_offset_calculator<2>(iter); char* unpacked_pivots_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); const char* const __restrict__ pivots_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calculator.get(i); auto* unpacked_pivots_data = reinterpret_cast<int32_t*>( unpacked_pivots_ptr + offsets[0]); const auto* const __restrict__ 
pivots_data = reinterpret_cast<const int32_t*>( pivots_ptr + offsets[1]); // QUESTION: can we mix 64bit offsets with 32bit Iterator indexing? for (int64_t i = 0; i < dim_size; ++i) { thrust::swap( unpacked_pivots_data[i], unpacked_pivots_data[pivots_data[i]] ); } }; _launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } void unpack_pivots_cuda_kernel( TensorIterator& iter, int64_t dim_size ) { _unpack_pivots_internal_kernel(iter, dim_size); } } // anonymous namespace REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda); REGISTER_DISPATCH(unpack_pivots_stub, &unpack_pivots_cuda_kernel); }}
6bbd14aa77b75627692322f3491f4510e3a1ed7c.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/Dispatch.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/LinearAlgebra.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/SharedReduceOps.h> #include <ATen/native/ReduceOps.h> #include <c10/core/Scalar.h> namespace at { namespace native { namespace { void addr_kernel_cuda(TensorIterator &iter, const Scalar& beta, const Scalar& alpha) { if (iter.dtype() == ScalarType::Bool) { using scalar_t = bool; auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); // when beta is false, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == false) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val && vec1_val && vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return (beta_val && self_val) || (alpha_val && vec1_val && vec2_val); } ); } return; } AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(kBFloat16, kHalf, iter.dtype(), "addr_cuda", [&] { auto beta_val = beta.to<scalar_t>(); auto alpha_val = alpha.to<scalar_t>(); scalar_t zero_val(0); // when beta==0, values in self should be ignored, // nans and infs in self should not propagate. if (beta_val == zero_val) { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return alpha_val * vec1_val * vec2_val; } ); } else { gpu_kernel( iter, [=] GPU_LAMBDA (scalar_t self_val, scalar_t vec1_val, scalar_t vec2_val) -> scalar_t { return beta_val * self_val + alpha_val * vec1_val * vec2_val; } ); } }); } template <int n_threads, int n_elems_per_thread, typename func_t> C10_LAUNCH_BOUNDS_2(n_threads, n_elems_per_thread) __global__ void _elementwise_kernel(int total_n_elems, func_t f) { constexpr int total_work_block = n_threads * n_elems_per_thread; int idx = total_work_block * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < n_elems_per_thread; ++i) { if (idx < total_n_elems) { f(idx); idx += n_threads; } } } template <int n_threads, int n_elems_per_thread, typename func_t> static void _launch_kernel(int total_n_elems, func_t f) { TORCH_INTERNAL_ASSERT( total_n_elems >= 0 && total_n_elems <= std::numeric_limits<int32_t>::max() ); dim3 block(n_threads); constexpr int total_work_block = n_threads * n_elems_per_thread; dim3 grid((total_n_elems + total_work_block - 1) / total_work_block); auto stream = at::cuda::getCurrentCUDAStream(); _elementwise_kernel<n_threads, n_elems_per_thread, func_t> <<<grid, block, 0, stream>>>(total_n_elems, f); C10_CUDA_KERNEL_LAUNCH_CHECK(); } void _unpack_pivots_internal_kernel( TensorIterator& iter, int64_t dim_size ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _unpack_pivots_internal_kernel(sub_iter, dim_size); } return; } auto offset_calculator = make_offset_calculator<2>(iter); char* unpacked_pivots_ptr = reinterpret_cast<char*>(iter.data_ptr(0)); const char* const __restrict__ pivots_ptr = reinterpret_cast<const char*>(iter.data_ptr(1)); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calculator.get(i); auto* unpacked_pivots_data = reinterpret_cast<int32_t*>( unpacked_pivots_ptr + offsets[0]); const auto* const __restrict__ pivots_data = reinterpret_cast<const int32_t*>( pivots_ptr + offsets[1]); // QUESTION: can we mix 64bit offsets with 32bit Iterator 
indexing? for (int64_t i = 0; i < dim_size; ++i) { thrust::swap( unpacked_pivots_data[i], unpacked_pivots_data[pivots_data[i]] ); } }; _launch_kernel<num_threads(), thread_work_size()>(iter.numel(), loop); } void unpack_pivots_cuda_kernel( TensorIterator& iter, int64_t dim_size ) { _unpack_pivots_internal_kernel(iter, dim_size); } } // anonymous namespace REGISTER_DISPATCH(addr_stub, &addr_kernel_cuda); REGISTER_DISPATCH(unpack_pivots_stub, &unpack_pivots_cuda_kernel); }}
1b093fcc0b3c79946d3a4518ad2c802bd4cb2fe0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void CopyConnectionsCoordinatesKernel( int *connectionMatrix, float *pointsCoordinates, float *vertexData, int *connectionCount, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < maxCells * maxCells) { if(connectionMatrix[threadId] == 1) { int from = threadId / maxCells; int to = threadId % maxCells; if(to > from) { //int vertexDataOffset = maxCells * 3; int vertexDataOffset = 0; int connIdx = atomicAdd( &connectionCount[0], 1); vertexData[vertexDataOffset + connIdx * 6] = pointsCoordinates[from * 3]; vertexData[vertexDataOffset + connIdx * 6 + 1] = pointsCoordinates[from * 3 + 1]; vertexData[vertexDataOffset + connIdx * 6 + 2] = pointsCoordinates[from * 3 + 2]; vertexData[vertexDataOffset + connIdx * 6 + 3] = pointsCoordinates[to * 3]; vertexData[vertexDataOffset + connIdx * 6 + 4] = pointsCoordinates[to * 3 + 1]; vertexData[vertexDataOffset + connIdx * 6 + 5] = pointsCoordinates[to * 3 + 2]; } } } }
1b093fcc0b3c79946d3a4518ad2c802bd4cb2fe0.cu
#include "includes.h" __global__ void CopyConnectionsCoordinatesKernel( int *connectionMatrix, float *pointsCoordinates, float *vertexData, int *connectionCount, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < maxCells * maxCells) { if(connectionMatrix[threadId] == 1) { int from = threadId / maxCells; int to = threadId % maxCells; if(to > from) { //int vertexDataOffset = maxCells * 3; int vertexDataOffset = 0; int connIdx = atomicAdd( &connectionCount[0], 1); vertexData[vertexDataOffset + connIdx * 6] = pointsCoordinates[from * 3]; vertexData[vertexDataOffset + connIdx * 6 + 1] = pointsCoordinates[from * 3 + 1]; vertexData[vertexDataOffset + connIdx * 6 + 2] = pointsCoordinates[from * 3 + 2]; vertexData[vertexDataOffset + connIdx * 6 + 3] = pointsCoordinates[to * 3]; vertexData[vertexDataOffset + connIdx * 6 + 4] = pointsCoordinates[to * 3 + 1]; vertexData[vertexDataOffset + connIdx * 6 + 5] = pointsCoordinates[to * 3 + 2]; } } } }
39b9e86df456101fcd95ba2150c03f8db4df014d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <helpers/PointersManager.h> #include <math/templatemath.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x input is [bS, iC, iD, iH, iW] // z output is [bS, iC, oD, oH, oW] const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) max = val; } } } z[zOffset] = max; } break; /*** avg ***/ case 1: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += 
x[shape::getOffset(xShapeInfo, coords)]; if (extraParam0 == 0) { //Exclude padding uint a = (dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1); uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1); uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1); sum /= static_cast<T>(a * b * c); // /= sd::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation } else if (extraParam0 == 1) //Include padding sum /= kProd; z[zOffset] = sum; } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); sum = sd::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0); z[zOffset] = sum; } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { hipLaunchKernelGGL(( pooling3dCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { PointersManager manager(block.launchContext(), "pooling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } } }
39b9e86df456101fcd95ba2150c03f8db4df014d.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/convolutions.h> #include <helpers/PointersManager.h> #include <math/templatemath.h> namespace sd { namespace ops { ////////////////////////////////////////////////////////////////////////// template <typename T> __global__ static void pooling3dCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { // x input is [bS, iC, iD, iH, iW] // z output is [bS, iC, oD, oH, oW] const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank, kDeff, kHeff, kWeff, iD, iH, iW, kProd; __shared__ Nd4jLong zLen, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); rank = 5; kDeff = kD + (kD - 1) * (dD - 1); kHeff = kH + (kH - 1) * (dH - 1); kWeff = kW + (kW - 1) * (dW - 1); iD = xShapeInfo[3]; iH = xShapeInfo[4]; iW = xShapeInfo[5]; kProd = kD * kH * kW; } __syncthreads(); const auto zInd = threadIdx.x + blockIdx.x * blockDim.x; if(zInd >= zLen) return; auto coords = sharedMem + threadIdx.x * rank; shape::index2coords(zInd, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); int dstart = coords[2] * sD - pD; int hstart = coords[3] * sH - pH; int wstart = coords[4] * sW - pW; int dend = dstart + kDeff; int hend = hstart + kHeff; int wend = wstart + kWeff; if(dstart < 0) dstart += dD * ((-dstart + dD - 1) / dD); if(hstart < 0) hstart += dH * ((-hstart + dH - 1) / dH); if(wstart < 0) wstart += dW * ((-wstart + dW - 1) / dW); if(dend > iD) dend -= dD * ((dend - iD + dD - 1) / dD); if(hend > iH) hend -= dH * ((hend - iH + dH - 1) / dH); if(wend > iW) wend -= dW * ((wend - iW + dW - 1) / dW); switch (poolingMode) { /*** max ***/ case 0: { T max = -DataTypeUtils::max<T>(); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) { for (coords[3] = hstart; coords[3] < hend; coords[3] += dH){ for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) { T val = x[shape::getOffset(xShapeInfo, coords)]; if (val > max) max = val; } } } z[zOffset] = max; } break; /*** avg ***/ case 1: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += x[shape::getOffset(xShapeInfo, coords)]; if (extraParam0 == 0) { //Exclude padding uint a = 
(dend - dstart) / dD + ((dend - dstart) % dD == 0 ? 0 : 1); uint b = (hend - hstart) / dH + ((hend - hstart) % dH == 0 ? 0 : 1); uint c = (wend - wstart) / dW + ((wend - wstart) % dW == 0 ? 0 : 1); sum /= static_cast<T>(a * b * c); // /= sd::math::nd4j_ceil<double,T>(static_cast<double>(dend - dstart) / static_cast<double>(dD)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(hend - hstart) / static_cast<double>(dH)) * sd::math::nd4j_ceil<double,T>(static_cast<double>(wend - wstart) / static_cast<double>(dW)); //Accounts for dilation } else if (extraParam0 == 1) //Include padding sum /= kProd; z[zOffset] = sum; } break; /*** pnorm ***/ case 2: { T sum = static_cast<T>(0.); for (coords[2] = dstart; coords[2] < dend; coords[2] += dD) for (coords[3] = hstart; coords[3] < hend; coords[3] += dH) for (coords[4] = wstart; coords[4] < wend; coords[4] += dW) sum += sd::math::nd4j_pow<T,T,T>(sd::math::nd4j_abs<T>(x[shape::getOffset(xShapeInfo, coords)]), extraParam0); sum = sd::math::nd4j_pow<T,T,T>(sum, (T) 1.f / extraParam0); z[zOffset] = sum; } break; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void pooling3dCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { pooling3dCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void ConvolutionUtils::pooling3d(sd::graph::Context& block, const NDArray& input, NDArray& output, const int kD, const int kH, const int kW, const int sD, const int sH, const int sW, const int pD, const int pH, const int pW, const int dD, const int dH, const int dW, const int poolingMode, const int extraParam0) { PointersManager manager(block.launchContext(), "pooling3d"); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = output.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), pooling3dCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, block.launchContext()->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), kD, kH, kW, sD, sH, sW, pD, pH, pW, dD, dH, dW, poolingMode, extraParam0), FLOAT_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } } }
90b490704dc3727e4879d2fcb963f3e4a3a01834.hip
// !!! This is a file automatically generated by hipify!!! #include <primitiv/config.h> #include <iostream> #include <primitiv/devices/cuda/device.h> #include <primitiv/devices/cuda/ops/common.h> #include <primitiv/internal/cuda/utils.h> namespace primitiv { namespace devices { void CUDA::dump_description() const { using std::cerr; using std::endl; cerr << "Device " << this << endl; cerr << " Type: CUDA" << endl; const ::hipDeviceProp_t &prop = state_->prop; cerr << " Device ID: " << dev_id_ << endl; cerr << " Name .................. " << prop.name << endl; cerr << " Global memory ......... " << prop.totalGlobalMem << endl; cerr << " Shared memory/block ... " << prop.sharedMemPerBlock << endl; cerr << " Threads/block ......... " << prop.maxThreadsPerBlock << endl; cerr << " Block size ............ " << prop.maxThreadsDim[0] << ", " << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << endl; cerr << " Grid size ............. " << prop.maxGridSize[0] << ", " << prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << endl; cerr << " Compute capability .... " << prop.major << '.' << prop.minor << endl; /* cerr << " Configurations:" << endl; cerr << " 1 dim ........... " << dim1_x_ << " threads" << endl; cerr << " 2 dims .......... " << dim2_x_ << "x" << dim2_y_ << " threads" << endl; cerr << " Maximum batch ... " << max_batch_ << endl; */ } } // namespace devices } // namespace primitiv
90b490704dc3727e4879d2fcb963f3e4a3a01834.cu
#include <primitiv/config.h> #include <iostream> #include <primitiv/devices/cuda/device.h> #include <primitiv/devices/cuda/ops/common.h> #include <primitiv/internal/cuda/utils.h> namespace primitiv { namespace devices { void CUDA::dump_description() const { using std::cerr; using std::endl; cerr << "Device " << this << endl; cerr << " Type: CUDA" << endl; const ::cudaDeviceProp &prop = state_->prop; cerr << " Device ID: " << dev_id_ << endl; cerr << " Name .................. " << prop.name << endl; cerr << " Global memory ......... " << prop.totalGlobalMem << endl; cerr << " Shared memory/block ... " << prop.sharedMemPerBlock << endl; cerr << " Threads/block ......... " << prop.maxThreadsPerBlock << endl; cerr << " Block size ............ " << prop.maxThreadsDim[0] << ", " << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << endl; cerr << " Grid size ............. " << prop.maxGridSize[0] << ", " << prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << endl; cerr << " Compute capability .... " << prop.major << '.' << prop.minor << endl; /* cerr << " Configurations:" << endl; cerr << " 1 dim ........... " << dim1_x_ << " threads" << endl; cerr << " 2 dims .......... " << dim2_x_ << "x" << dim2_y_ << " threads" << endl; cerr << " Maximum batch ... " << max_batch_ << endl; */ } } // namespace devices } // namespace primitiv
b61ed1f0ad7749786fcf9a3c6db6fc38cec6e7d7.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu, * Wei Kang) * Mobvoi Inc. (authors: Fangjun Kuang) * * See LICENSE for clarification regarding multiple authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <limits> #include <memory> #include <type_traits> #include <utility> #include <vector> #include "k2/csrc/array_ops.h" #include "k2/csrc/fsa_algo.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/host/aux_labels.h" #include "k2/csrc/host/connect.h" #include "k2/csrc/host/determinize.h" #include "k2/csrc/host/intersect.h" #include "k2/csrc/host/rmepsilon.h" #include "k2/csrc/host/topsort.h" #include "k2/csrc/host_shim.h" #include "k2/csrc/macros.h" #include "k2/csrc/rm_epsilon.h" // this contains a subset of the algorithms in fsa_algo.h; currently it just // contains one that are wrappings of the corresponding algorithms in // host/. namespace k2 { bool RecursionWrapper(bool (*f)(Fsa &, Fsa *, Array1<int32_t> *), Fsa &src, Fsa *dest, Array1<int32_t> *arc_map) { NVTX_RANGE(K2_FUNC); // src is actually an FsaVec. Just recurse for now. int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Array1<int32_t>> arc_maps(num_fsas); int32_t tot_num_arcs = 0; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); // Recurse. if (!f(srcs[i], &(dests[i]), (arc_map != nullptr ? 
&(arc_maps[i]) : nullptr))) return false; if (arc_map != nullptr) { // convert arc indexes in arc_maps from idx2 to idx012 arc_maps[i] = Plus(arc_maps[i], tot_num_arcs); tot_num_arcs += srcs[i].NumElements(); } } *dest = Stack(0, num_fsas, dests.data()); if (arc_map != nullptr) *arc_map = Cat(src.Context(), num_fsas, arc_maps.data()); return true; } bool ConnectHost(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapper(ConnectHost, src, dest, arc_map); } k2host::Fsa host_fsa = FsaToHostFsa(src); k2host::Connection c(host_fsa); k2host::Array2Size<int32_t> size; c.GetSizes(&size); FsaCreator creator(size); k2host::Fsa host_dest_fsa = creator.GetHostFsa(); int32_t *arc_map_data = nullptr; if (arc_map != nullptr) { *arc_map = Array1<int32_t>(src.Context(), size.size2); arc_map_data = arc_map->Data(); } bool ans = c.GetOutput(&host_dest_fsa, arc_map_data); *dest = creator.GetFsa(); return ans; } bool TopSortHost(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapper(TopSortHost, src, dest, arc_map); } k2host::Fsa host_fsa = FsaToHostFsa(src); k2host::TopSorter sorter(host_fsa); k2host::Array2Size<int32_t> size; sorter.GetSizes(&size); FsaCreator creator(size); k2host::Fsa host_dest_fsa = creator.GetHostFsa(); int32_t *arc_map_data = nullptr; if (arc_map != nullptr) { *arc_map = Array1<int32_t>(src.Context(), size.size2); arc_map_data = arc_map->Data(); } bool ans = sorter.GetOutput(&host_dest_fsa, arc_map_data); *dest = creator.GetFsa(); return ans; } bool Intersect(FsaOrVec &a_fsas, int32_t properties_a, FsaOrVec &b_fsas, int32_t properties_b, bool treat_epsilons_specially, FsaVec *out, Array1<int32_t> *arc_map_a, Array1<int32_t> *arc_map_b) { NVTX_RANGE(K2_FUNC); K2_CHECK(a_fsas.NumAxes() >= 2 && a_fsas.NumAxes() <= 3); K2_CHECK(b_fsas.NumAxes() >= 2 && b_fsas.NumAxes() <= 3); ContextPtr c = a_fsas.Context(); K2_CHECK_EQ(c->GetDeviceType(), kCpu); if (a_fsas.NumAxes() == 2) { FsaVec a_fsas_vec = FsaToFsaVec(a_fsas); return Intersect(a_fsas_vec, properties_a, b_fsas, properties_b, treat_epsilons_specially, out, arc_map_a, arc_map_b); } if (b_fsas.NumAxes() == 2) { FsaVec b_fsas_vec = FsaToFsaVec(b_fsas); return Intersect(a_fsas, properties_a, b_fsas_vec, properties_b, treat_epsilons_specially, out, arc_map_a, arc_map_b); } int32_t num_fsas_a = a_fsas.Dim0(), num_fsas_b = b_fsas.Dim0(); K2_CHECK_GT(num_fsas_a, 0); K2_CHECK_GT(num_fsas_b, 0); int32_t stride_a = 1, stride_b = 1; if (num_fsas_a != num_fsas_b) { if (num_fsas_a == 1) { stride_a = 0; } else if (num_fsas_b == 1) { stride_b = 0; } else { K2_CHECK_EQ(num_fsas_a, num_fsas_b); } // the check on the previous line will fail. 
} if (properties_a < 0) { Array1<int32_t> properties_a_out(c, num_fsas_a); GetFsaVecBasicProperties(a_fsas, &properties_a_out, &properties_a); } if (properties_b < 0) { Array1<int32_t> properties_b_out(c, num_fsas_b); GetFsaVecBasicProperties(b_fsas, &properties_b_out, &properties_b); } bool arc_sorted = (properties_a & kFsaPropertiesArcSorted) && (properties_b & kFsaPropertiesArcSorted); K2_CHECK(arc_sorted) << "Both a_fsas and b_fsas should be arc-sorted"; int32_t num_fsas = ::max(num_fsas_a, num_fsas_b); std::vector<std::unique_ptr<k2host::Intersection>> intersections(num_fsas); std::vector<k2host::Array2Size<int32_t>> sizes(num_fsas); for (int32_t i = 0; i < num_fsas; ++i) { k2host::Fsa host_fsa_a = FsaVecToHostFsa(a_fsas, i * stride_a), host_fsa_b = FsaVecToHostFsa(b_fsas, i * stride_b); intersections[i] = std::make_unique<k2host::Intersection>( host_fsa_a, host_fsa_b, treat_epsilons_specially, false); intersections[i]->GetSizes(&(sizes[i])); } FsaVecCreator creator(sizes); int32_t num_arcs = creator.NumArcs(); if (arc_map_a) *arc_map_a = Array1<int32_t>(c, num_arcs); if (arc_map_b) *arc_map_b = Array1<int32_t>(c, num_arcs); // the following few lines will allow us to add suitable offsets to the // `arc_map`. Array1<int32_t> a_fsas_row_splits12 = a_fsas.RowSplits(2)[a_fsas.RowSplits(1)], b_fsas_row_splits12 = b_fsas.RowSplits(2)[b_fsas.RowSplits(1)]; const int32_t *a_fsas_row_splits12_data = a_fsas_row_splits12.Data(), *b_fsas_row_splits12_data = b_fsas_row_splits12.Data(); bool ok = true; for (int32_t i = 0; i < num_fsas; ++i) { k2host::Fsa host_fsa_out = creator.GetHostFsa(i); int32_t arc_offset = creator.GetArcOffsetFor(i); int32_t *this_arc_map_a = (arc_map_a ? arc_map_a->Data() + arc_offset : nullptr), *this_arc_map_b = (arc_map_b ? arc_map_b->Data() + arc_offset : nullptr); bool ans = intersections[i]->GetOutput(&host_fsa_out, this_arc_map_a, this_arc_map_b); ok = ok && ans; int32_t this_num_arcs = creator.GetArcOffsetFor(i + 1) - arc_offset; if (arc_map_a) { int32_t arc_offset_a = a_fsas_row_splits12_data[i * stride_a]; for (int32_t i = 0; i < this_num_arcs; i++) if (this_arc_map_a[i] != -1) this_arc_map_a[i] += arc_offset_a; } if (arc_map_b) { int32_t arc_offset_b = b_fsas_row_splits12_data[i * stride_b]; for (int32_t i = 0; i < this_num_arcs; i++) if (this_arc_map_b[i] != -1) this_arc_map_b[i] += arc_offset_b; } } *out = creator.GetFsaVec(); return ok; } // Will be used in RemoveEpsilonHost and Determinize below to process FsaVec // input recursively. void RecursionWrapper(void (*f)(FsaOrVec &, FsaOrVec *, Ragged<int32_t> *), FsaOrVec &src, FsaOrVec *dest, Ragged<int32_t> *arc_deriv) { NVTX_RANGE(K2_FUNC); // src is actually an FsaVec. Just recurse for now. K2_CHECK_EQ(src.NumAxes(), 3); int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Ragged<int32_t>> arc_derivs(num_fsas); int32_t tot_num_arcs = 0; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); f(srcs[i], &(dests[i]), arc_deriv != nullptr ? 
&(arc_derivs[i]) : nullptr); if (arc_deriv != nullptr) { // convert arc indexes in arc_derivs from idx2 to idx012 Array1<int32_t> &values = arc_derivs[i].values; values = Plus(values, tot_num_arcs); tot_num_arcs += srcs[i].NumElements(); } } *dest = Stack(0, num_fsas, dests.data()); if (arc_deriv != nullptr) *arc_deriv = Cat(0, num_fsas, arc_derivs.data()); } void RemoveEpsilonHost(FsaOrVec &src, FsaOrVec *dest, Ragged<int32_t> *arc_derivs /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapper(RemoveEpsilonHost, src, dest, arc_derivs); } k2host::Fsa host_fsa = FsaToHostFsa(src); int32_t num_states = host_fsa.NumStates(); K2_CHECK_EQ(num_states, src.Dim0()); std::vector<double> max_forward_weights(num_states); std::vector<double> max_backward_weights(num_states); k2host::WfsaWithFbWeights max_wfsa(host_fsa, k2host::kMaxWeight, max_forward_weights.data(), max_backward_weights.data()); // pass infinity as beam since we don't do pruning here. float beam = std::numeric_limits<float>::infinity(); k2host::EpsilonsRemoverPrunedMax eps_remover(max_wfsa, beam); k2host::Array2Size<int32_t> fsa_size, arc_derivs_size; eps_remover.GetSizes(&fsa_size, &arc_derivs_size); FsaCreator fsa_creator(fsa_size); k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); K2_STATIC_ASSERT( (std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value)); Ragged2Creator<int32_t> ragged_creator(arc_derivs_size); k2host::Array2<int32_t *, int32_t> host_arc_derivs = ragged_creator.GetHostArray2(); eps_remover.GetOutput(&host_dest_fsa, &host_arc_derivs); *dest = fsa_creator.GetFsa(); if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2(); } void RemoveEpsilon(FsaOrVec &src, int32_t properties, FsaOrVec *dest, Ragged<int32_t> *arc_derivs) { if ((properties & kFsaPropertiesTopSortedAndAcyclic) != 0 && src.Context()->GetDeviceType() == kCpu) { // Host version of the algorithm RemoveEpsilonHost(src, dest, arc_derivs); } else { RemoveEpsilonDevice(src, dest, arc_derivs); } } void RemoveEpsilonAndAddSelfLoops(FsaOrVec &src, int32_t properties, FsaOrVec *dest, Ragged<int32_t> *arc_derivs) { NVTX_RANGE(K2_FUNC); Ragged<int32_t> arc_derivs1; FsaOrVec temp; RemoveEpsilon(src, properties, &temp, (arc_derivs != nullptr ? &arc_derivs1 : nullptr)); Array1<int32_t> arc_derivs2; AddEpsilonSelfLoops(temp, dest, (arc_derivs != nullptr ? &arc_derivs2 : nullptr)); if (arc_derivs != nullptr) { *arc_derivs = Index(arc_derivs1, 0, arc_derivs2, nullptr); } } void Determinize(FsaOrVec &src, DeterminizeWeightPushingType weight_pushing_type, FsaOrVec *dest, Ragged<int32_t> *arc_derivs /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Ragged<int32_t>> derivs_vector(num_fsas); int32_t tot_num_arcs = 0; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); Determinize(srcs[i], weight_pushing_type, &(dests[i]), arc_derivs != nullptr ? 
&(derivs_vector[i]) : nullptr); if (arc_derivs != nullptr) { // convert arc indexes in arc_derivs from idx2 to idx012 Array1<int32_t> &values = arc_derivs[i].values; values = Plus(values, tot_num_arcs); tot_num_arcs += srcs[i].NumElements(); } } *dest = Stack(0, num_fsas, dests.data()); if (arc_derivs != nullptr) *arc_derivs = Cat(0, num_fsas, derivs_vector.data()); return; } k2host::Fsa host_fsa = FsaToHostFsa(src); int32_t num_states = host_fsa.NumStates(); K2_CHECK_EQ(num_states, src.Dim0()); int32_t max_step = -1; // no limit k2host::FbWeightType host_weight_pushing_type = static_cast<k2host::FbWeightType>(static_cast<int>(weight_pushing_type)); k2host::DeterminizerMax determinizer(host_fsa, max_step, host_weight_pushing_type); k2host::Array2Size<int32_t> fsa_size, arc_derivs_size; determinizer.GetSizes(&fsa_size, &arc_derivs_size); FsaCreator fsa_creator(fsa_size); k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); K2_STATIC_ASSERT( (std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value)); Ragged2Creator<int32_t> ragged_creator(arc_derivs_size); k2host::Array2<int32_t *, int32_t> host_arc_derivs = ragged_creator.GetHostArray2(); determinizer.GetOutput(&host_dest_fsa, &host_arc_derivs); *dest = fsa_creator.GetFsa(); if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2(); } Fsa LinearFsa(const Array1<int32_t> &symbols) { NVTX_RANGE(K2_FUNC); ContextPtr &c = symbols.Context(); int32_t n = symbols.Dim(), num_states = n + 2, num_arcs = n + 1; Array1<int32_t> row_splits1 = Range(c, num_states + 1, 0), row_ids1 = Range(c, num_arcs, 0); int32_t *row_splits1_data = row_splits1.Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *symbols_data = symbols.Data(); K2_EVAL( c, num_arcs, lambda_set_arcs, (int32_t arc_idx01)->void { int32_t src_state = arc_idx01, dest_state = arc_idx01 + 1, // -1 == kFinalSymbol symbol = (arc_idx01 < n ? symbols_data[arc_idx01] : -1); if (arc_idx01 < n) K2_CHECK_NE(symbol, -1); float score = 0.0; arcs_data[arc_idx01] = Arc(src_state, dest_state, symbol, score); // the final state has no leaving arcs. if (arc_idx01 == 0) row_splits1_data[num_states] = num_arcs; }); return Ragged<Arc>(RaggedShape2(&row_splits1, &row_ids1, num_arcs), arcs); } FsaVec LinearFsas(const Ragged<int32_t> &symbols) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(symbols.NumAxes(), 2); ContextPtr &c = symbols.Context(); // if there are n symbols, there are n+2 states and n+1 arcs. RaggedShape states_shape = ChangeSublistSize(symbols.shape, 2); int32_t num_states = states_shape.NumElements(), num_arcs = symbols.NumElements() + symbols.Dim0(); // row_splits2 maps from state_idx01 to arc_idx012; row_ids2 does the reverse. // We'll set them in the lambda below. Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs); // If num_states equals to zero, the code below won't set the last value of // row_splits2, we should initialize here, or it will be a random value. 
if (num_states == 0) row_splits2 = 0; int32_t *row_ids2_data = row_ids2.Data(), *row_splits2_data = row_splits2.Data(); const int32_t *row_ids1_data = states_shape.RowIds(1).Data(), *row_splits1_data = states_shape.RowSplits(1).Data(), *symbols_data = symbols.values.Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); K2_EVAL( c, num_states, lambda, (int32_t state_idx01)->void { int32_t fsa_idx0 = row_ids1_data[state_idx01], state_idx0x = row_splits1_data[fsa_idx0], next_state_idx0x = row_splits1_data[fsa_idx0 + 1], idx1 = state_idx01 - state_idx0x; // the following works because each FSA has one fewer arcs than states. int32_t arc_idx0xx = state_idx0x - fsa_idx0, next_arc_idx0xx = next_state_idx0x - (fsa_idx0 + 1), // the following may look a bit wrong.. here, the idx1 is the // same as the idx12 if the arc exists, because each state has // one arc leaving it (except the last state). arc_idx012 = arc_idx0xx + idx1; // the following works because each FSA has one fewer symbols than arcs // (however it doesn't work for the last arc of each FSA; we check // below.) int32_t symbol_idx01 = arc_idx012 - fsa_idx0; if (arc_idx012 < next_arc_idx0xx) { int32_t src_state = idx1, dest_state = idx1 + 1, symbol = (arc_idx012 + 1 < next_arc_idx0xx ? symbols_data[symbol_idx01] : -1); // kFinalSymbol float score = 0.0; arcs_data[arc_idx012] = Arc(src_state, dest_state, symbol, score); row_ids2_data[arc_idx012] = state_idx01; } else { // The following ensures that the last element of row_splits1_data // (i.e. row_splits1[num_states]) is set to num_arcs. It also writes // something unnecessary for the last state of each FSA but the last // one, which will cause 2 threads to write the same item to the same // location. Note that there is no arc with index `arc_idx01`, if you // reach here. row_splits2_data[state_idx01 + 1] = arc_idx012; } row_splits2_data[state_idx01] = arc_idx012; }); return Ragged<Arc>( RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), num_states, &row_splits2, &row_ids2, num_arcs), arcs); } FsaVec LevenshteinGraphs(const Ragged<int32_t> &symbols, float ins_del_score /* = -0.501 */, Array1<int32_t> *aux_labels /*= nullptr*/, Array1<float> *score_offsets /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(symbols.NumAxes(), 2); ContextPtr &c = symbols.Context(); // For each fsa, the number of states will be number of symbols plus 2, we // plus 2 because we need an extra super final arc for each fsa. RaggedShape fsa_to_states = ChangeSublistSize(symbols.shape, 2); int32_t num_states = fsa_to_states.NumElements(); Array1<int32_t> num_arcs_for(c, num_states + 1); int32_t *num_arcs_for_data = num_arcs_for.Data(); // "fts" is short for fsa to states const int32_t *fts_row_splits1_data = fsa_to_states.RowSplits(1).Data(), *fts_row_ids1_data = fsa_to_states.RowIds(1).Data(); // set the arcs number for each state K2_EVAL( c, num_states, lambda_set_num_arcs, (int32_t state_idx01)->void { int32_t fsa_idx0 = fts_row_ids1_data[state_idx01], final_state = fts_row_splits1_data[fsa_idx0 + 1] - 1, current_num_arcs = 3; // normally there are three arcs, // self-loop and two arcs pointing to // the next state. 
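        // Worked example (comment added; not from the original source): for a
        // single FSA with symbols [a, b] there are n + 2 = 4 states; states 0
        // and 1 get 3 arcs each, the state just before the final state gets 2
        // (its self-loop plus the arc labelled -1 into the final state), and
        // the final state gets 0, i.e. 3 + 3 + 2 + 0 = 8, which matches
        // 3 * symbols.NumElements() + 2 * symbols.Dim0() below.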
if (state_idx01 == final_state - 1) current_num_arcs = 2; else if (state_idx01 == final_state) current_num_arcs = 0; num_arcs_for_data[state_idx01] = current_num_arcs; }); ExclusiveSum(num_arcs_for, &num_arcs_for); Array1<int32_t> &states_to_arcs_row_splits = num_arcs_for; int32_t num_arcs = symbols.NumElements() * 3 + symbols.Dim0() * 2; RaggedShape states_to_arcs = RaggedShape2(&states_to_arcs_row_splits, nullptr, num_arcs); // shape with a index of [fsa][state][arc] RaggedShape shape = ComposeRaggedShapes(fsa_to_states, states_to_arcs); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *row_splits1_data = shape.RowSplits(1).Data(), *row_ids1_data = shape.RowIds(1).Data(), *row_splits2_data = shape.RowSplits(2).Data(), *row_ids2_data = shape.RowIds(2).Data(), *symbols_data = symbols.values.Data(); int32_t *aux_labels_data = nullptr; if (aux_labels != nullptr) { *aux_labels = Array1<int32_t>(c, num_arcs); aux_labels_data = aux_labels->Data(); } float *score_offsets_data = nullptr; if (score_offsets != nullptr) { *score_offsets = Array1<float>(c, num_arcs); score_offsets_data = score_offsets->Data(); } K2_EVAL( c, num_arcs, lambda_set_arcs, (int32_t arc_idx012)->void { int32_t state_idx01 = row_ids2_data[arc_idx012], fsa_idx0 = row_ids1_data[state_idx01], state_idx0x = row_splits1_data[fsa_idx0], final_state_idx01 = row_splits1_data[fsa_idx0 + 1] - 1, state_idx1 = state_idx01 - state_idx0x, arc_idx01x = row_splits2_data[state_idx01], arc_idx2 = arc_idx012 - arc_idx01x, sym_state_idx01 = state_idx01 - 2 * fsa_idx0, current_symbol = 0, aux_labels_value = 0; if (state_idx01 != final_state_idx01 - 1 && state_idx01 != final_state_idx01) { current_symbol = symbols_data[sym_state_idx01]; K2_CHECK((current_symbol != 0) && (current_symbol != -1)) << "0 and -1 are not expected to be a symbol."; } float score_offset_value = 0; Arc arc; arc.src_state = state_idx1; switch (arc_idx2) { case 0: // the self loop arc arc.label = 0; arc.dest_state = state_idx1; arc.score = ins_del_score; aux_labels_value = 0; score_offset_value = ins_del_score - (-0.5); break; case 1: // the arc pointing to next state with blank if (state_idx01 == final_state_idx01 - 1) { // the arc pointing to // final state arc.label = -1; arc.score = 0; aux_labels_value = -1; } else { arc.label = 0; arc.score = -0.5; aux_labels_value = current_symbol; } arc.dest_state = state_idx1 + 1; break; case 2: // the arc pointing to the next state with symbol arc.label = current_symbol; arc.dest_state = state_idx1 + 1; arc.score = 0; aux_labels_value = current_symbol; break; default: K2_LOG(FATAL) << "Arc index must be less than 3"; } arcs_data[arc_idx012] = arc; if (aux_labels) aux_labels_data[arc_idx012] = aux_labels_value; if (score_offsets) score_offsets_data[arc_idx012] = score_offset_value; }); return Ragged<Arc>(shape, arcs); } FsaVec CtcGraphs(const Ragged<int32_t> &symbols, bool modified /*= false*/, Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(symbols.NumAxes(), 2); ContextPtr &c = symbols.Context(); int32_t num_fsas = symbols.Dim0(); Array1<int32_t> num_states_for(c, num_fsas + 1); int32_t *num_states_for_data = num_states_for.Data(); const int32_t *symbol_row_split1_data = symbols.RowSplits(1).Data(); // symbols indexed with [fsa][symbol] // for each fsa we need `symbol_num * 2 + 1 + 1` states, `symbol_num * 2 + 1` // means that we need a blank state on each side of a symbol state, `+ 1` is // for final state in k2 K2_EVAL( c, num_fsas, lambda_set_num_states, (int32_t 
fsa_idx0)->void {
        int32_t symbol_idx0x = symbol_row_split1_data[fsa_idx0],
                symbol_idx0x_next = symbol_row_split1_data[fsa_idx0 + 1],
                symbol_num = symbol_idx0x_next - symbol_idx0x;
        num_states_for_data[fsa_idx0] = symbol_num * 2 + 2;
      });
  ExclusiveSum(num_states_for, &num_states_for);
  Array1<int32_t> &fsa_to_states_row_splits = num_states_for;
  RaggedShape fsa_to_states =
      RaggedShape2(&fsa_to_states_row_splits, nullptr, -1);

  int32_t num_states = fsa_to_states.NumElements();
  Array1<int32_t> num_arcs_for(c, num_states + 1);
  int32_t *num_arcs_for_data = num_arcs_for.Data();
  const int32_t *fts_row_splits1_data = fsa_to_states.RowSplits(1).Data(),
                *fts_row_ids1_data = fsa_to_states.RowIds(1).Data(),
                *symbol_data = symbols.values.Data();
  // set the number of arcs for each state
  K2_EVAL(
      c, num_states, lambda_set_num_arcs, (int32_t state_idx01)->void {
        int32_t fsa_idx0 = fts_row_ids1_data[state_idx01],
                // we subtract fsa_idx0 here, because we are adding one more
                // state (the final state) for each fsa
                sym_state_idx01 = state_idx01 / 2 - fsa_idx0,
                remainder = state_idx01 % 2,
                current_num_arcs = 2;  // normally there are two arcs, a
                                       // self-loop and an arc pointing to the
                                       // next state; a blank state always has
                                       // two arcs
        if (remainder) {  // symbol state
          int32_t sym_final_state = symbol_row_split1_data[fsa_idx0 + 1];
          // There are no arcs for final states
          if (sym_state_idx01 == sym_final_state) {
            current_num_arcs = 0;
          } else if (modified) {
            current_num_arcs = 3;
          } else {
            int32_t current_symbol = symbol_data[sym_state_idx01],
                    // we set the next symbol of the last symbol to -1, so the
                    // following if clause will always be true, which means the
                    // last symbol state will have 3 arcs
                    next_symbol = (sym_state_idx01 + 1) == sym_final_state
                                      ? -1
                                      : symbol_data[sym_state_idx01 + 1];
            // symbols must not be equal to -1, which has a special meaning
            // in k2
            K2_CHECK_NE(current_symbol, -1);
            // if current_symbol equals next_symbol, we need a blank state
            // between them, so there are two arcs for this state; otherwise,
            // this state will point to the blank state and the next symbol
            // state, so we need three arcs here.
            // Note: for the simplified topology (i.e. when `modified` is
            // true), there are always 3 arcs leaving symbol states.
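            // Worked example (comment added; not from the original source):
            // for a single FSA with symbols [c, c] and modified == false the
            // states are [blank, c, blank, c, blank, final] and the per-state
            // arc counts are 2, 2, 2, 3, 2, 0: the first `c` state cannot
            // skip the following blank because the next symbol is also `c`,
            // while the last `c` state gets the extra arc into the final
            // state.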
if (current_symbol != next_symbol) current_num_arcs = 3; } } num_arcs_for_data[state_idx01] = current_num_arcs; }); ExclusiveSum(num_arcs_for, &num_arcs_for); Array1<int32_t> &states_to_arcs_row_splits = num_arcs_for; RaggedShape states_to_arcs = RaggedShape2(&states_to_arcs_row_splits, nullptr, -1); // ctc_shape with a index of [fsa][state][arc] RaggedShape ctc_shape = ComposeRaggedShapes(fsa_to_states, states_to_arcs); int32_t num_arcs = ctc_shape.NumElements(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *ctc_row_splits1_data = ctc_shape.RowSplits(1).Data(), *ctc_row_ids1_data = ctc_shape.RowIds(1).Data(), *ctc_row_splits2_data = ctc_shape.RowSplits(2).Data(), *ctc_row_ids2_data = ctc_shape.RowIds(2).Data(); int32_t *aux_labels_data = nullptr; if (aux_labels != nullptr) { *aux_labels = Array1<int32_t>(c, num_arcs); aux_labels_data = aux_labels->Data(); } K2_EVAL( c, num_arcs, lambda_set_arcs, (int32_t arc_idx012)->void { int32_t state_idx01 = ctc_row_ids2_data[arc_idx012], fsa_idx0 = ctc_row_ids1_data[state_idx01], state_idx0x = ctc_row_splits1_data[fsa_idx0], state_idx1 = state_idx01 - state_idx0x, arc_idx01x = ctc_row_splits2_data[state_idx01], arc_idx2 = arc_idx012 - arc_idx01x, sym_state_idx01 = state_idx01 / 2 - fsa_idx0, remainder = state_idx01 % 2, sym_final_state = symbol_row_split1_data[fsa_idx0 + 1]; bool final_state = sym_final_state == sym_state_idx01; int32_t current_symbol = final_state ? -1 : symbol_data[sym_state_idx01]; Arc arc; arc.score = 0; arc.src_state = state_idx1; int32_t aux_labels_value = 0; if (remainder) { if (final_state) return; int32_t next_symbol = (sym_state_idx01 + 1) == sym_final_state ? -1 : symbol_data[sym_state_idx01 + 1]; // for standard topology, the symbol state can not point to next // symbol state if the next symbol is identical to current symbol. if (current_symbol == next_symbol && !modified) { K2_CHECK_LT(arc_idx2, 2); arc.label = arc_idx2 == 0 ? 0 : current_symbol; arc.dest_state = arc_idx2 == 0 ? state_idx1 + 1 : state_idx1; } else { switch (arc_idx2) { case 0: // the arc pointing to blank state arc.label = 0; arc.dest_state = state_idx1 + 1; break; case 1: // the self loop arc arc.label = current_symbol; arc.dest_state = state_idx1; break; case 2: // the arc pointing to the next symbol state arc.label = next_symbol; aux_labels_value = sym_state_idx01 + 1 == sym_final_state ? 0 : next_symbol; arc.dest_state = state_idx1 + 2; break; default: K2_LOG(FATAL) << "Arc index must be less than 3"; } } } else { K2_CHECK_LT(arc_idx2, 2); arc.label = arc_idx2 == 0 ? 0 : current_symbol; arc.dest_state = arc_idx2 == 0 ? state_idx1 : state_idx1 + 1; aux_labels_value = (arc_idx2 == 0 || final_state) ? 0 : current_symbol; } arcs_data[arc_idx012] = arc; if (aux_labels) aux_labels_data[arc_idx012] = aux_labels_value; }); return Ragged<Arc>(ctc_shape, arcs); } Fsa CtcTopo(const ContextPtr &c, int32_t max_token, bool modified, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels); if (modified) { // plusing 2 here to include 0(epsilon) and final state int32_t states = max_token + 2; // for modified topology, the number of self loops and leaving arcs for // state 0 are all the number of states minus one. // and there two arcs(one for self loop, the other points to state 0) for // each of other states. 
  // see links below for details:
  // https://github.com/k2-fsa/k2/issues/746#issuecomment-856421616
  // https://github.com/k2-fsa/snowfall/pull/209
    int32_t num_arcs = (states - 1) * 2 + (states - 2) * 2;
    *aux_labels = Array1<int32_t>(c, num_arcs);
    Array1<int32_t> row_ids(c, num_arcs);
    Array1<Arc> arcs(c, num_arcs);
    int32_t *row_ids_data = row_ids.Data(),
            *aux_labels_data = aux_labels->Data();
    Arc *arcs_data = arcs.Data();
    K2_EVAL(
        c, num_arcs, lambda_set_row_ids_and_arcs, (int32_t idx01) -> void {
          Arc arc;
          arc.score = 0;
          if (idx01 < states - 1) {  // state 0 self loop
            arc.src_state = 0;
            arc.dest_state = 0;
            arc.label = idx01;
            row_ids_data[idx01] = 0;
            aux_labels_data[idx01] = idx01;
          } else if (idx01 < (states - 1) * 2) {  // arcs leaving state 0
            int32_t dest_state = idx01 - (states - 1) + 1;
            arc.src_state = 0;
            arc.dest_state = dest_state;
            arc.label = dest_state == states - 1 ? -1 : dest_state;
            row_ids_data[idx01] = 0;
            aux_labels_data[idx01] =
                dest_state == states - 1 ? -1 : dest_state;
          } else {  // arcs for other states
            int32_t bias = idx01 - (states - 1) * 2;
            int32_t state = bias / 2 + 1;
            arc.src_state = state;
            arc.label = state;
            if (bias % 2)
              arc.dest_state = 0;
            else
              arc.dest_state = state;
            row_ids_data[idx01] = state;
            aux_labels_data[idx01] = 0;
          }
          arcs_data[idx01] = arc;
        });
    Array1<int32_t> row_splits(c, states + 1);
    RowIdsToRowSplits(row_ids, &row_splits);
    return Ragged<Arc>(RaggedShape2(&row_splits, &row_ids, num_arcs), arcs);
  } else {
    // adding 2 here to include 0 (epsilon) and the final state
    int32_t states = max_token + 2,
            dim0 = states - 1,  // subtracting 1 here because there are no
                                // arcs leaving the final state
            dim1 = max_token + 2,  // each non-final state has `states` arcs
                                   // leaving it for the standard topology
            num_arcs = dim0 * dim1;
    *aux_labels = Array1<int32_t>(c, num_arcs);
    Array1<int32_t> row_ids(c, num_arcs);
    Array1<Arc> arcs(c, num_arcs);
    int32_t *row_ids_data = row_ids.Data(),
            *aux_labels_data = aux_labels->Data();
    Arc *arcs_data = arcs.Data();
    K2_EVAL2(
        c, dim0, dim1, lambda_set_row_ids_and_arcs,
        (int32_t i, int32_t j)->void {
          row_ids_data[i * dim1 + j] = i;
          Arc arc;
          arc.src_state = i;
          arc.dest_state = j;
          arc.label = j == (dim1 - 1) ? -1 : j;
          arc.score = 0;
          arcs_data[i * dim1 + j] = arc;
          int32_t olabel = i == j ? 0 : (j == (dim1 - 1) ? -1 : j);
          aux_labels_data[i * dim1 + j] = olabel;
        });
    Array1<int32_t> row_splits(c, states + 1);
    RowIdsToRowSplits(row_ids, &row_splits);
    return Ragged<Arc>(RaggedShape2(&row_splits, &row_ids, dim0 * dim1), arcs);
  }
}

void ArcSort(Fsa *fsa) {
  if (fsa->NumAxes() < 2) return;  // it is empty
  SortSublists<Arc>(fsa);
}

void ArcSort(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*= nullptr*/) {
  NVTX_RANGE(K2_FUNC);
  if (!src.values.IsValid()) return;
  if (arc_map != nullptr)
    *arc_map = Array1<int32_t>(src.Context(), src.NumElements());
  Fsa tmp(src.shape, src.values.Clone());
  SortSublists<Arc>(&tmp, arc_map);
  *dest = tmp;
}

// TODO(fangjun): use the following method suggested by Dan
//
// ... incidentally, it's possible to further optimize this so the run
// time is less than linear, by using methods similar to what I use
// in GetStateBatches(); imagine computing a table that instead of
// the best traceback, is the best 2-step traceback; and then the 4-step
// traceback, and so on. There's no need for this right now, since the
// forward-pass algorithm is already at least linear-time in the length
// of this path. But we can consider it for the future.
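
// Usage sketch for ShortestPath() below (comment added; not part of the
// original source).  It assumes `entering_arcs` was produced by the forward
// pass over `fsas` with the tropical semiring (e.g. via GetForwardScores();
// that call is not shown here):
//
//   FsaVec fsas = /* arc-sorted, connected FsaVec */;
//   Array1<int32_t> entering_arcs = /* best entering arc per state, -1 for
//                                      start states */;
//   Ragged<int32_t> best = ShortestPath(fsas, entering_arcs);
//   // `best` has one sub-list per FSA containing the arc indexes (idx012
//   // into `fsas`) of the best path from its start state to its final
//   // state; the sub-list is empty if the FSA is empty (or no traceback is
//   // available).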
Ragged<int32_t> ShortestPath(FsaVec &fsas, const Array1<int32_t> &entering_arcs) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); const int32_t *entering_arcs_data = entering_arcs.Data(); const Arc *arcs_data = fsas.values.Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = fsas.TotSize(1); ContextPtr &context = fsas.Context(); // allocate an extra element for ExclusiveSum Array1<int32_t> num_best_arcs_per_fsa(context, num_fsas + 1); int32_t *num_best_arcs_per_fsa_data = num_best_arcs_per_fsa.Data(); const int32_t *row_splits1_data = fsas.RowSplits(1).Data(); // -1 represents an invalid arc_index. // This extra array avoids an extra iteration over `entering_arcs`. Array1<int32_t> state_best_arc_index_array(context, num_states, -1); int32_t *state_best_arc_index_array_data = state_best_arc_index_array.Data(); K2_EVAL( context, num_fsas, lambda_set_num_best_arcs, (int32_t fsas_idx0) { int32_t state_idx01 = row_splits1_data[fsas_idx0]; int32_t state_idx01_next = row_splits1_data[fsas_idx0 + 1]; if (state_idx01_next == state_idx01) { // this fsa is empty, so there is no best path available num_best_arcs_per_fsa_data[fsas_idx0] = 0; return; } int32_t final_state_idx01 = state_idx01_next - 1; int32_t cur_state = final_state_idx01; int32_t cur_index = entering_arcs_data[cur_state]; int32_t num_arcs = 0; int32_t *p = state_best_arc_index_array_data + final_state_idx01; while (cur_index != -1) { *p = cur_index; --p; cur_state = arcs_data[cur_index].src_state + state_idx01; cur_index = entering_arcs_data[cur_state]; ++num_arcs; } num_best_arcs_per_fsa_data[fsas_idx0] = num_arcs; }); ExclusiveSum(num_best_arcs_per_fsa, &num_best_arcs_per_fsa); RaggedShape shape = RaggedShape2(&num_best_arcs_per_fsa, nullptr, -1); const int32_t *shape_row_splits1_data = shape.RowSplits(1).Data(); const int32_t *shape_row_ids1_data = shape.RowIds(1).Data(); const int32_t *ans_row_splits_data = shape.RowSplits(1).Data(); Array1<int32_t> best_path_arc_indexes(context, shape.NumElements()); int32_t *best_path_arc_indexes_data = best_path_arc_indexes.Data(); K2_EVAL( context, shape.NumElements(), lambda_set_best_arcs, (int32_t ans_idx01) { int32_t fsa_idx0 = shape_row_ids1_data[ans_idx01]; int32_t ans_idx0x = shape_row_splits1_data[fsa_idx0]; int32_t ans_idx1 = ans_idx01 - ans_idx0x; int32_t num_arcs_this_fsa = num_best_arcs_per_fsa_data[fsa_idx0 + 1] - num_best_arcs_per_fsa_data[fsa_idx0]; if (num_arcs_this_fsa == 0) return; int32_t final_state_idx01_this_fsa = row_splits1_data[fsa_idx0 + 1] - 1; const int32_t *p_start = state_best_arc_index_array_data + final_state_idx01_this_fsa - num_arcs_this_fsa + 1; best_path_arc_indexes_data[ans_idx01] = p_start[ans_idx1]; }); Ragged<int32_t> ans(shape, best_path_arc_indexes); return ans; } void AddEpsilonSelfLoops(FsaOrVec &src, FsaOrVec *dest, Array1<int32_t> *arc_map /*= nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); const int32_t *old_row_splits1_data = src.RowSplits(1).Data(), *old_row_ids1_data = src.RowIds(1).Data(); const Arc *old_arcs_data = src.values.Data(); if (src.NumAxes() == 2) { int32_t num_states = src.Dim0(); if (num_states < 2) { K2_CHECK_EQ(num_states, 0); *dest = src; if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0); return; } int32_t old_num_arcs = src.TotSize(1), new_num_arcs = old_num_arcs + (num_states - 1); Array1<int32_t> new_row_splits(c, num_states + 1), new_row_ids(c, new_num_arcs); Array1<Arc> new_arcs(c, new_num_arcs); int32_t *new_row_splits1_data = new_row_splits.Data(), *new_row_ids1_data = 
new_row_ids.Data(); Arc *new_arcs_data = new_arcs.Data(); int32_t *arc_map_data = nullptr; if (arc_map) { *arc_map = Array1<int32_t>(c, new_num_arcs); arc_map_data = arc_map->Data(); } ParallelRunner pr(c); { With w(pr.NewStream()); K2_EVAL( c, old_num_arcs, lambda_copy_data, (int32_t arc_idx01)->void { int32_t state_idx0 = old_row_ids1_data[arc_idx01], new_arc_idx01 = arc_idx01 + 1 + state_idx0; // the "+1" above is because we put the self-loop first. new_row_ids1_data[new_arc_idx01] = state_idx0; new_arcs_data[new_arc_idx01] = old_arcs_data[arc_idx01]; if (arc_map_data) arc_map_data[new_arc_idx01] = arc_idx01; }); } { With w(pr.NewStream()); K2_EVAL( c, num_states, lambda_set_new_data, (int32_t state_idx0)->void { int32_t old_arc_idx0x = old_row_splits1_data[state_idx0], new_arc_idx0x = old_arc_idx0x + state_idx0; new_row_splits1_data[state_idx0] = new_arc_idx0x; if (state_idx0 + 1 < num_states) { // not final-state int32_t new_arc_idx01 = new_arc_idx0x; // the 1st arc is the loop new_row_ids1_data[new_arc_idx01] = state_idx0; new_arcs_data[new_arc_idx01] = Arc(state_idx0, state_idx0, 0, 0.0); if (arc_map_data) arc_map_data[new_arc_idx01] = -1; } else { // Note: if num_states was zero we would have returned above, so // we don't have to worry about empty FSAs. new_row_splits1_data[num_states] = new_arc_idx0x; } }); } pr.Finish(); *dest = Ragged<Arc>( RaggedShape2(&new_row_splits, &new_row_ids, new_num_arcs), new_arcs); } else { K2_CHECK_EQ(src.NumAxes(), 3); // Get a vector saying, for each FSA, whether it's nonempty. int32_t num_fsas = src.Dim0(), num_states = src.TotSize(1), old_num_arcs = src.TotSize(2); if (num_states == 0) { *dest = src; if (arc_map) *arc_map = Array1<int32_t>(c, 0); return; } Array1<int32_t> fsa_nonempty(c, num_fsas + 1); int32_t *fsa_nonempty_data = fsa_nonempty.Data(); K2_EVAL( c, num_fsas, lambda_set_fsa_nonempty, (int32_t fsa_idx0)->void { fsa_nonempty_data[fsa_idx0] = (old_row_splits1_data[fsa_idx0 + 1] > old_row_splits1_data[fsa_idx0]); }); ExclusiveSum(fsa_nonempty, &fsa_nonempty); const int32_t *old_row_splits2_data = src.RowSplits(2).Data(), *old_row_ids2_data = src.RowIds(2).Data(); int32_t num_nonempty_fsas = fsa_nonempty.Back(), new_num_arcs = old_num_arcs + num_states - num_nonempty_fsas; // we subtract `num_nonempty_fsas` because final-states don't get a // self-loop. Array1<int32_t> new_row_splits2(c, num_states + 1), new_row_ids2(c, new_num_arcs); Array1<Arc> new_arcs(c, new_num_arcs); // fsa_idx0_mod_data maps from fsa_idx0 to a modified fsa_idx0 that // "doesn't count" FSAs with zero states. const int32_t *fsa_idx0_mod_data = fsa_nonempty_data; int32_t *new_row_splits2_data = new_row_splits2.Data(), *new_row_ids2_data = new_row_ids2.Data(); Arc *new_arcs_data = new_arcs.Data(); int32_t *arc_map_data = nullptr; if (arc_map) { *arc_map = Array1<int32_t>(c, new_num_arcs); arc_map_data = arc_map->Data(); } ParallelRunner pr(c); { With w(pr.NewStream()); K2_EVAL( c, old_num_arcs, lambda_copy_data, (int32_t arc_idx012)->void { int32_t state_idx01 = old_row_ids2_data[arc_idx012], fsa_idx0 = old_row_ids1_data[state_idx01], fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0], new_arc_idx012 = arc_idx012 + 1 + state_idx01 - fsa_idx0_mod; // The "+1" above is because we put the self-loop first. The // "-fsa_idx0_mod" is because final-states don't get a self-loop. 
new_row_ids2_data[new_arc_idx012] = state_idx01; new_arcs_data[new_arc_idx012] = old_arcs_data[arc_idx012]; if (arc_map_data) arc_map_data[new_arc_idx012] = arc_idx012; }); } { With w(pr.NewStream()); K2_EVAL( c, num_states, lambda_set_new_data, (int32_t state_idx01)->void { int32_t fsa_idx0 = old_row_ids1_data[state_idx01], fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0], state_idx0x = old_row_splits1_data[fsa_idx0], next_state_idx0x = old_row_splits1_data[fsa_idx0 + 1], old_arc_idx01x = old_row_splits2_data[state_idx01]; // Below the "+ state_idx01" is because each state gets a self-loop, // and the "- fsa_idx0_mod" is because final-states don't get a // self-loop. int32_t new_arc_idx01x = old_arc_idx01x + state_idx01 - fsa_idx0_mod; // The self-loop arc is the first arc: int32_t new_arc_idx012 = new_arc_idx01x; new_row_splits2_data[state_idx01] = new_arc_idx01x; if (state_idx01 + 1 < next_state_idx0x) { // not final-state new_row_ids2_data[new_arc_idx012] = state_idx01; int32_t state_idx1 = state_idx01 - state_idx0x; new_arcs_data[new_arc_idx012] = Arc(state_idx1, state_idx1, 0, 0.0); if (arc_map_data) arc_map_data[new_arc_idx012] = -1; } else if (state_idx01 + 1 == num_states) { // Note: if num_states was zero we would have returned above, so // we dont have to worry about an empty FsaVec. new_row_splits2_data[num_states] = new_arc_idx01x; } }); } pr.Finish(); *dest = Ragged<Arc>(RaggedShape3(&src.RowSplits(1), &src.RowIds(1), num_states, &new_row_splits2, &new_row_ids2, new_num_arcs), new_arcs); } } Fsa Union(FsaVec &fsas, Array1<int32_t> *arc_map /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &context = fsas.Context(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); const int32_t *fsas_row_splits2_data = fsas.RowSplits(2).Data(); const int32_t *fsas_row_ids1_data = fsas.RowIds(1).Data(); const int32_t *fsas_row_ids2_data = fsas.RowIds(2).Data(); const Arc *arcs_data = fsas.values.Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = fsas.TotSize(1); int32_t num_arcs = fsas.TotSize(2); // A new start state and a new final state are added (+2). 
// The final state of each fsa is removed (-num_fsas) int32_t num_out_states = num_states + 2 - num_fsas; int32_t out_final_state = num_out_states - 1; // For every fsa, a new arc is added from the new start state // to its original start state (+num_fsas) int32_t num_out_arcs = num_arcs + num_fsas; Array1<int32_t> out_row_ids(context, num_out_arcs); Array1<Arc> out_arcs(context, num_out_arcs); Array1<int32_t> tmp_arc_map(context, num_out_arcs, -1); int32_t *tmp_arc_map_data = tmp_arc_map.Data(); int32_t *out_row_ids_data = out_row_ids.Data(); Arc *out_arcs_data = out_arcs.Data(); K2_EVAL( context, num_arcs, lambda_set_out, (int32_t fsas_arc_idx012) { int32_t fsas_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012]; int32_t fsas_idx0 = fsas_row_ids1_data[fsas_state_idx01]; int32_t this_fsa_final_state_idx01 = fsas_row_splits1_data[fsas_idx0 + 1] - 1; K2_DCHECK_GT(this_fsa_final_state_idx01, fsas_state_idx01) << "We support only FSAs with at least two states at present"; int32_t fsas_state_idx0x = fsas_row_splits1_data[fsas_idx0]; int32_t fsas_state_idx1 = fsas_state_idx01 - fsas_state_idx0x; int32_t this_fsa_final_state_idx1 = this_fsa_final_state_idx01 - fsas_state_idx0x; int32_t fsas_arc_idx0xx = fsas_row_splits2_data[fsas_state_idx0x]; // fsa0: +1 (a new start state) // fsa1: +0 (the final state of fsa0 is removed) // fsa2: -1 (the final state of fsa1 is removed) // fsa3: -2 (the final state of fsa2 is removed) int32_t state_offset = 1 - fsas_idx0; int32_t out_state_idx0 = fsas_state_idx01 + state_offset; int32_t out_arc_idx01 = fsas_arc_idx012 + num_fsas; out_row_ids_data[out_arc_idx01] = out_state_idx0; Arc arc = arcs_data[fsas_arc_idx012]; K2_DCHECK_EQ(arc.src_state, fsas_state_idx1); if (arc.dest_state == this_fsa_final_state_idx1) arc.dest_state = out_final_state; else arc.dest_state = arc.dest_state - arc.src_state + out_state_idx0; arc.src_state = out_state_idx0; out_arcs_data[out_arc_idx01] = arc; tmp_arc_map_data[out_arc_idx01] = fsas_arc_idx012; if (fsas_arc_idx0xx == fsas_arc_idx012) { // add a new arc from the new start state to the start state // of this fsa // // WARNING: we cannot use fsas_state_idx01 here // since the start state may have no leaving arcs! Arc arc(0, fsas_state_idx0x + state_offset, 0, 0); out_arcs_data[fsas_idx0] = arc; out_row_ids_data[fsas_idx0] = 0; } }); if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map); Array1<int32_t> out_row_splits(context, num_out_states + 1); RowIdsToRowSplits(out_row_ids, &out_row_splits); RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs); Fsa ans = Ragged<Arc>(shape, out_arcs); return ans; } Fsa Closure(Fsa &fsa, Array1<int32_t> *arc_map /* = nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsa.NumAxes(), 2) << "We support only a single FSA."; ContextPtr &c = fsa.Context(); int32_t num_states = fsa.Dim0(); if (num_states < 2) { K2_CHECK_EQ(num_states, 0) << "An empty fsa should contain no states at all"; if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0); return fsa; // return itself if the input fsa is empty } const int32_t *fsa_row_splits_data = fsa.RowSplits(1).Data(); const int32_t *fsa_row_ids_data = fsa.RowIds(1).Data(); const Arc *fsa_arcs_data = fsa.values.Data(); int32_t fsa_final_state = num_states - 1; int32_t num_out_states = num_states; // An arc from the start state to the final state with label == -1 is added. 
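  // Illustration (comment added; not from the original source): for an input
  // FSA that accepts exactly "ab", the closure accepts "", "ab", "abab", ...
  // Arcs that entered the old final state are redirected to state 0 with
  // label 0, and the single new arc (0 -> final, label == -1) is what makes
  // the empty string acceptable.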
int32_t num_out_arcs = fsa.values.Dim() + 1; Array1<int32_t> out_row_ids(c, num_out_arcs); int32_t *out_row_ids_data = out_row_ids.Data(); Array1<Arc> out_arcs(c, num_out_arcs); Arc *out_arcs_data = out_arcs.Data(); Array1<int32_t> tmp_arc_map(c, num_out_arcs); int32_t *tmp_arc_map_data = tmp_arc_map.Data(); K2_EVAL( c, fsa.values.Dim(), lambda_set_arcs, (int32_t fsa_arc_idx01) { int32_t fsa_state_idx0 = fsa_row_ids_data[fsa_arc_idx01]; int32_t fsa_arc_idx0x = fsa_row_splits_data[fsa_state_idx0]; int32_t fsa_arc_idx1 = fsa_arc_idx01 - fsa_arc_idx0x; int32_t this_state_num_arcs = fsa_row_splits_data[fsa_state_idx0 + 1] - fsa_arc_idx0x; Arc arc = fsa_arcs_data[fsa_arc_idx01]; if (arc.dest_state == fsa_final_state) { // modify arcs entering the final state such that: // - dest_state == 0 // - label == 0 arc.dest_state = 0; K2_DCHECK_EQ(arc.label, -1); arc.label = 0; } int out_arc_idx01; if (arc.src_state > 0) { // this arc is not originated from the start state, so its index is // incremented out_arc_idx01 = fsa_arc_idx01 + 1; } else { out_arc_idx01 = fsa_arc_idx01; if (fsa_arc_idx1 == this_state_num_arcs - 1) { // This is the last arc of the original start state, // so we add a new arc just after it. Arc new_arc(0, fsa_final_state, -1, 0.0f); out_arcs_data[out_arc_idx01 + 1] = new_arc; out_row_ids_data[out_arc_idx01 + 1] = 0; tmp_arc_map_data[out_arc_idx01 + 1] = -1; } } // it may happen that the start state has no leaving arcs if (fsa_row_splits_data[1] == 0) { Arc new_arc(0, fsa_final_state, -1, 0.0f); out_arcs_data[0] = new_arc; out_row_ids_data[0] = 0; tmp_arc_map_data[0] = -1; } tmp_arc_map_data[out_arc_idx01] = fsa_arc_idx01; out_arcs_data[out_arc_idx01] = arc; out_row_ids_data[out_arc_idx01] = arc.src_state; }); if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map); Array1<int32_t> out_row_splits(c, num_out_states + 1); int32_t *out_row_splits_data = out_row_splits.Data(); K2_EVAL( c, out_row_splits.Dim(), lambda_set_row_splits, (int32_t i) { if (i == 0) out_row_splits_data[i] = 0; else out_row_splits_data[i] = fsa_row_splits_data[i] + 1; }); RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs); Fsa ans = Ragged<Arc>(shape, out_arcs); return ans; } FsaOrVec ExpandArcs(FsaOrVec &fsas, RaggedShape &labels_shape, Array1<int32_t> *fsas_arc_map /*=nullptr*/, Array1<int32_t> *labels_arc_map /*=nullptr*/) { NVTX_RANGE(K2_FUNC); if (fsas.NumAxes() == 2) { FsaVec fsas_temp = FsaToFsaVec(fsas); return ExpandArcs(fsas_temp, labels_shape, fsas_arc_map, labels_arc_map) .RemoveAxis(0); } K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(labels_shape.NumAxes(), 2); K2_CHECK_EQ(fsas.NumElements(), labels_shape.Dim0()); ContextPtr &c = fsas.Context(); K2_CHECK(c->IsCompatible(*labels_shape.Context())); RaggedShape state_to_arcs = GetLayer(fsas.shape, 1); // `state_to_foo` is a RaggedShape that, for each state in `fsas`, has a list // of length `num_arcs + 1`, where `num_arcs` is the number of arcs leaving // this state in `fsas`. Interpret this as: one element for the state // itself, then one for each arc leaving it. This `foo` is an index that // corresponds to num-arcs plus one, but because it is really a placeholder // and we want to keep it distinct from other things, we call it `foo`. RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1); int32_t foo_size = state_to_foo.NumElements(); // For each element of `state_to_foo`, `num_ostates_for` says how many states // there will be for this (state,foo) in the returned (output) FSA. 
Here, the // idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the // state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the // original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `fsas`], // and we set `num_ostates_for[idx01] = max(0, seq_len-1)`, where seq_len is // the length of the sequence in `labels_shape` corresponding to this // arc-index. Array1<int32_t> num_ostates_for(c, foo_size + 1); int32_t *num_ostates_for_data = num_ostates_for.Data(); const int32_t *labels_row_splits1_data = labels_shape.RowSplits(1).Data(), *fsas_row_splits2_data = fsas.RowSplits(2).Data(), *state_to_foo_row_splits1_data = state_to_foo.RowSplits(1).Data(), *state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data(); K2_EVAL( c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void { // note: the idx01, idx0, idx0x are into `state_to_foo`. // This idx0 is a state-index into `fsas` (an idx01 w.r.t. `fsas`). int32_t idx0 = state_to_foo_row_ids1_data[idx01], idx0x = state_to_foo_row_splits1_data[idx0], idx1 = idx01 - idx0x; // idx1 is `foo`. int32_t num_ostates; if (idx1 == 0) { num_ostates = 1; // this is a copy of the original state. } else { int32_t fsas_arc_idx2 = idx1 - 1, fsas_state_idx01 = idx0, fsas_arc_idx01x = fsas_row_splits2_data[fsas_state_idx01], fsas_arc_idx012 = fsas_arc_idx01x + fsas_arc_idx2, labels_shape_idx0 = fsas_arc_idx012, labels_shape_idx0x = labels_row_splits1_data[labels_shape_idx0], labels_shape_idx0x_next = labels_row_splits1_data[labels_shape_idx0 + 1], labels_shape_len1 = labels_shape_idx0x_next - labels_shape_idx0x; // A sequence of n symbols will require n-1 extra states to represent // it. num_ostates = max(labels_shape_len1 - 1, (int32_t)0); } num_ostates_for_data[idx01] = num_ostates; }); ExclusiveSum(num_ostates_for, &num_ostates_for); Array1<int32_t> &foo_to_ostates_row_splits = num_ostates_for; RaggedShape foo_to_ostates = RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1); // to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate] // where foo is a general-purpose index that ranges over the (num_arcs + 1) of // the original state. RaggedShape to_ostates_shape = ComposeRaggedShapes3( GetLayer(fsas.shape, 0), state_to_foo, foo_to_ostates); // Below, `tos` means `to_ostates_shape`. const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(), *tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(), *tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(), *tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(), *tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(), *tos_row_ids3_data = to_ostates_shape.RowIds(3).Data(); // `num_oarcs` gives the number of arcs in the returned (output) FSA for each // `ostate` (i.e. leaving each state in the returned FSA). 
int32_t tot_ostates = to_ostates_shape.NumElements(); Array1<int32_t> num_oarcs(c, tot_ostates + 1); int32_t *num_oarcs_data = num_oarcs.Data(); K2_EVAL( c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void { // All these indexes are into `to_ostates_shape`, indexed // `[fsa][state][foo][ostate].` int32_t idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx01x_next = tos_row_splits2_data[idx01 + 1], len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x, idx3 = idx0123 - idx012x; int32_t num_arcs; if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); // This ostate corresponds to the original state; it is not one of the // extra states added to support chains of arcs. // The original state had `orig_num_arcs` leaving it, which is the // number of `foo` indexes minus one. int32_t orig_num_arcs = len2 - 1; num_arcs = orig_num_arcs; } else { // All newly-created states have exactly one arc leaving them. num_arcs = 1; } num_oarcs_data[idx0123] = num_arcs; }); ExclusiveSum(num_oarcs, &num_oarcs); Array1<int32_t> &ostate_to_oarcs_row_splits = num_oarcs; RaggedShape ostate_to_oarcs = RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1); // `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc] RaggedShape full_shape = ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs); // for the lower-order row-splits and row-ids, use tot_row_{splits,idx}n_data const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(), *full_row_ids4_data = full_shape.RowIds(4).Data(); int32_t tot_oarcs = full_shape.NumElements(); K2_CHECK_GE(tot_oarcs, fsas.NumElements()); int32_t *fsas_arc_map_data = nullptr, *labels_arc_map_data = nullptr; if (fsas_arc_map) { *fsas_arc_map = Array1<int32_t>(c, tot_oarcs); fsas_arc_map_data = fsas_arc_map->Data(); } if (labels_arc_map) { *labels_arc_map = Array1<int32_t>(c, tot_oarcs); labels_arc_map_data = labels_arc_map->Data(); } Array1<Arc> oarcs(c, tot_oarcs); Arc *oarcs_data = oarcs.Data(); const Arc *arcs_data = fsas.values.Data(); K2_EVAL( c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void { // All these indexes are into `full_shape`, indexed // `[fsa][state][foo][ostate][oarc].` int32_t idx0123 = full_row_ids4_data[idx01234], idx0123x = full_row_splits4_data[idx0123], idx4 = idx01234 - idx0123x, idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx3 = idx0123 - idx012x, idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx2 = idx012 - idx01x, idx0 = tos_row_ids1_data[idx01], idx0x = tos_row_splits1_data[idx0], idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]]; int32_t fsa_idx01x = fsas_row_splits2_data[idx01]; int32_t fsa_idx2; // the idx2 (arc-index) into `fsas` of the input arc // that's most relevant to us.. int32_t seq_pos; // seq_pos is our index into the sequence of arcs that // we produce for each original arc if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); fsa_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; the idx4 // enumerates the arcs leaving it.. seq_pos = 0; } else { // this is one of the extra `foo` indexes, one per arc in the input // FSA that leaves this state; each of those `foo` indexes has // (seq_len - 1) states in it (idx3=0,1..seq_len-1); and each state // has one arc leaving it (idx4==0). K2_CHECK_EQ(idx4, 0); fsa_idx2 = idx2 - 1; seq_pos = idx3 + 1; } int32_t fsa_idx012 = fsa_idx01x + fsa_idx2; // index of the arc in // source FSA FSA that // we're expanding.. 
Arc iarc = arcs_data[fsa_idx012]; int32_t labels_idx0x = labels_row_splits1_data[fsa_idx012], labels_next_idx0x = labels_row_splits1_data[fsa_idx012 + 1], labels_len1 = labels_next_idx0x - labels_idx0x; // labels_len1 is length of label sequence for this arc K2_CHECK_LT(seq_pos, max(int32_t(1), labels_len1)); int32_t dest_idx01 = idx0x + iarc.dest_state, // original destination // state-index orig_dest_idx0123 = tos_row_splits3_data[tos_row_splits2_data[dest_idx01]]; Arc oarc; oarc.src_state = idx0123 - idx0xxx; // If this is the last arc in the sequence, the dest-state is the // original dest-state of the arc. Otherwise the dest-state is one of // the new states that we created. The idx123 will be an idx1 after // removing axes. int32_t dest_idx123; if (seq_pos + 1 >= labels_len1) { // last arc in sequence.. dest_idx123 = orig_dest_idx0123 - idx0xxx; } else { int32_t dest_state_idx2 = fsa_idx2 + 1, // index `foo` equals // orig_arc_idx+1 dest_state_idx3 = seq_pos, // ostate index.. dest_idx012 = idx01x + dest_state_idx2, dest_idx012x = tos_row_splits3_data[dest_idx012], dest_idx0123 = dest_idx012x + dest_state_idx3; dest_idx123 = dest_idx0123 - idx0xxx; } oarc.dest_state = dest_idx123; // indexes 1,2,3 will be combined; in // the output FSA it will be an idx1. if (fsas_arc_map_data) fsas_arc_map_data[idx01234] = (seq_pos == 0 ? fsa_idx012 : -1); if (labels_arc_map_data) labels_arc_map_data[idx01234] = (seq_pos < labels_len1 ? labels_idx0x + seq_pos : -1); if (iarc.label != -1) { // normal case.. label goes on 1st arc in sequence oarc.label = (seq_pos == 0 ? iarc.label : 0); } else { // If the arc was to the final-state, we need to keep the label on the // last arc of the sequence to keep the output valid. The following // would be "seq_pos + 1 == labels_len1 ? -1 : 0", but we make it ">=" // not "=" to account for the case seq_pos=0, labels_len1 = 0. oarc.label = (seq_pos + 1 >= labels_len1 ? -1 : 0); } oarc.score = (seq_pos == 0 ? iarc.score : 0.0); oarcs_data[idx01234] = oarc; }); // remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes // axis 1, so remove axis 1 twice]. 
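  // In other words (comment added; not from the original source): the 5-axis
  // shape [fsa][orig_state][foo][ostate][oarc] collapses to [fsa][state][arc],
  // where the new `state` axis enumerates, for each original state, the state
  // itself followed by the extra states created for its multi-symbol arcs.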
RaggedShape temp = RemoveAxis(full_shape, 1); return FsaVec(RemoveAxis(temp, 1), oarcs); } void Invert(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest, Ragged<int32_t> *dest_aux_labels, Array1<int32_t> *arc_map /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src_aux_labels.NumAxes(), 2); K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements()); K2_CHECK(dest != nullptr && dest_aux_labels != nullptr); ContextPtr c = GetContext(src, src_aux_labels); if (src.NumAxes() == 2) { Fsa *srcs = &src; FsaVec src_vec = CreateFsaVec(1, &srcs), dest_vec; Invert(src_vec, src_aux_labels, &dest_vec, dest_aux_labels, arc_map); *dest = GetFsaVecElement(dest_vec, 0); return; } Array1<int32_t> src_arc_map, labels_arc_map; *dest = ExpandArcs(src, src_aux_labels.shape, &src_arc_map, &labels_arc_map); // swap labels and aux_labels int32_t dest_num_arcs = dest->NumElements(); Arc *dest_arcs_data = dest->values.Data(); const int32_t *labels_arc_map_data = labels_arc_map.Data(), *src_aux_labels_data = src_aux_labels.values.Data(); Array1<int32_t> dest_aux_labels_row_splits(c, dest_num_arcs + 1); int32_t *dest_aux_labels_row_splits_data = dest_aux_labels_row_splits.Data(); K2_EVAL( c, dest_num_arcs, lambda_set_dest_aux_labels_num, (int32_t dest_idx012)->void { Arc &dest_arc = dest_arcs_data[dest_idx012]; // we'll remove epsilons in dest_aux_labels dest_aux_labels_row_splits_data[dest_idx012] = dest_arc.label == 0 ? 0 : 1; }); ExclusiveSum(dest_aux_labels_row_splits.Arange(0, dest_num_arcs), &dest_aux_labels_row_splits); RaggedShape dest_aux_labels_shape = RaggedShape2(&dest_aux_labels_row_splits, nullptr, -1); Array1<int32_t> dest_aux_labels_values(c, dest_aux_labels_shape.NumElements()); int32_t *dest_aux_labels_values_data = dest_aux_labels_values.Data(); K2_EVAL( c, dest_num_arcs, lambda_set_dest_labels_and_aux_labels, (int32_t dest_idx012)->void { Arc &dest_arc = dest_arcs_data[dest_idx012]; // swap label and aux_label if (dest_arc.label != 0) { int32_t dest_aux_labels_idx0x = dest_aux_labels_row_splits_data[dest_idx012]; // every arc in dest has at most one aux_label (as the aux_label is // the label of src on this arc) dest_aux_labels_values_data[dest_aux_labels_idx0x] = dest_arc.label; } int32_t src_aux_labels_idx01 = labels_arc_map_data[dest_idx012]; dest_arc.label = src_aux_labels_idx01 == -1 ? 0 : src_aux_labels_data[src_aux_labels_idx01]; }); *dest_aux_labels = Ragged<int32_t>(dest_aux_labels_shape, dest_aux_labels_values); if (arc_map != nullptr) *arc_map = src_arc_map; } // Will be used in InvertHost to process FsaVec input recursively. void RecursionWrapperAuxLabels(void (*f)(FsaOrVec &, Ragged<int32_t> &, FsaOrVec *, Ragged<int32_t> *), FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest, Ragged<int32_t> *dest_aux_labels) { NVTX_RANGE(K2_FUNC); // src is actually an FsaVec. Just recurse for now. 
K2_CHECK_EQ(src.NumAxes(), 3); int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Ragged<int32_t>> src_aux_labels_vec(num_fsas), dest_aux_labels_vec(num_fsas); int32_t tot_num_arcs = 0; Array1<int32_t> src_aux_labels_row_splits = src_aux_labels.RowSplits(1), src_aux_labels_values = src_aux_labels.values; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); int32_t cur_num_arcs = srcs[i].NumElements(); // below block get aux_labels for srcs[i] // TODO(haowen): replace with Range op for ragged { Array1<int32_t> row_splits = src_aux_labels_row_splits.Arange( tot_num_arcs, tot_num_arcs + cur_num_arcs + 1); Array1<int32_t> values = src_aux_labels_values.Arange(row_splits[0], row_splits.Back()); row_splits = Minus(row_splits, row_splits[0]); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); src_aux_labels_vec[i] = Ragged<int32_t>(shape, values); } f(srcs[i], src_aux_labels_vec[i], &(dests[i]), &(dest_aux_labels_vec[i])); tot_num_arcs += cur_num_arcs; } *dest = Stack(0, num_fsas, dests.data()); *dest_aux_labels = Cat(0, num_fsas, dest_aux_labels_vec.data()); } void InvertHost(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest, Ragged<int32_t> *dest_aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src_aux_labels.NumAxes(), 2); K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements()); K2_CHECK(dest != nullptr && dest_aux_labels != nullptr); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapperAuxLabels(InvertHost, src, src_aux_labels, dest, dest_aux_labels); } k2host::Fsa host_fsa = FsaToHostFsa(src); // k2host::AuxLabels is a k2host::Array2 k2host::AuxLabels host_aux_labels( src_aux_labels.Dim0(), src_aux_labels.NumElements(), src_aux_labels.RowSplits(1).Data(), src_aux_labels.values.Data()); k2host::FstInverter inverter(host_fsa, host_aux_labels); k2host::Array2Size<int32_t> fsa_size, aux_size; inverter.GetSizes(&fsa_size, &aux_size); FsaCreator fsa_creator(fsa_size); k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); Ragged2Creator<int32_t> ragged_creator(aux_size); k2host::AuxLabels host_dest_aux_labels = ragged_creator.GetHostArray2(); inverter.GetOutput(&host_dest_fsa, &host_dest_aux_labels); *dest = fsa_creator.GetFsa(); *dest_aux_labels = ragged_creator.GetRagged2(); } FsaOrVec ReplaceFsa(FsaVec &src, FsaOrVec &index, int32_t symbol_range_begin, Array1<int32_t> *arc_map_src /* = nullptr */, Array1<int32_t> *arc_map_index /* = nullptr */) { NVTX_RANGE(K2_FUNC); if (index.NumAxes() == 2) { FsaVec index_temp = FsaToFsaVec(index); return ReplaceFsa(src, index_temp, symbol_range_begin, arc_map_src, arc_map_index).RemoveAxis(0); } K2_CHECK_EQ(index.NumAxes(), 3); ContextPtr &c = index.Context(); K2_CHECK(c->IsCompatible(*src.Context())); RaggedShape state_to_arcs = GetLayer(index.shape, 1); // `state_to_foo` is a RaggedShape that, for each state in `index`, has a list // of length `tot_arcs + 1`. Interpret this as: one element for the state // itself, then one for each arc leaving it. This `foo` is an index that // corresponds to num-arcs plus one, but because it is really a placeholder // and we want to keep it distinct from other things, we call it `foo`. 
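  // Worked example (comment added; not from the original source): with
  // symbol_range_begin == 100 and src.Dim0() == 2, an arc in `index` labelled
  // 101 is expanded into a copy of src[1] (the arc itself becomes an epsilon
  // arc into that copy), while an arc labelled, say, 3 or 102 falls outside
  // [100, 102) and is left unchanged.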
RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1); int32_t foo_size = state_to_foo.NumElements(), num_src_fsas = src.Dim0(); // For each element of `state_to_foo`, `num_ostates_for` says how many states // there will be for this (state,foo) in the returned (output) FSA. Here, the // idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the // state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the // original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `index`] // and we set `num_ostates_for[idx01] = max(0, state_num-1)`, where state_num // is the states number of the fsa in `src` that would repalce into this arc, // the final state of this fsa will identify with the dest-state of this arc, // so we minus 1. Array1<int32_t> num_ostates_for(c, foo_size + 1); int32_t *num_ostates_for_data = num_ostates_for.Data(); const Arc *index_arcs_data = index.values.Data(); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(), *index_row_splits2_data = index.RowSplits(2).Data(), *state_to_foo_row_splits1_data = state_to_foo.RowSplits(1).Data(), *state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data(); K2_EVAL( c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void { // note: the idx01, idx0, idx0x are into `state_to_foo`. // This idx0 is a state-index into `index` (an idx01 w.r.t. `index`). int32_t idx0 = state_to_foo_row_ids1_data[idx01], idx0x = state_to_foo_row_splits1_data[idx0], idx1 = idx01 - idx0x; // idx1 is `foo`. int32_t num_ostates; if (idx1 == 0) { num_ostates = 1; // this is a copy of the original state. } else { int32_t index_arc_idx2 = idx1 - 1, index_state_idx01 = idx0, index_arc_idx01x = index_row_splits2_data[index_state_idx01], index_arc_idx012 = index_arc_idx01x + index_arc_idx2, index_label = index_arcs_data[index_arc_idx012].label, src_idx0 = index_label - symbol_range_begin; // will not replace for this arc if (src_idx0 < 0 || src_idx0 >= num_src_fsas) { num_ostates = 0; } else { int32_t src_idx0x = src_row_splits1_data[src_idx0], src_idx0x_next = src_row_splits1_data[src_idx0 + 1], src_len1 = src_idx0x_next - src_idx0x; num_ostates = max(src_len1 - 1, (int32_t)0); } } num_ostates_for_data[idx01] = num_ostates; }); ExclusiveSum(num_ostates_for, &num_ostates_for); Array1<int32_t> &foo_to_ostates_row_splits = num_ostates_for; RaggedShape foo_to_ostates = RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1); // to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate] // where foo is a general-purpose index that ranges over the (num_arcs + 1) of // the original state. RaggedShape to_ostates_shape = ComposeRaggedShapes3( GetLayer(index.shape, 0), state_to_foo, foo_to_ostates); // Below, `tos` means `to_ostates_shape`. const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(), *tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(), *tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(), *tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(), *tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(), *tos_row_ids3_data = to_ostates_shape.RowIds(3).Data(), *src_row_splits2_data = src.RowSplits(2).Data(); // `num_oarcs` gives the number of arcs in the returned (output) FSA for each // `ostate` (i.e. leaving each state in the returned FSA). 
int32_t tot_ostates = to_ostates_shape.NumElements(); Array1<int32_t> num_oarcs(c, tot_ostates + 1); int32_t *num_oarcs_data = num_oarcs.Data(); K2_EVAL( c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void { // All these indexes are into `to_ostates_shape`, indexed // `[fsa][state][foo][ostate].` int32_t idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx01x_next = tos_row_splits2_data[idx01 + 1], len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x, idx3 = idx0123 - idx012x; int32_t num_arcs; if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); // This ostate corresponds to the original state; // The original state had `orig_num_arcs` leaving it, which is the // number of `foo` indexes minus one. int32_t orig_num_arcs = len2 - 1; num_arcs = orig_num_arcs; } else { // All inserted states have the same num of arcs as in the src. // note: the prefix `index_` means it is an idxXXX w.r.t. `index`. // the prefix `src_` means the variable is an idxXXX w.r.t. `src`. int32_t index_arc_idx2 = idx2 - 1, index_arc_idx01x = index_row_splits2_data[idx01], index_arc_idx012 = index_arc_idx01x + index_arc_idx2, index_label = index_arcs_data[index_arc_idx012].label, src_fsa_idx0 = index_label - symbol_range_begin; K2_CHECK_GE(src_fsa_idx0, 0); K2_CHECK_LT(src_fsa_idx0, num_src_fsas); int32_t src_state_idx1 = idx3, src_state_idx0x = src_row_splits1_data[src_fsa_idx0], src_state_idx01 = src_state_idx0x + src_state_idx1, src_arc_idx01x = src_row_splits2_data[src_state_idx01], src_arc_idx01x_next = src_row_splits2_data[src_state_idx01 + 1], src_num_arcs = src_arc_idx01x_next - src_arc_idx01x; num_arcs = src_num_arcs; } num_oarcs_data[idx0123] = num_arcs; }); ExclusiveSum(num_oarcs, &num_oarcs); Array1<int32_t> &ostate_to_oarcs_row_splits = num_oarcs; RaggedShape ostate_to_oarcs = RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1); // `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc] RaggedShape full_shape = ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs); // for the lower-order row-splits and row-ids, use tot_row_{splits,ids}n_data const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(), *full_row_ids4_data = full_shape.RowIds(4).Data(); int32_t tot_oarcs = full_shape.NumElements(); K2_CHECK_GE(tot_oarcs, index.NumElements()); int32_t *arc_map_src_data = nullptr, *arc_map_index_data = nullptr; if (arc_map_src) { *arc_map_src = Array1<int32_t>(c, tot_oarcs); arc_map_src_data = arc_map_src->Data(); } if (arc_map_index) { *arc_map_index = Array1<int32_t>(c, tot_oarcs); arc_map_index_data = arc_map_index->Data(); } Array1<Arc> oarcs(c, tot_oarcs); Arc *oarcs_data = oarcs.Data(); const Arc *src_arcs_data = src.values.Data(); K2_EVAL( c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void { // All these indexes are into `full_shape`, indexed // `[fsa][state][foo][ostate][oarc].` // The prefix `index_` means it is an idxXXX w.r.t. `index`. // the prefix `src_` means the variable is an idxXXX w.r.t. `src`. 
int32_t idx0123 = full_row_ids4_data[idx01234], idx0123x = full_row_splits4_data[idx0123], idx4 = idx01234 - idx0123x, idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx3 = idx0123 - idx012x, idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx2 = idx012 - idx01x, idx0 = tos_row_ids1_data[idx01], idx0x = tos_row_splits1_data[idx0], idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]]; int32_t index_arc_idx2; // the idx2 (arc-index) into `index` if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); index_arc_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; // the idx4 enumerates the arcs leaving it.. } else { // this is one of the extra `foo` indexes, it's conrespoding index // into `index` is `foo` index minus 1 index_arc_idx2 = idx2 - 1; } int32_t index_arc_idx01x = index_row_splits2_data[idx01]; // index of the arc in source FSA, FSA that we're replaceing.. int32_t index_arc_idx012 = index_arc_idx01x + index_arc_idx2; Arc index_arc = index_arcs_data[index_arc_idx012]; // original destination state-index int32_t dest_state_idx01 = idx0x + index_arc.dest_state, orig_dest_state_idx0123 = tos_row_splits3_data[tos_row_splits2_data[dest_state_idx01]]; Arc src_arc; Arc oarc; oarc.src_state = idx0123 - idx0xxx; // initialize mapping index int32_t arc_src_map_idx = -1, arc_index_map_idx = -1; int32_t src_fsa_idx0 = index_arc.label - symbol_range_begin; // will not replace for this arc // dest state is the dest state of index arc if (src_fsa_idx0 < 0 || src_fsa_idx0 >= num_src_fsas) { K2_CHECK_EQ(idx2, 0); oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; oarc.label = index_arc.label; oarc.score = index_arc.score; arc_index_map_idx = index_arc_idx012; } else { int32_t src_state_idx0x = src_row_splits1_data[src_fsa_idx0], src_state_idx0x_next = src_row_splits1_data[src_fsa_idx0 + 1], num_states = src_state_idx0x_next - src_state_idx0x, src_state_idx1 = idx3, src_state_idx01 = src_state_idx0x + src_state_idx1, src_arc_idx01x = src_row_splits2_data[src_state_idx01], src_arc_idx2 = idx4, src_arc_idx012 = src_arc_idx01x + src_arc_idx2; src_arc = src_arcs_data[src_arc_idx012]; // handle the arcs belongs to index if (idx2 == 0) { // if the fsa to be replaced in is empty, this arc would point to // its original dest-state if (0 == num_states) { oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; } else { // this arc would point to the initial state of the fsa in src, // the state id bias to current state(the src-state) is the count // of all the ostates coresponding to the original state util now, // the idx4 enumerates foo index int32_t idx012_t = idx01x + 0, idx2_t = idx4, idx012x_t = tos_row_splits3_data[idx012_t], idx012x_next_t = tos_row_splits3_data[idx012_t + idx2_t + 1], bias = idx012x_next_t - idx012x_t; oarc.dest_state = idx0123 + bias - idx0xxx; } // set the label of the arc we are replacing to be 0(epsilon) oarc.label = 0; oarc.score = index_arc.score; arc_index_map_idx = index_arc_idx012; } else { // handle the arcs belongs to src // the arc point to the final state of the fsa in src would point to // the dest state of the arc we're replaceing if (src_arc.label == -1) { oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; } else { // this is the inner arc of the fsa in src int32_t dest_state_idx012x = idx0123 - idx3, dest_state_idx0123 = dest_state_idx012x + src_arc.dest_state; oarc.dest_state = dest_state_idx0123 - idx0xxx; } // arcs in src fsas that point to final state would set to epsilon // arc (label from -1 to 0) oarc.label = 
src_arc.label == -1 ? 0 : src_arc.label; oarc.score = src_arc.score; arc_src_map_idx = src_arc_idx012; } } if (arc_map_src_data) arc_map_src_data[idx01234] = arc_src_map_idx; if (arc_map_index_data) arc_map_index_data[idx01234] = arc_index_map_idx; oarcs_data[idx01234] = oarc; }); // remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes // axis 1, so remove axis 1 twice]. RaggedShape temp = RemoveAxis(full_shape, 1); return FsaVec(RemoveAxis(temp, 1), oarcs); } FsaOrVec RemoveEpsilonSelfLoops(FsaOrVec &src, Array1<int32_t> *arc_map /* = nullptr */) { NVTX_RANGE(K2_FUNC); if (src.NumAxes() == 2) { FsaVec temp = FsaToFsaVec(src); return RemoveEpsilonSelfLoops(temp, arc_map).RemoveAxis(0); } K2_CHECK_EQ(src.NumAxes(), 3); ContextPtr &c = src.Context(); int32_t num_arcs = src.NumElements(); Renumbering renumber_lists(c, num_arcs); char *keep_list_data = renumber_lists.Keep().Data(); const Arc *arcs_data = src.values.Data(); K2_EVAL( c, num_arcs, lambda_set_keep, (int32_t i)->void { Arc arc = arcs_data[i]; char keep; if (arc.label == 0 && arc.src_state == arc.dest_state) { // This arc is an epsilon self-loop, so it should be removed keep = 0; } else { keep = 1; } keep_list_data[i] = keep; }); FsaVec ans = Index(src, 2, renumber_lists.New2Old(), arc_map); return ans; } } // namespace k2
b61ed1f0ad7749786fcf9a3c6db6fc38cec6e7d7.cu
/** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu, * Wei Kang) * Mobvoi Inc. (authors: Fangjun Kuang) * * See LICENSE for clarification regarding multiple authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> #include <limits> #include <memory> #include <type_traits> #include <utility> #include <vector> #include "k2/csrc/array_ops.h" #include "k2/csrc/fsa_algo.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/host/aux_labels.h" #include "k2/csrc/host/connect.h" #include "k2/csrc/host/determinize.h" #include "k2/csrc/host/intersect.h" #include "k2/csrc/host/rmepsilon.h" #include "k2/csrc/host/topsort.h" #include "k2/csrc/host_shim.h" #include "k2/csrc/macros.h" #include "k2/csrc/rm_epsilon.h" // this contains a subset of the algorithms in fsa_algo.h; currently it just // contains one that are wrappings of the corresponding algorithms in // host/. namespace k2 { bool RecursionWrapper(bool (*f)(Fsa &, Fsa *, Array1<int32_t> *), Fsa &src, Fsa *dest, Array1<int32_t> *arc_map) { NVTX_RANGE(K2_FUNC); // src is actually an FsaVec. Just recurse for now. int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Array1<int32_t>> arc_maps(num_fsas); int32_t tot_num_arcs = 0; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); // Recurse. if (!f(srcs[i], &(dests[i]), (arc_map != nullptr ? 
&(arc_maps[i]) : nullptr))) return false; if (arc_map != nullptr) { // convert arc indexes in arc_maps from idx2 to idx012 arc_maps[i] = Plus(arc_maps[i], tot_num_arcs); tot_num_arcs += srcs[i].NumElements(); } } *dest = Stack(0, num_fsas, dests.data()); if (arc_map != nullptr) *arc_map = Cat(src.Context(), num_fsas, arc_maps.data()); return true; } bool ConnectHost(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapper(ConnectHost, src, dest, arc_map); } k2host::Fsa host_fsa = FsaToHostFsa(src); k2host::Connection c(host_fsa); k2host::Array2Size<int32_t> size; c.GetSizes(&size); FsaCreator creator(size); k2host::Fsa host_dest_fsa = creator.GetHostFsa(); int32_t *arc_map_data = nullptr; if (arc_map != nullptr) { *arc_map = Array1<int32_t>(src.Context(), size.size2); arc_map_data = arc_map->Data(); } bool ans = c.GetOutput(&host_dest_fsa, arc_map_data); *dest = creator.GetFsa(); return ans; } bool TopSortHost(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapper(TopSortHost, src, dest, arc_map); } k2host::Fsa host_fsa = FsaToHostFsa(src); k2host::TopSorter sorter(host_fsa); k2host::Array2Size<int32_t> size; sorter.GetSizes(&size); FsaCreator creator(size); k2host::Fsa host_dest_fsa = creator.GetHostFsa(); int32_t *arc_map_data = nullptr; if (arc_map != nullptr) { *arc_map = Array1<int32_t>(src.Context(), size.size2); arc_map_data = arc_map->Data(); } bool ans = sorter.GetOutput(&host_dest_fsa, arc_map_data); *dest = creator.GetFsa(); return ans; } bool Intersect(FsaOrVec &a_fsas, int32_t properties_a, FsaOrVec &b_fsas, int32_t properties_b, bool treat_epsilons_specially, FsaVec *out, Array1<int32_t> *arc_map_a, Array1<int32_t> *arc_map_b) { NVTX_RANGE(K2_FUNC); K2_CHECK(a_fsas.NumAxes() >= 2 && a_fsas.NumAxes() <= 3); K2_CHECK(b_fsas.NumAxes() >= 2 && b_fsas.NumAxes() <= 3); ContextPtr c = a_fsas.Context(); K2_CHECK_EQ(c->GetDeviceType(), kCpu); if (a_fsas.NumAxes() == 2) { FsaVec a_fsas_vec = FsaToFsaVec(a_fsas); return Intersect(a_fsas_vec, properties_a, b_fsas, properties_b, treat_epsilons_specially, out, arc_map_a, arc_map_b); } if (b_fsas.NumAxes() == 2) { FsaVec b_fsas_vec = FsaToFsaVec(b_fsas); return Intersect(a_fsas, properties_a, b_fsas_vec, properties_b, treat_epsilons_specially, out, arc_map_a, arc_map_b); } int32_t num_fsas_a = a_fsas.Dim0(), num_fsas_b = b_fsas.Dim0(); K2_CHECK_GT(num_fsas_a, 0); K2_CHECK_GT(num_fsas_b, 0); int32_t stride_a = 1, stride_b = 1; if (num_fsas_a != num_fsas_b) { if (num_fsas_a == 1) { stride_a = 0; } else if (num_fsas_b == 1) { stride_b = 0; } else { K2_CHECK_EQ(num_fsas_a, num_fsas_b); } // the check on the previous line will fail. 
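  // (Illustrative note, added): the stride trick above broadcasts a single
  // FSA against many, e.g. if a_fsas contains 1 FSA and b_fsas contains 3,
  // stride_a becomes 0 and the one FSA in a_fsas is intersected with each of
  // the 3 FSAs in b_fsas; if both sizes are greater than 1 they must match.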
} if (properties_a < 0) { Array1<int32_t> properties_a_out(c, num_fsas_a); GetFsaVecBasicProperties(a_fsas, &properties_a_out, &properties_a); } if (properties_b < 0) { Array1<int32_t> properties_b_out(c, num_fsas_b); GetFsaVecBasicProperties(b_fsas, &properties_b_out, &properties_b); } bool arc_sorted = (properties_a & kFsaPropertiesArcSorted) && (properties_b & kFsaPropertiesArcSorted); K2_CHECK(arc_sorted) << "Both a_fsas and b_fsas should be arc-sorted"; int32_t num_fsas = std::max(num_fsas_a, num_fsas_b); std::vector<std::unique_ptr<k2host::Intersection>> intersections(num_fsas); std::vector<k2host::Array2Size<int32_t>> sizes(num_fsas); for (int32_t i = 0; i < num_fsas; ++i) { k2host::Fsa host_fsa_a = FsaVecToHostFsa(a_fsas, i * stride_a), host_fsa_b = FsaVecToHostFsa(b_fsas, i * stride_b); intersections[i] = std::make_unique<k2host::Intersection>( host_fsa_a, host_fsa_b, treat_epsilons_specially, false); intersections[i]->GetSizes(&(sizes[i])); } FsaVecCreator creator(sizes); int32_t num_arcs = creator.NumArcs(); if (arc_map_a) *arc_map_a = Array1<int32_t>(c, num_arcs); if (arc_map_b) *arc_map_b = Array1<int32_t>(c, num_arcs); // the following few lines will allow us to add suitable offsets to the // `arc_map`. Array1<int32_t> a_fsas_row_splits12 = a_fsas.RowSplits(2)[a_fsas.RowSplits(1)], b_fsas_row_splits12 = b_fsas.RowSplits(2)[b_fsas.RowSplits(1)]; const int32_t *a_fsas_row_splits12_data = a_fsas_row_splits12.Data(), *b_fsas_row_splits12_data = b_fsas_row_splits12.Data(); bool ok = true; for (int32_t i = 0; i < num_fsas; ++i) { k2host::Fsa host_fsa_out = creator.GetHostFsa(i); int32_t arc_offset = creator.GetArcOffsetFor(i); int32_t *this_arc_map_a = (arc_map_a ? arc_map_a->Data() + arc_offset : nullptr), *this_arc_map_b = (arc_map_b ? arc_map_b->Data() + arc_offset : nullptr); bool ans = intersections[i]->GetOutput(&host_fsa_out, this_arc_map_a, this_arc_map_b); ok = ok && ans; int32_t this_num_arcs = creator.GetArcOffsetFor(i + 1) - arc_offset; if (arc_map_a) { int32_t arc_offset_a = a_fsas_row_splits12_data[i * stride_a]; for (int32_t i = 0; i < this_num_arcs; i++) if (this_arc_map_a[i] != -1) this_arc_map_a[i] += arc_offset_a; } if (arc_map_b) { int32_t arc_offset_b = b_fsas_row_splits12_data[i * stride_b]; for (int32_t i = 0; i < this_num_arcs; i++) if (this_arc_map_b[i] != -1) this_arc_map_b[i] += arc_offset_b; } } *out = creator.GetFsaVec(); return ok; } // Will be used in RemoveEpsilonHost and Determinize below to process FsaVec // input recursively. void RecursionWrapper(void (*f)(FsaOrVec &, FsaOrVec *, Ragged<int32_t> *), FsaOrVec &src, FsaOrVec *dest, Ragged<int32_t> *arc_deriv) { NVTX_RANGE(K2_FUNC); // src is actually an FsaVec. Just recurse for now. K2_CHECK_EQ(src.NumAxes(), 3); int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Ragged<int32_t>> arc_derivs(num_fsas); int32_t tot_num_arcs = 0; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); f(srcs[i], &(dests[i]), arc_deriv != nullptr ? 
&(arc_derivs[i]) : nullptr); if (arc_deriv != nullptr) { // convert arc indexes in arc_derivs from idx2 to idx012 Array1<int32_t> &values = arc_derivs[i].values; values = Plus(values, tot_num_arcs); tot_num_arcs += srcs[i].NumElements(); } } *dest = Stack(0, num_fsas, dests.data()); if (arc_deriv != nullptr) *arc_deriv = Cat(0, num_fsas, arc_derivs.data()); } void RemoveEpsilonHost(FsaOrVec &src, FsaOrVec *dest, Ragged<int32_t> *arc_derivs /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapper(RemoveEpsilonHost, src, dest, arc_derivs); } k2host::Fsa host_fsa = FsaToHostFsa(src); int32_t num_states = host_fsa.NumStates(); K2_CHECK_EQ(num_states, src.Dim0()); std::vector<double> max_forward_weights(num_states); std::vector<double> max_backward_weights(num_states); k2host::WfsaWithFbWeights max_wfsa(host_fsa, k2host::kMaxWeight, max_forward_weights.data(), max_backward_weights.data()); // pass infinity as beam since we don't do pruning here. float beam = std::numeric_limits<float>::infinity(); k2host::EpsilonsRemoverPrunedMax eps_remover(max_wfsa, beam); k2host::Array2Size<int32_t> fsa_size, arc_derivs_size; eps_remover.GetSizes(&fsa_size, &arc_derivs_size); FsaCreator fsa_creator(fsa_size); k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); K2_STATIC_ASSERT( (std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value)); Ragged2Creator<int32_t> ragged_creator(arc_derivs_size); k2host::Array2<int32_t *, int32_t> host_arc_derivs = ragged_creator.GetHostArray2(); eps_remover.GetOutput(&host_dest_fsa, &host_arc_derivs); *dest = fsa_creator.GetFsa(); if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2(); } void RemoveEpsilon(FsaOrVec &src, int32_t properties, FsaOrVec *dest, Ragged<int32_t> *arc_derivs) { if ((properties & kFsaPropertiesTopSortedAndAcyclic) != 0 && src.Context()->GetDeviceType() == kCpu) { // Host version of the algorithm RemoveEpsilonHost(src, dest, arc_derivs); } else { RemoveEpsilonDevice(src, dest, arc_derivs); } } void RemoveEpsilonAndAddSelfLoops(FsaOrVec &src, int32_t properties, FsaOrVec *dest, Ragged<int32_t> *arc_derivs) { NVTX_RANGE(K2_FUNC); Ragged<int32_t> arc_derivs1; FsaOrVec temp; RemoveEpsilon(src, properties, &temp, (arc_derivs != nullptr ? &arc_derivs1 : nullptr)); Array1<int32_t> arc_derivs2; AddEpsilonSelfLoops(temp, dest, (arc_derivs != nullptr ? &arc_derivs2 : nullptr)); if (arc_derivs != nullptr) { *arc_derivs = Index(arc_derivs1, 0, arc_derivs2, nullptr); } } void Determinize(FsaOrVec &src, DeterminizeWeightPushingType weight_pushing_type, FsaOrVec *dest, Ragged<int32_t> *arc_derivs /*=nullptr*/) { NVTX_RANGE(K2_FUNC); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Ragged<int32_t>> derivs_vector(num_fsas); int32_t tot_num_arcs = 0; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); Determinize(srcs[i], weight_pushing_type, &(dests[i]), arc_derivs != nullptr ? 
&(derivs_vector[i]) : nullptr); if (arc_derivs != nullptr) { // convert arc indexes in arc_derivs from idx2 to idx012 Array1<int32_t> &values = arc_derivs[i].values; values = Plus(values, tot_num_arcs); tot_num_arcs += srcs[i].NumElements(); } } *dest = Stack(0, num_fsas, dests.data()); if (arc_derivs != nullptr) *arc_derivs = Cat(0, num_fsas, derivs_vector.data()); return; } k2host::Fsa host_fsa = FsaToHostFsa(src); int32_t num_states = host_fsa.NumStates(); K2_CHECK_EQ(num_states, src.Dim0()); int32_t max_step = -1; // no limit k2host::FbWeightType host_weight_pushing_type = static_cast<k2host::FbWeightType>(static_cast<int>(weight_pushing_type)); k2host::DeterminizerMax determinizer(host_fsa, max_step, host_weight_pushing_type); k2host::Array2Size<int32_t> fsa_size, arc_derivs_size; determinizer.GetSizes(&fsa_size, &arc_derivs_size); FsaCreator fsa_creator(fsa_size); k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); K2_STATIC_ASSERT( (std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value)); Ragged2Creator<int32_t> ragged_creator(arc_derivs_size); k2host::Array2<int32_t *, int32_t> host_arc_derivs = ragged_creator.GetHostArray2(); determinizer.GetOutput(&host_dest_fsa, &host_arc_derivs); *dest = fsa_creator.GetFsa(); if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2(); } Fsa LinearFsa(const Array1<int32_t> &symbols) { NVTX_RANGE(K2_FUNC); ContextPtr &c = symbols.Context(); int32_t n = symbols.Dim(), num_states = n + 2, num_arcs = n + 1; Array1<int32_t> row_splits1 = Range(c, num_states + 1, 0), row_ids1 = Range(c, num_arcs, 0); int32_t *row_splits1_data = row_splits1.Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *symbols_data = symbols.Data(); K2_EVAL( c, num_arcs, lambda_set_arcs, (int32_t arc_idx01)->void { int32_t src_state = arc_idx01, dest_state = arc_idx01 + 1, // -1 == kFinalSymbol symbol = (arc_idx01 < n ? symbols_data[arc_idx01] : -1); if (arc_idx01 < n) K2_CHECK_NE(symbol, -1); float score = 0.0; arcs_data[arc_idx01] = Arc(src_state, dest_state, symbol, score); // the final state has no leaving arcs. if (arc_idx01 == 0) row_splits1_data[num_states] = num_arcs; }); return Ragged<Arc>(RaggedShape2(&row_splits1, &row_ids1, num_arcs), arcs); } FsaVec LinearFsas(const Ragged<int32_t> &symbols) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(symbols.NumAxes(), 2); ContextPtr &c = symbols.Context(); // if there are n symbols, there are n+2 states and n+1 arcs. RaggedShape states_shape = ChangeSublistSize(symbols.shape, 2); int32_t num_states = states_shape.NumElements(), num_arcs = symbols.NumElements() + symbols.Dim0(); // row_splits2 maps from state_idx01 to arc_idx012; row_ids2 does the reverse. // We'll set them in the lambda below. Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs); // If num_states equals to zero, the code below won't set the last value of // row_splits2, we should initialize here, or it will be a random value. 
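  // Worked example (added; the input is an assumption): for
  // symbols == [[1, 2], [3]], FSA 0 gets 4 states and 3 arcs (labels 1, 2,
  // -1) and FSA 1 gets 3 states and 2 arcs (labels 3, -1), so num_states is
  // 7 and num_arcs is 5; the lambda below then fills
  // row_splits2 == [0, 1, 2, 3, 3, 4, 5, 5] and row_ids2 == [0, 1, 2, 4, 5].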
if (num_states == 0) row_splits2 = 0; int32_t *row_ids2_data = row_ids2.Data(), *row_splits2_data = row_splits2.Data(); const int32_t *row_ids1_data = states_shape.RowIds(1).Data(), *row_splits1_data = states_shape.RowSplits(1).Data(), *symbols_data = symbols.values.Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); K2_EVAL( c, num_states, lambda, (int32_t state_idx01)->void { int32_t fsa_idx0 = row_ids1_data[state_idx01], state_idx0x = row_splits1_data[fsa_idx0], next_state_idx0x = row_splits1_data[fsa_idx0 + 1], idx1 = state_idx01 - state_idx0x; // the following works because each FSA has one fewer arcs than states. int32_t arc_idx0xx = state_idx0x - fsa_idx0, next_arc_idx0xx = next_state_idx0x - (fsa_idx0 + 1), // the following may look a bit wrong.. here, the idx1 is the // same as the idx12 if the arc exists, because each state has // one arc leaving it (except the last state). arc_idx012 = arc_idx0xx + idx1; // the following works because each FSA has one fewer symbols than arcs // (however it doesn't work for the last arc of each FSA; we check // below.) int32_t symbol_idx01 = arc_idx012 - fsa_idx0; if (arc_idx012 < next_arc_idx0xx) { int32_t src_state = idx1, dest_state = idx1 + 1, symbol = (arc_idx012 + 1 < next_arc_idx0xx ? symbols_data[symbol_idx01] : -1); // kFinalSymbol float score = 0.0; arcs_data[arc_idx012] = Arc(src_state, dest_state, symbol, score); row_ids2_data[arc_idx012] = state_idx01; } else { // The following ensures that the last element of row_splits1_data // (i.e. row_splits1[num_states]) is set to num_arcs. It also writes // something unnecessary for the last state of each FSA but the last // one, which will cause 2 threads to write the same item to the same // location. Note that there is no arc with index `arc_idx01`, if you // reach here. row_splits2_data[state_idx01 + 1] = arc_idx012; } row_splits2_data[state_idx01] = arc_idx012; }); return Ragged<Arc>( RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), num_states, &row_splits2, &row_ids2, num_arcs), arcs); } FsaVec LevenshteinGraphs(const Ragged<int32_t> &symbols, float ins_del_score /* = -0.501 */, Array1<int32_t> *aux_labels /*= nullptr*/, Array1<float> *score_offsets /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(symbols.NumAxes(), 2); ContextPtr &c = symbols.Context(); // For each fsa, the number of states will be number of symbols plus 2, we // plus 2 because we need an extra super final arc for each fsa. RaggedShape fsa_to_states = ChangeSublistSize(symbols.shape, 2); int32_t num_states = fsa_to_states.NumElements(); Array1<int32_t> num_arcs_for(c, num_states + 1); int32_t *num_arcs_for_data = num_arcs_for.Data(); // "fts" is short for fsa to states const int32_t *fts_row_splits1_data = fsa_to_states.RowSplits(1).Data(), *fts_row_ids1_data = fsa_to_states.RowIds(1).Data(); // set the arcs number for each state K2_EVAL( c, num_states, lambda_set_num_arcs, (int32_t state_idx01)->void { int32_t fsa_idx0 = fts_row_ids1_data[state_idx01], final_state = fts_row_splits1_data[fsa_idx0 + 1] - 1, current_num_arcs = 3; // normally there are three arcs, // self-loop and two arcs pointing to // the next state. 
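        // Illustrative count (added; the input is an assumption): for
        // symbols == [[1, 2]] there are 4 states: states 0 and 1 keep 3 arcs
        // each, state 2 (just before the final state) gets 2 arcs (its
        // self-loop plus the -1 arc into the final state), and the final
        // state 3 gets none, matching num_arcs = 2 * 3 + 1 * 2 = 8 below.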
if (state_idx01 == final_state - 1) current_num_arcs = 2; else if (state_idx01 == final_state) current_num_arcs = 0; num_arcs_for_data[state_idx01] = current_num_arcs; }); ExclusiveSum(num_arcs_for, &num_arcs_for); Array1<int32_t> &states_to_arcs_row_splits = num_arcs_for; int32_t num_arcs = symbols.NumElements() * 3 + symbols.Dim0() * 2; RaggedShape states_to_arcs = RaggedShape2(&states_to_arcs_row_splits, nullptr, num_arcs); // shape with a index of [fsa][state][arc] RaggedShape shape = ComposeRaggedShapes(fsa_to_states, states_to_arcs); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *row_splits1_data = shape.RowSplits(1).Data(), *row_ids1_data = shape.RowIds(1).Data(), *row_splits2_data = shape.RowSplits(2).Data(), *row_ids2_data = shape.RowIds(2).Data(), *symbols_data = symbols.values.Data(); int32_t *aux_labels_data = nullptr; if (aux_labels != nullptr) { *aux_labels = Array1<int32_t>(c, num_arcs); aux_labels_data = aux_labels->Data(); } float *score_offsets_data = nullptr; if (score_offsets != nullptr) { *score_offsets = Array1<float>(c, num_arcs); score_offsets_data = score_offsets->Data(); } K2_EVAL( c, num_arcs, lambda_set_arcs, (int32_t arc_idx012)->void { int32_t state_idx01 = row_ids2_data[arc_idx012], fsa_idx0 = row_ids1_data[state_idx01], state_idx0x = row_splits1_data[fsa_idx0], final_state_idx01 = row_splits1_data[fsa_idx0 + 1] - 1, state_idx1 = state_idx01 - state_idx0x, arc_idx01x = row_splits2_data[state_idx01], arc_idx2 = arc_idx012 - arc_idx01x, sym_state_idx01 = state_idx01 - 2 * fsa_idx0, current_symbol = 0, aux_labels_value = 0; if (state_idx01 != final_state_idx01 - 1 && state_idx01 != final_state_idx01) { current_symbol = symbols_data[sym_state_idx01]; K2_CHECK((current_symbol != 0) && (current_symbol != -1)) << "0 and -1 are not expected to be a symbol."; } float score_offset_value = 0; Arc arc; arc.src_state = state_idx1; switch (arc_idx2) { case 0: // the self loop arc arc.label = 0; arc.dest_state = state_idx1; arc.score = ins_del_score; aux_labels_value = 0; score_offset_value = ins_del_score - (-0.5); break; case 1: // the arc pointing to next state with blank if (state_idx01 == final_state_idx01 - 1) { // the arc pointing to // final state arc.label = -1; arc.score = 0; aux_labels_value = -1; } else { arc.label = 0; arc.score = -0.5; aux_labels_value = current_symbol; } arc.dest_state = state_idx1 + 1; break; case 2: // the arc pointing to the next state with symbol arc.label = current_symbol; arc.dest_state = state_idx1 + 1; arc.score = 0; aux_labels_value = current_symbol; break; default: K2_LOG(FATAL) << "Arc index must be less than 3"; } arcs_data[arc_idx012] = arc; if (aux_labels) aux_labels_data[arc_idx012] = aux_labels_value; if (score_offsets) score_offsets_data[arc_idx012] = score_offset_value; }); return Ragged<Arc>(shape, arcs); } FsaVec CtcGraphs(const Ragged<int32_t> &symbols, bool modified /*= false*/, Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(symbols.NumAxes(), 2); ContextPtr &c = symbols.Context(); int32_t num_fsas = symbols.Dim0(); Array1<int32_t> num_states_for(c, num_fsas + 1); int32_t *num_states_for_data = num_states_for.Data(); const int32_t *symbol_row_split1_data = symbols.RowSplits(1).Data(); // symbols indexed with [fsa][symbol] // for each fsa we need `symbol_num * 2 + 1 + 1` states, `symbol_num * 2 + 1` // means that we need a blank state on each side of a symbol state, `+ 1` is // for final state in k2 K2_EVAL( c, num_fsas, lambda_set_num_states, (int32_t 
fsa_idx0)->void { int32_t symbol_idx0x = symbol_row_split1_data[fsa_idx0], symbol_idx0x_next = symbol_row_split1_data[fsa_idx0 + 1], symbol_num = symbol_idx0x_next - symbol_idx0x; num_states_for_data[fsa_idx0] = symbol_num * 2 + 2; }); ExclusiveSum(num_states_for, &num_states_for); Array1<int32_t> &fsa_to_states_row_splits = num_states_for; RaggedShape fsa_to_states = RaggedShape2(&fsa_to_states_row_splits, nullptr, -1); int32_t num_states = fsa_to_states.NumElements(); Array1<int32_t> num_arcs_for(c, num_states + 1); int32_t *num_arcs_for_data = num_arcs_for.Data(); const int32_t *fts_row_splits1_data = fsa_to_states.RowSplits(1).Data(), *fts_row_ids1_data = fsa_to_states.RowIds(1).Data(), *symbol_data = symbols.values.Data(); // set the arcs number for each state K2_EVAL( c, num_states, lambda_set_num_arcs, (int32_t state_idx01)->void { int32_t fsa_idx0 = fts_row_ids1_data[state_idx01], // we minus fsa_idx0 here, because we are adding one more state, // the final state for each fsa sym_state_idx01 = state_idx01 / 2 - fsa_idx0, remainder = state_idx01 % 2, current_num_arcs = 2; // normally there are two arcs, self-loop // and arc pointing to the next state // blank state always has two arcs if (remainder) { // symbol state int32_t sym_final_state = symbol_row_split1_data[fsa_idx0 + 1]; // There are no arcs for final states if (sym_state_idx01 == sym_final_state) { current_num_arcs = 0; } else if (modified) { current_num_arcs = 3; } else { int32_t current_symbol = symbol_data[sym_state_idx01], // we set the next symbol of the last symbol to -1, so // the following if clause will always be true, which means // we will have 3 arcs for last symbol state next_symbol = (sym_state_idx01 + 1) == sym_final_state ? -1 : symbol_data[sym_state_idx01 + 1]; // symbols must be not equal to -1, which is specially used in k2 K2_CHECK_NE(current_symbol, -1); // if current_symbol equals next_symbol, we need a blank state // between them, so there are two arcs for this state // otherwise, this state will point to blank state and next symbol // state, so we need three arcs here. // Note: for the simpilfied topology (standard equals false), there // are always 3 arcs leaving symbol states. 
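            // For example (added; the input is an assumption): with
            // symbols == [[1, 1, 2]] and the standard topology, the symbol
            // state for the first `1` keeps 2 arcs because the next symbol
            // is also `1`, while the symbol states for the second `1` and
            // for `2` (whose "next symbol" is taken to be -1) get 3 arcs
            // each.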
if (current_symbol != next_symbol) current_num_arcs = 3; } } num_arcs_for_data[state_idx01] = current_num_arcs; }); ExclusiveSum(num_arcs_for, &num_arcs_for); Array1<int32_t> &states_to_arcs_row_splits = num_arcs_for; RaggedShape states_to_arcs = RaggedShape2(&states_to_arcs_row_splits, nullptr, -1); // ctc_shape with a index of [fsa][state][arc] RaggedShape ctc_shape = ComposeRaggedShapes(fsa_to_states, states_to_arcs); int32_t num_arcs = ctc_shape.NumElements(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *ctc_row_splits1_data = ctc_shape.RowSplits(1).Data(), *ctc_row_ids1_data = ctc_shape.RowIds(1).Data(), *ctc_row_splits2_data = ctc_shape.RowSplits(2).Data(), *ctc_row_ids2_data = ctc_shape.RowIds(2).Data(); int32_t *aux_labels_data = nullptr; if (aux_labels != nullptr) { *aux_labels = Array1<int32_t>(c, num_arcs); aux_labels_data = aux_labels->Data(); } K2_EVAL( c, num_arcs, lambda_set_arcs, (int32_t arc_idx012)->void { int32_t state_idx01 = ctc_row_ids2_data[arc_idx012], fsa_idx0 = ctc_row_ids1_data[state_idx01], state_idx0x = ctc_row_splits1_data[fsa_idx0], state_idx1 = state_idx01 - state_idx0x, arc_idx01x = ctc_row_splits2_data[state_idx01], arc_idx2 = arc_idx012 - arc_idx01x, sym_state_idx01 = state_idx01 / 2 - fsa_idx0, remainder = state_idx01 % 2, sym_final_state = symbol_row_split1_data[fsa_idx0 + 1]; bool final_state = sym_final_state == sym_state_idx01; int32_t current_symbol = final_state ? -1 : symbol_data[sym_state_idx01]; Arc arc; arc.score = 0; arc.src_state = state_idx1; int32_t aux_labels_value = 0; if (remainder) { if (final_state) return; int32_t next_symbol = (sym_state_idx01 + 1) == sym_final_state ? -1 : symbol_data[sym_state_idx01 + 1]; // for standard topology, the symbol state can not point to next // symbol state if the next symbol is identical to current symbol. if (current_symbol == next_symbol && !modified) { K2_CHECK_LT(arc_idx2, 2); arc.label = arc_idx2 == 0 ? 0 : current_symbol; arc.dest_state = arc_idx2 == 0 ? state_idx1 + 1 : state_idx1; } else { switch (arc_idx2) { case 0: // the arc pointing to blank state arc.label = 0; arc.dest_state = state_idx1 + 1; break; case 1: // the self loop arc arc.label = current_symbol; arc.dest_state = state_idx1; break; case 2: // the arc pointing to the next symbol state arc.label = next_symbol; aux_labels_value = sym_state_idx01 + 1 == sym_final_state ? 0 : next_symbol; arc.dest_state = state_idx1 + 2; break; default: K2_LOG(FATAL) << "Arc index must be less than 3"; } } } else { K2_CHECK_LT(arc_idx2, 2); arc.label = arc_idx2 == 0 ? 0 : current_symbol; arc.dest_state = arc_idx2 == 0 ? state_idx1 : state_idx1 + 1; aux_labels_value = (arc_idx2 == 0 || final_state) ? 0 : current_symbol; } arcs_data[arc_idx012] = arc; if (aux_labels) aux_labels_data[arc_idx012] = aux_labels_value; }); return Ragged<Arc>(ctc_shape, arcs); } Fsa CtcTopo(const ContextPtr &c, int32_t max_token, bool modified, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels); if (modified) { // plusing 2 here to include 0(epsilon) and final state int32_t states = max_token + 2; // for modified topology, the number of self loops and leaving arcs for // state 0 are all the number of states minus one. // and there two arcs(one for self loop, the other points to state 0) for // each of other states. 
see links belove for details : // https://github.com/k2-fsa/k2/issues/746#issuecomment-856421616 // https://github.com/k2-fsa/snowfall/pull/209 int32_t num_arcs = (states - 1) * 2 + (states - 2) * 2; *aux_labels = Array1<int32_t>(c, num_arcs); Array1<int32_t> row_ids(c, num_arcs); Array1<Arc> arcs(c, num_arcs); int32_t *row_ids_data = row_ids.Data(), *aux_labels_data = aux_labels->Data(); Arc *arcs_data = arcs.Data(); K2_EVAL( c, num_arcs, lambad_set_row_ids_and_arcs, (int32_t idx01) -> void { Arc arc; arc.score = 0; if (idx01 < states - 1) { // state 0 self loop arc.src_state = 0; arc.dest_state = 0; arc.label = idx01; row_ids_data[idx01] = 0; aux_labels_data[idx01] = idx01; } else if (idx01 < (states - 1) * 2) { // arcs leaving state 0 int32_t dest_state = idx01 - (states - 1) + 1; arc.src_state = 0; arc.dest_state = dest_state; arc.label = dest_state == states - 1 ? -1 : dest_state; row_ids_data[idx01] = 0; aux_labels_data[idx01] = dest_state == states -1 ? -1 : dest_state; } else { // arcs for other states int32_t bias = idx01 - (states - 1) * 2; int32_t state = bias / 2 + 1; arc.src_state = state; arc.label = state; if (bias % 2) arc.dest_state = 0; else arc.dest_state = state; row_ids_data[idx01] = state; aux_labels_data[idx01] = 0; } arcs_data[idx01] = arc; }); Array1<int32_t> row_splits(c, states + 1); RowIdsToRowSplits(row_ids, &row_splits); return Ragged<Arc>(RaggedShape2(&row_splits, &row_ids, num_arcs), arcs); } else { // plusing 2 here to include 0(epsilon) and final state int32_t states = max_token + 2, dim0 = states - 1, // minusing 1 here because there is not // any leaving arcs for final state dim1 = max_token + 2, // there are number of states arcs leaving // each state for standard topolopy num_arcs = dim0 * dim1; *aux_labels = Array1<int32_t>(c, num_arcs); Array1<int32_t> row_ids(c, num_arcs); Array1<Arc> arcs(c, num_arcs); int32_t *row_ids_data = row_ids.Data(), *aux_labels_data = aux_labels->Data(); Arc *arcs_data = arcs.Data(); K2_EVAL2( c, dim0, dim1, lambda_set_row_ids_and_arcs, (int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; Arc arc; arc.src_state = i; arc.dest_state = j; arc.label = j == (dim1 - 1) ? -1 : j; arc.score = 0; arcs_data[i * dim1 + j] = arc; int32_t olabel = i == j ? 0 : (j == (dim1 - 1) ? -1 : j); aux_labels_data[i * dim1 + j] = olabel; }); Array1<int32_t> row_splits(c, states + 1); RowIdsToRowSplits(row_ids, &row_splits); return Ragged<Arc>(RaggedShape2(&row_splits, &row_ids, dim0 * dim1), arcs); } } void ArcSort(Fsa *fsa) { if (fsa->NumAxes() < 2) return; // it is empty SortSublists<Arc>(fsa); } void ArcSort(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*= nullptr*/) { NVTX_RANGE(K2_FUNC); if (!src.values.IsValid()) return; if (arc_map != nullptr) *arc_map = Array1<int32_t>(src.Context(), src.NumElements()); Fsa tmp(src.shape, src.values.Clone()); SortSublists<Arc>(&tmp, arc_map); *dest = tmp; } // TODO(fangjun): use the following method suggested by Dan // // ... incidentally, it's possible to further optimize this so the run // time is less than linear, by using methods similar to what I use // in GetStateBatches(); imagine computing a table that instead of // the best traceback, is the best 2-step traceback; and then the 4-step // traceback, and so on. There's no need for this right now, since the // forward-pass algorithm is already at least linear-time in the length // of this path. But we can consider it for the future. 
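// Usage sketch for ShortestPath() below (added for exposition; how
// `entering_arcs` is obtained is an assumption -- it typically comes from a
// tropical-semiring forward pass, e.g. GetForwardScores() with
// log_semiring == false):
//
//   Array1<int32_t> entering_arcs;  // best arc entering each state, or -1
//                                   // where there is none (e.g. start states)
//   // ... fill entering_arcs via the forward pass ...
//   Ragged<int32_t> best = ShortestPath(fsas, entering_arcs);
//   // `best` holds, for each FSA, the arc indexes of the best path ordered
//   // from the start state to the final state (empty for an empty FSA).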
Ragged<int32_t> ShortestPath(FsaVec &fsas, const Array1<int32_t> &entering_arcs) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); const int32_t *entering_arcs_data = entering_arcs.Data(); const Arc *arcs_data = fsas.values.Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = fsas.TotSize(1); ContextPtr &context = fsas.Context(); // allocate an extra element for ExclusiveSum Array1<int32_t> num_best_arcs_per_fsa(context, num_fsas + 1); int32_t *num_best_arcs_per_fsa_data = num_best_arcs_per_fsa.Data(); const int32_t *row_splits1_data = fsas.RowSplits(1).Data(); // -1 represents an invalid arc_index. // This extra array avoids an extra iteration over `entering_arcs`. Array1<int32_t> state_best_arc_index_array(context, num_states, -1); int32_t *state_best_arc_index_array_data = state_best_arc_index_array.Data(); K2_EVAL( context, num_fsas, lambda_set_num_best_arcs, (int32_t fsas_idx0) { int32_t state_idx01 = row_splits1_data[fsas_idx0]; int32_t state_idx01_next = row_splits1_data[fsas_idx0 + 1]; if (state_idx01_next == state_idx01) { // this fsa is empty, so there is no best path available num_best_arcs_per_fsa_data[fsas_idx0] = 0; return; } int32_t final_state_idx01 = state_idx01_next - 1; int32_t cur_state = final_state_idx01; int32_t cur_index = entering_arcs_data[cur_state]; int32_t num_arcs = 0; int32_t *p = state_best_arc_index_array_data + final_state_idx01; while (cur_index != -1) { *p = cur_index; --p; cur_state = arcs_data[cur_index].src_state + state_idx01; cur_index = entering_arcs_data[cur_state]; ++num_arcs; } num_best_arcs_per_fsa_data[fsas_idx0] = num_arcs; }); ExclusiveSum(num_best_arcs_per_fsa, &num_best_arcs_per_fsa); RaggedShape shape = RaggedShape2(&num_best_arcs_per_fsa, nullptr, -1); const int32_t *shape_row_splits1_data = shape.RowSplits(1).Data(); const int32_t *shape_row_ids1_data = shape.RowIds(1).Data(); const int32_t *ans_row_splits_data = shape.RowSplits(1).Data(); Array1<int32_t> best_path_arc_indexes(context, shape.NumElements()); int32_t *best_path_arc_indexes_data = best_path_arc_indexes.Data(); K2_EVAL( context, shape.NumElements(), lambda_set_best_arcs, (int32_t ans_idx01) { int32_t fsa_idx0 = shape_row_ids1_data[ans_idx01]; int32_t ans_idx0x = shape_row_splits1_data[fsa_idx0]; int32_t ans_idx1 = ans_idx01 - ans_idx0x; int32_t num_arcs_this_fsa = num_best_arcs_per_fsa_data[fsa_idx0 + 1] - num_best_arcs_per_fsa_data[fsa_idx0]; if (num_arcs_this_fsa == 0) return; int32_t final_state_idx01_this_fsa = row_splits1_data[fsa_idx0 + 1] - 1; const int32_t *p_start = state_best_arc_index_array_data + final_state_idx01_this_fsa - num_arcs_this_fsa + 1; best_path_arc_indexes_data[ans_idx01] = p_start[ans_idx1]; }); Ragged<int32_t> ans(shape, best_path_arc_indexes); return ans; } void AddEpsilonSelfLoops(FsaOrVec &src, FsaOrVec *dest, Array1<int32_t> *arc_map /*= nullptr*/) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); const int32_t *old_row_splits1_data = src.RowSplits(1).Data(), *old_row_ids1_data = src.RowIds(1).Data(); const Arc *old_arcs_data = src.values.Data(); if (src.NumAxes() == 2) { int32_t num_states = src.Dim0(); if (num_states < 2) { K2_CHECK_EQ(num_states, 0); *dest = src; if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0); return; } int32_t old_num_arcs = src.TotSize(1), new_num_arcs = old_num_arcs + (num_states - 1); Array1<int32_t> new_row_splits(c, num_states + 1), new_row_ids(c, new_num_arcs); Array1<Arc> new_arcs(c, new_num_arcs); int32_t *new_row_splits1_data = new_row_splits.Data(), *new_row_ids1_data = 
new_row_ids.Data(); Arc *new_arcs_data = new_arcs.Data(); int32_t *arc_map_data = nullptr; if (arc_map) { *arc_map = Array1<int32_t>(c, new_num_arcs); arc_map_data = arc_map->Data(); } ParallelRunner pr(c); { With w(pr.NewStream()); K2_EVAL( c, old_num_arcs, lambda_copy_data, (int32_t arc_idx01)->void { int32_t state_idx0 = old_row_ids1_data[arc_idx01], new_arc_idx01 = arc_idx01 + 1 + state_idx0; // the "+1" above is because we put the self-loop first. new_row_ids1_data[new_arc_idx01] = state_idx0; new_arcs_data[new_arc_idx01] = old_arcs_data[arc_idx01]; if (arc_map_data) arc_map_data[new_arc_idx01] = arc_idx01; }); } { With w(pr.NewStream()); K2_EVAL( c, num_states, lambda_set_new_data, (int32_t state_idx0)->void { int32_t old_arc_idx0x = old_row_splits1_data[state_idx0], new_arc_idx0x = old_arc_idx0x + state_idx0; new_row_splits1_data[state_idx0] = new_arc_idx0x; if (state_idx0 + 1 < num_states) { // not final-state int32_t new_arc_idx01 = new_arc_idx0x; // the 1st arc is the loop new_row_ids1_data[new_arc_idx01] = state_idx0; new_arcs_data[new_arc_idx01] = Arc(state_idx0, state_idx0, 0, 0.0); if (arc_map_data) arc_map_data[new_arc_idx01] = -1; } else { // Note: if num_states was zero we would have returned above, so // we don't have to worry about empty FSAs. new_row_splits1_data[num_states] = new_arc_idx0x; } }); } pr.Finish(); *dest = Ragged<Arc>( RaggedShape2(&new_row_splits, &new_row_ids, new_num_arcs), new_arcs); } else { K2_CHECK_EQ(src.NumAxes(), 3); // Get a vector saying, for each FSA, whether it's nonempty. int32_t num_fsas = src.Dim0(), num_states = src.TotSize(1), old_num_arcs = src.TotSize(2); if (num_states == 0) { *dest = src; if (arc_map) *arc_map = Array1<int32_t>(c, 0); return; } Array1<int32_t> fsa_nonempty(c, num_fsas + 1); int32_t *fsa_nonempty_data = fsa_nonempty.Data(); K2_EVAL( c, num_fsas, lambda_set_fsa_nonempty, (int32_t fsa_idx0)->void { fsa_nonempty_data[fsa_idx0] = (old_row_splits1_data[fsa_idx0 + 1] > old_row_splits1_data[fsa_idx0]); }); ExclusiveSum(fsa_nonempty, &fsa_nonempty); const int32_t *old_row_splits2_data = src.RowSplits(2).Data(), *old_row_ids2_data = src.RowIds(2).Data(); int32_t num_nonempty_fsas = fsa_nonempty.Back(), new_num_arcs = old_num_arcs + num_states - num_nonempty_fsas; // we subtract `num_nonempty_fsas` because final-states don't get a // self-loop. Array1<int32_t> new_row_splits2(c, num_states + 1), new_row_ids2(c, new_num_arcs); Array1<Arc> new_arcs(c, new_num_arcs); // fsa_idx0_mod_data maps from fsa_idx0 to a modified fsa_idx0 that // "doesn't count" FSAs with zero states. const int32_t *fsa_idx0_mod_data = fsa_nonempty_data; int32_t *new_row_splits2_data = new_row_splits2.Data(), *new_row_ids2_data = new_row_ids2.Data(); Arc *new_arcs_data = new_arcs.Data(); int32_t *arc_map_data = nullptr; if (arc_map) { *arc_map = Array1<int32_t>(c, new_num_arcs); arc_map_data = arc_map->Data(); } ParallelRunner pr(c); { With w(pr.NewStream()); K2_EVAL( c, old_num_arcs, lambda_copy_data, (int32_t arc_idx012)->void { int32_t state_idx01 = old_row_ids2_data[arc_idx012], fsa_idx0 = old_row_ids1_data[state_idx01], fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0], new_arc_idx012 = arc_idx012 + 1 + state_idx01 - fsa_idx0_mod; // The "+1" above is because we put the self-loop first. The // "-fsa_idx0_mod" is because final-states don't get a self-loop. 
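          // Worked example (added; the sizes are assumptions): suppose FSA 0
          // has states 0..2 and FSA 1 has states 3..5, both nonempty.  For an
          // old arc leaving state_idx01 = 4 we get fsa_idx0_mod = 1, so the
          // arc moves right by 1 + 4 - 1 = 4 slots: one self-loop for each of
          // states 0, 1, 3 and 4 precedes it (state 2, FSA 0's final state,
          // gets no self-loop).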
new_row_ids2_data[new_arc_idx012] = state_idx01; new_arcs_data[new_arc_idx012] = old_arcs_data[arc_idx012]; if (arc_map_data) arc_map_data[new_arc_idx012] = arc_idx012; }); } { With w(pr.NewStream()); K2_EVAL( c, num_states, lambda_set_new_data, (int32_t state_idx01)->void { int32_t fsa_idx0 = old_row_ids1_data[state_idx01], fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0], state_idx0x = old_row_splits1_data[fsa_idx0], next_state_idx0x = old_row_splits1_data[fsa_idx0 + 1], old_arc_idx01x = old_row_splits2_data[state_idx01]; // Below the "+ state_idx01" is because each state gets a self-loop, // and the "- fsa_idx0_mod" is because final-states don't get a // self-loop. int32_t new_arc_idx01x = old_arc_idx01x + state_idx01 - fsa_idx0_mod; // The self-loop arc is the first arc: int32_t new_arc_idx012 = new_arc_idx01x; new_row_splits2_data[state_idx01] = new_arc_idx01x; if (state_idx01 + 1 < next_state_idx0x) { // not final-state new_row_ids2_data[new_arc_idx012] = state_idx01; int32_t state_idx1 = state_idx01 - state_idx0x; new_arcs_data[new_arc_idx012] = Arc(state_idx1, state_idx1, 0, 0.0); if (arc_map_data) arc_map_data[new_arc_idx012] = -1; } else if (state_idx01 + 1 == num_states) { // Note: if num_states was zero we would have returned above, so // we dont have to worry about an empty FsaVec. new_row_splits2_data[num_states] = new_arc_idx01x; } }); } pr.Finish(); *dest = Ragged<Arc>(RaggedShape3(&src.RowSplits(1), &src.RowIds(1), num_states, &new_row_splits2, &new_row_ids2, new_num_arcs), new_arcs); } } Fsa Union(FsaVec &fsas, Array1<int32_t> *arc_map /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &context = fsas.Context(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); const int32_t *fsas_row_splits2_data = fsas.RowSplits(2).Data(); const int32_t *fsas_row_ids1_data = fsas.RowIds(1).Data(); const int32_t *fsas_row_ids2_data = fsas.RowIds(2).Data(); const Arc *arcs_data = fsas.values.Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = fsas.TotSize(1); int32_t num_arcs = fsas.TotSize(2); // A new start state and a new final state are added (+2). 
// The final state of each fsa is removed (-num_fsas) int32_t num_out_states = num_states + 2 - num_fsas; int32_t out_final_state = num_out_states - 1; // For every fsa, a new arc is added from the new start state // to its original start state (+num_fsas) int32_t num_out_arcs = num_arcs + num_fsas; Array1<int32_t> out_row_ids(context, num_out_arcs); Array1<Arc> out_arcs(context, num_out_arcs); Array1<int32_t> tmp_arc_map(context, num_out_arcs, -1); int32_t *tmp_arc_map_data = tmp_arc_map.Data(); int32_t *out_row_ids_data = out_row_ids.Data(); Arc *out_arcs_data = out_arcs.Data(); K2_EVAL( context, num_arcs, lambda_set_out, (int32_t fsas_arc_idx012) { int32_t fsas_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012]; int32_t fsas_idx0 = fsas_row_ids1_data[fsas_state_idx01]; int32_t this_fsa_final_state_idx01 = fsas_row_splits1_data[fsas_idx0 + 1] - 1; K2_DCHECK_GT(this_fsa_final_state_idx01, fsas_state_idx01) << "We support only FSAs with at least two states at present"; int32_t fsas_state_idx0x = fsas_row_splits1_data[fsas_idx0]; int32_t fsas_state_idx1 = fsas_state_idx01 - fsas_state_idx0x; int32_t this_fsa_final_state_idx1 = this_fsa_final_state_idx01 - fsas_state_idx0x; int32_t fsas_arc_idx0xx = fsas_row_splits2_data[fsas_state_idx0x]; // fsa0: +1 (a new start state) // fsa1: +0 (the final state of fsa0 is removed) // fsa2: -1 (the final state of fsa1 is removed) // fsa3: -2 (the final state of fsa2 is removed) int32_t state_offset = 1 - fsas_idx0; int32_t out_state_idx0 = fsas_state_idx01 + state_offset; int32_t out_arc_idx01 = fsas_arc_idx012 + num_fsas; out_row_ids_data[out_arc_idx01] = out_state_idx0; Arc arc = arcs_data[fsas_arc_idx012]; K2_DCHECK_EQ(arc.src_state, fsas_state_idx1); if (arc.dest_state == this_fsa_final_state_idx1) arc.dest_state = out_final_state; else arc.dest_state = arc.dest_state - arc.src_state + out_state_idx0; arc.src_state = out_state_idx0; out_arcs_data[out_arc_idx01] = arc; tmp_arc_map_data[out_arc_idx01] = fsas_arc_idx012; if (fsas_arc_idx0xx == fsas_arc_idx012) { // add a new arc from the new start state to the start state // of this fsa // // WARNING: we cannot use fsas_state_idx01 here // since the start state may have no leaving arcs! Arc arc(0, fsas_state_idx0x + state_offset, 0, 0); out_arcs_data[fsas_idx0] = arc; out_row_ids_data[fsas_idx0] = 0; } }); if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map); Array1<int32_t> out_row_splits(context, num_out_states + 1); RowIdsToRowSplits(out_row_ids, &out_row_splits); RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs); Fsa ans = Ragged<Arc>(shape, out_arcs); return ans; } Fsa Closure(Fsa &fsa, Array1<int32_t> *arc_map /* = nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsa.NumAxes(), 2) << "We support only a single FSA."; ContextPtr &c = fsa.Context(); int32_t num_states = fsa.Dim0(); if (num_states < 2) { K2_CHECK_EQ(num_states, 0) << "An empty fsa should contain no states at all"; if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0); return fsa; // return itself if the input fsa is empty } const int32_t *fsa_row_splits_data = fsa.RowSplits(1).Data(); const int32_t *fsa_row_ids_data = fsa.RowIds(1).Data(); const Arc *fsa_arcs_data = fsa.values.Data(); int32_t fsa_final_state = num_states - 1; int32_t num_out_states = num_states; // An arc from the start state to the final state with label == -1 is added. 
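  // Worked example (added; the input is an assumption): for an input FSA
  // with arcs {0 -> 1 / label 5, 1 -> 2 / label -1} (state 2 final), the code
  // below produces arcs {0 -> 1 / 5, 0 -> 2 / -1, 1 -> 0 / 0}: the arc into
  // the old final state is redirected to state 0 with label 0 (epsilon), and
  // the new -1 arc is placed right after the start state's last arc, giving
  // arc_map == [0, -1, 1].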
int32_t num_out_arcs = fsa.values.Dim() + 1; Array1<int32_t> out_row_ids(c, num_out_arcs); int32_t *out_row_ids_data = out_row_ids.Data(); Array1<Arc> out_arcs(c, num_out_arcs); Arc *out_arcs_data = out_arcs.Data(); Array1<int32_t> tmp_arc_map(c, num_out_arcs); int32_t *tmp_arc_map_data = tmp_arc_map.Data(); K2_EVAL( c, fsa.values.Dim(), lambda_set_arcs, (int32_t fsa_arc_idx01) { int32_t fsa_state_idx0 = fsa_row_ids_data[fsa_arc_idx01]; int32_t fsa_arc_idx0x = fsa_row_splits_data[fsa_state_idx0]; int32_t fsa_arc_idx1 = fsa_arc_idx01 - fsa_arc_idx0x; int32_t this_state_num_arcs = fsa_row_splits_data[fsa_state_idx0 + 1] - fsa_arc_idx0x; Arc arc = fsa_arcs_data[fsa_arc_idx01]; if (arc.dest_state == fsa_final_state) { // modify arcs entering the final state such that: // - dest_state == 0 // - label == 0 arc.dest_state = 0; K2_DCHECK_EQ(arc.label, -1); arc.label = 0; } int out_arc_idx01; if (arc.src_state > 0) { // this arc is not originated from the start state, so its index is // incremented out_arc_idx01 = fsa_arc_idx01 + 1; } else { out_arc_idx01 = fsa_arc_idx01; if (fsa_arc_idx1 == this_state_num_arcs - 1) { // This is the last arc of the original start state, // so we add a new arc just after it. Arc new_arc(0, fsa_final_state, -1, 0.0f); out_arcs_data[out_arc_idx01 + 1] = new_arc; out_row_ids_data[out_arc_idx01 + 1] = 0; tmp_arc_map_data[out_arc_idx01 + 1] = -1; } } // it may happen that the start state has no leaving arcs if (fsa_row_splits_data[1] == 0) { Arc new_arc(0, fsa_final_state, -1, 0.0f); out_arcs_data[0] = new_arc; out_row_ids_data[0] = 0; tmp_arc_map_data[0] = -1; } tmp_arc_map_data[out_arc_idx01] = fsa_arc_idx01; out_arcs_data[out_arc_idx01] = arc; out_row_ids_data[out_arc_idx01] = arc.src_state; }); if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map); Array1<int32_t> out_row_splits(c, num_out_states + 1); int32_t *out_row_splits_data = out_row_splits.Data(); K2_EVAL( c, out_row_splits.Dim(), lambda_set_row_splits, (int32_t i) { if (i == 0) out_row_splits_data[i] = 0; else out_row_splits_data[i] = fsa_row_splits_data[i] + 1; }); RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs); Fsa ans = Ragged<Arc>(shape, out_arcs); return ans; } FsaOrVec ExpandArcs(FsaOrVec &fsas, RaggedShape &labels_shape, Array1<int32_t> *fsas_arc_map /*=nullptr*/, Array1<int32_t> *labels_arc_map /*=nullptr*/) { NVTX_RANGE(K2_FUNC); if (fsas.NumAxes() == 2) { FsaVec fsas_temp = FsaToFsaVec(fsas); return ExpandArcs(fsas_temp, labels_shape, fsas_arc_map, labels_arc_map) .RemoveAxis(0); } K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(labels_shape.NumAxes(), 2); K2_CHECK_EQ(fsas.NumElements(), labels_shape.Dim0()); ContextPtr &c = fsas.Context(); K2_CHECK(c->IsCompatible(*labels_shape.Context())); RaggedShape state_to_arcs = GetLayer(fsas.shape, 1); // `state_to_foo` is a RaggedShape that, for each state in `fsas`, has a list // of length `num_arcs + 1`, where `num_arcs` is the number of arcs leaving // this state in `fsas`. Interpret this as: one element for the state // itself, then one for each arc leaving it. This `foo` is an index that // corresponds to num-arcs plus one, but because it is really a placeholder // and we want to keep it distinct from other things, we call it `foo`. RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1); int32_t foo_size = state_to_foo.NumElements(); // For each element of `state_to_foo`, `num_ostates_for` says how many states // there will be for this (state,foo) in the returned (output) FSA. 
Here, the // idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the // state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the // original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `fsas`], // and we set `num_ostates_for[idx01] = max(0, seq_len-1)`, where seq_len is // the length of the sequence in `labels_shape` corresponding to this // arc-index. Array1<int32_t> num_ostates_for(c, foo_size + 1); int32_t *num_ostates_for_data = num_ostates_for.Data(); const int32_t *labels_row_splits1_data = labels_shape.RowSplits(1).Data(), *fsas_row_splits2_data = fsas.RowSplits(2).Data(), *state_to_foo_row_splits1_data = state_to_foo.RowSplits(1).Data(), *state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data(); K2_EVAL( c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void { // note: the idx01, idx0, idx0x are into `state_to_foo`. // This idx0 is a state-index into `fsas` (an idx01 w.r.t. `fsas`). int32_t idx0 = state_to_foo_row_ids1_data[idx01], idx0x = state_to_foo_row_splits1_data[idx0], idx1 = idx01 - idx0x; // idx1 is `foo`. int32_t num_ostates; if (idx1 == 0) { num_ostates = 1; // this is a copy of the original state. } else { int32_t fsas_arc_idx2 = idx1 - 1, fsas_state_idx01 = idx0, fsas_arc_idx01x = fsas_row_splits2_data[fsas_state_idx01], fsas_arc_idx012 = fsas_arc_idx01x + fsas_arc_idx2, labels_shape_idx0 = fsas_arc_idx012, labels_shape_idx0x = labels_row_splits1_data[labels_shape_idx0], labels_shape_idx0x_next = labels_row_splits1_data[labels_shape_idx0 + 1], labels_shape_len1 = labels_shape_idx0x_next - labels_shape_idx0x; // A sequence of n symbols will require n-1 extra states to represent // it. num_ostates = max(labels_shape_len1 - 1, (int32_t)0); } num_ostates_for_data[idx01] = num_ostates; }); ExclusiveSum(num_ostates_for, &num_ostates_for); Array1<int32_t> &foo_to_ostates_row_splits = num_ostates_for; RaggedShape foo_to_ostates = RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1); // to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate] // where foo is a general-purpose index that ranges over the (num_arcs + 1) of // the original state. RaggedShape to_ostates_shape = ComposeRaggedShapes3( GetLayer(fsas.shape, 0), state_to_foo, foo_to_ostates); // Below, `tos` means `to_ostates_shape`. const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(), *tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(), *tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(), *tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(), *tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(), *tos_row_ids3_data = to_ostates_shape.RowIds(3).Data(); // `num_oarcs` gives the number of arcs in the returned (output) FSA for each // `ostate` (i.e. leaving each state in the returned FSA). 
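  // Illustrative example (added; the numbers are assumptions): if a state in
  // `fsas` has 2 leaving arcs whose label sequences in `labels_shape` have
  // lengths 3 and 1, its `foo` axis has 3 entries: foo=0 keeps the original
  // state (1 ostate), foo=1 adds max(3 - 1, 0) = 2 intermediate ostates for
  // the length-3 sequence, and foo=2 adds max(1 - 1, 0) = 0, i.e. 3 ostates
  // in total for that original state.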
int32_t tot_ostates = to_ostates_shape.NumElements(); Array1<int32_t> num_oarcs(c, tot_ostates + 1); int32_t *num_oarcs_data = num_oarcs.Data(); K2_EVAL( c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void { // All these indexes are into `to_ostates_shape`, indexed // `[fsa][state][foo][ostate].` int32_t idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx01x_next = tos_row_splits2_data[idx01 + 1], len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x, idx3 = idx0123 - idx012x; int32_t num_arcs; if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); // This ostate corresponds to the original state; it is not one of the // extra states added to support chains of arcs. // The original state had `orig_num_arcs` leaving it, which is the // number of `foo` indexes minus one. int32_t orig_num_arcs = len2 - 1; num_arcs = orig_num_arcs; } else { // All newly-created states have exactly one arc leaving them. num_arcs = 1; } num_oarcs_data[idx0123] = num_arcs; }); ExclusiveSum(num_oarcs, &num_oarcs); Array1<int32_t> &ostate_to_oarcs_row_splits = num_oarcs; RaggedShape ostate_to_oarcs = RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1); // `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc] RaggedShape full_shape = ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs); // for the lower-order row-splits and row-ids, use tot_row_{splits,idx}n_data const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(), *full_row_ids4_data = full_shape.RowIds(4).Data(); int32_t tot_oarcs = full_shape.NumElements(); K2_CHECK_GE(tot_oarcs, fsas.NumElements()); int32_t *fsas_arc_map_data = nullptr, *labels_arc_map_data = nullptr; if (fsas_arc_map) { *fsas_arc_map = Array1<int32_t>(c, tot_oarcs); fsas_arc_map_data = fsas_arc_map->Data(); } if (labels_arc_map) { *labels_arc_map = Array1<int32_t>(c, tot_oarcs); labels_arc_map_data = labels_arc_map->Data(); } Array1<Arc> oarcs(c, tot_oarcs); Arc *oarcs_data = oarcs.Data(); const Arc *arcs_data = fsas.values.Data(); K2_EVAL( c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void { // All these indexes are into `full_shape`, indexed // `[fsa][state][foo][ostate][oarc].` int32_t idx0123 = full_row_ids4_data[idx01234], idx0123x = full_row_splits4_data[idx0123], idx4 = idx01234 - idx0123x, idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx3 = idx0123 - idx012x, idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx2 = idx012 - idx01x, idx0 = tos_row_ids1_data[idx01], idx0x = tos_row_splits1_data[idx0], idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]]; int32_t fsa_idx01x = fsas_row_splits2_data[idx01]; int32_t fsa_idx2; // the idx2 (arc-index) into `fsas` of the input arc // that's most relevant to us.. int32_t seq_pos; // seq_pos is our index into the sequence of arcs that // we produce for each original arc if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); fsa_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; the idx4 // enumerates the arcs leaving it.. seq_pos = 0; } else { // this is one of the extra `foo` indexes, one per arc in the input // FSA that leaves this state; each of those `foo` indexes has // (seq_len - 1) states in it (idx3=0,1..seq_len-1); and each state // has one arc leaving it (idx4==0). K2_CHECK_EQ(idx4, 0); fsa_idx2 = idx2 - 1; seq_pos = idx3 + 1; } int32_t fsa_idx012 = fsa_idx01x + fsa_idx2; // index of the arc in // source FSA FSA that // we're expanding.. 
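        // (Added, hedged) Continuing the example above: an input arc with a
        // length-3 label sequence becomes a chain of 3 output arcs.  The
        // first (seq_pos == 0) keeps the input arc's label and score; the
        // rest get label 0 and score 0 (for an arc whose label is -1, the -1
        // is instead kept on the last arc of the chain).  labels_arc_map
        // records, for the k-th arc of the chain, the k-th element of the
        // label sequence, so a caller such as Invert() can attach those
        // labels afterwards.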
Arc iarc = arcs_data[fsa_idx012]; int32_t labels_idx0x = labels_row_splits1_data[fsa_idx012], labels_next_idx0x = labels_row_splits1_data[fsa_idx012 + 1], labels_len1 = labels_next_idx0x - labels_idx0x; // labels_len1 is length of label sequence for this arc K2_CHECK_LT(seq_pos, max(int32_t(1), labels_len1)); int32_t dest_idx01 = idx0x + iarc.dest_state, // original destination // state-index orig_dest_idx0123 = tos_row_splits3_data[tos_row_splits2_data[dest_idx01]]; Arc oarc; oarc.src_state = idx0123 - idx0xxx; // If this is the last arc in the sequence, the dest-state is the // original dest-state of the arc. Otherwise the dest-state is one of // the new states that we created. The idx123 will be an idx1 after // removing axes. int32_t dest_idx123; if (seq_pos + 1 >= labels_len1) { // last arc in sequence.. dest_idx123 = orig_dest_idx0123 - idx0xxx; } else { int32_t dest_state_idx2 = fsa_idx2 + 1, // index `foo` equals // orig_arc_idx+1 dest_state_idx3 = seq_pos, // ostate index.. dest_idx012 = idx01x + dest_state_idx2, dest_idx012x = tos_row_splits3_data[dest_idx012], dest_idx0123 = dest_idx012x + dest_state_idx3; dest_idx123 = dest_idx0123 - idx0xxx; } oarc.dest_state = dest_idx123; // indexes 1,2,3 will be combined; in // the output FSA it will be an idx1. if (fsas_arc_map_data) fsas_arc_map_data[idx01234] = (seq_pos == 0 ? fsa_idx012 : -1); if (labels_arc_map_data) labels_arc_map_data[idx01234] = (seq_pos < labels_len1 ? labels_idx0x + seq_pos : -1); if (iarc.label != -1) { // normal case.. label goes on 1st arc in sequence oarc.label = (seq_pos == 0 ? iarc.label : 0); } else { // If the arc was to the final-state, we need to keep the label on the // last arc of the sequence to keep the output valid. The following // would be "seq_pos + 1 == labels_len1 ? -1 : 0", but we make it ">=" // not "=" to account for the case seq_pos=0, labels_len1 = 0. oarc.label = (seq_pos + 1 >= labels_len1 ? -1 : 0); } oarc.score = (seq_pos == 0 ? iarc.score : 0.0); oarcs_data[idx01234] = oarc; }); // remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes // axis 1, so remove axis 1 twice]. 
RaggedShape temp = RemoveAxis(full_shape, 1); return FsaVec(RemoveAxis(temp, 1), oarcs); } void Invert(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest, Ragged<int32_t> *dest_aux_labels, Array1<int32_t> *arc_map /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src_aux_labels.NumAxes(), 2); K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements()); K2_CHECK(dest != nullptr && dest_aux_labels != nullptr); ContextPtr c = GetContext(src, src_aux_labels); if (src.NumAxes() == 2) { Fsa *srcs = &src; FsaVec src_vec = CreateFsaVec(1, &srcs), dest_vec; Invert(src_vec, src_aux_labels, &dest_vec, dest_aux_labels, arc_map); *dest = GetFsaVecElement(dest_vec, 0); return; } Array1<int32_t> src_arc_map, labels_arc_map; *dest = ExpandArcs(src, src_aux_labels.shape, &src_arc_map, &labels_arc_map); // swap labels and aux_labels int32_t dest_num_arcs = dest->NumElements(); Arc *dest_arcs_data = dest->values.Data(); const int32_t *labels_arc_map_data = labels_arc_map.Data(), *src_aux_labels_data = src_aux_labels.values.Data(); Array1<int32_t> dest_aux_labels_row_splits(c, dest_num_arcs + 1); int32_t *dest_aux_labels_row_splits_data = dest_aux_labels_row_splits.Data(); K2_EVAL( c, dest_num_arcs, lambda_set_dest_aux_labels_num, (int32_t dest_idx012)->void { Arc &dest_arc = dest_arcs_data[dest_idx012]; // we'll remove epsilons in dest_aux_labels dest_aux_labels_row_splits_data[dest_idx012] = dest_arc.label == 0 ? 0 : 1; }); ExclusiveSum(dest_aux_labels_row_splits.Arange(0, dest_num_arcs), &dest_aux_labels_row_splits); RaggedShape dest_aux_labels_shape = RaggedShape2(&dest_aux_labels_row_splits, nullptr, -1); Array1<int32_t> dest_aux_labels_values(c, dest_aux_labels_shape.NumElements()); int32_t *dest_aux_labels_values_data = dest_aux_labels_values.Data(); K2_EVAL( c, dest_num_arcs, lambda_set_dest_labels_and_aux_labels, (int32_t dest_idx012)->void { Arc &dest_arc = dest_arcs_data[dest_idx012]; // swap label and aux_label if (dest_arc.label != 0) { int32_t dest_aux_labels_idx0x = dest_aux_labels_row_splits_data[dest_idx012]; // every arc in dest has at most one aux_label (as the aux_label is // the label of src on this arc) dest_aux_labels_values_data[dest_aux_labels_idx0x] = dest_arc.label; } int32_t src_aux_labels_idx01 = labels_arc_map_data[dest_idx012]; dest_arc.label = src_aux_labels_idx01 == -1 ? 0 : src_aux_labels_data[src_aux_labels_idx01]; }); *dest_aux_labels = Ragged<int32_t>(dest_aux_labels_shape, dest_aux_labels_values); if (arc_map != nullptr) *arc_map = src_arc_map; } // Will be used in InvertHost to process FsaVec input recursively. void RecursionWrapperAuxLabels(void (*f)(FsaOrVec &, Ragged<int32_t> &, FsaOrVec *, Ragged<int32_t> *), FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest, Ragged<int32_t> *dest_aux_labels) { NVTX_RANGE(K2_FUNC); // src is actually an FsaVec. Just recurse for now. 
K2_CHECK_EQ(src.NumAxes(), 3); int32_t num_fsas = src.shape.Dim0(); std::vector<Fsa> srcs(num_fsas), dests(num_fsas); std::vector<Ragged<int32_t>> src_aux_labels_vec(num_fsas), dest_aux_labels_vec(num_fsas); int32_t tot_num_arcs = 0; Array1<int32_t> src_aux_labels_row_splits = src_aux_labels.RowSplits(1), src_aux_labels_values = src_aux_labels.values; for (int32_t i = 0; i < num_fsas; ++i) { srcs[i] = src.Index(0, i); int32_t cur_num_arcs = srcs[i].NumElements(); // below block get aux_labels for srcs[i] // TODO(haowen): replace with Range op for ragged { Array1<int32_t> row_splits = src_aux_labels_row_splits.Arange( tot_num_arcs, tot_num_arcs + cur_num_arcs + 1); Array1<int32_t> values = src_aux_labels_values.Arange(row_splits[0], row_splits.Back()); row_splits = Minus(row_splits, row_splits[0]); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); src_aux_labels_vec[i] = Ragged<int32_t>(shape, values); } f(srcs[i], src_aux_labels_vec[i], &(dests[i]), &(dest_aux_labels_vec[i])); tot_num_arcs += cur_num_arcs; } *dest = Stack(0, num_fsas, dests.data()); *dest_aux_labels = Cat(0, num_fsas, dest_aux_labels_vec.data()); } void InvertHost(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest, Ragged<int32_t> *dest_aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(src_aux_labels.NumAxes(), 2); K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements()); K2_CHECK(dest != nullptr && dest_aux_labels != nullptr); int32_t num_axes = src.NumAxes(); if (num_axes < 2 || num_axes > 3) { K2_LOG(FATAL) << "Input has bad num-axes " << num_axes; } else if (num_axes == 3) { return RecursionWrapperAuxLabels(InvertHost, src, src_aux_labels, dest, dest_aux_labels); } k2host::Fsa host_fsa = FsaToHostFsa(src); // k2host::AuxLabels is a k2host::Array2 k2host::AuxLabels host_aux_labels( src_aux_labels.Dim0(), src_aux_labels.NumElements(), src_aux_labels.RowSplits(1).Data(), src_aux_labels.values.Data()); k2host::FstInverter inverter(host_fsa, host_aux_labels); k2host::Array2Size<int32_t> fsa_size, aux_size; inverter.GetSizes(&fsa_size, &aux_size); FsaCreator fsa_creator(fsa_size); k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa(); Ragged2Creator<int32_t> ragged_creator(aux_size); k2host::AuxLabels host_dest_aux_labels = ragged_creator.GetHostArray2(); inverter.GetOutput(&host_dest_fsa, &host_dest_aux_labels); *dest = fsa_creator.GetFsa(); *dest_aux_labels = ragged_creator.GetRagged2(); } FsaOrVec ReplaceFsa(FsaVec &src, FsaOrVec &index, int32_t symbol_range_begin, Array1<int32_t> *arc_map_src /* = nullptr */, Array1<int32_t> *arc_map_index /* = nullptr */) { NVTX_RANGE(K2_FUNC); if (index.NumAxes() == 2) { FsaVec index_temp = FsaToFsaVec(index); return ReplaceFsa(src, index_temp, symbol_range_begin, arc_map_src, arc_map_index).RemoveAxis(0); } K2_CHECK_EQ(index.NumAxes(), 3); ContextPtr &c = index.Context(); K2_CHECK(c->IsCompatible(*src.Context())); RaggedShape state_to_arcs = GetLayer(index.shape, 1); // `state_to_foo` is a RaggedShape that, for each state in `index`, has a list // of length `tot_arcs + 1`. Interpret this as: one element for the state // itself, then one for each arc leaving it. This `foo` is an index that // corresponds to num-arcs plus one, but because it is really a placeholder // and we want to keep it distinct from other things, we call it `foo`. 
RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1); int32_t foo_size = state_to_foo.NumElements(), num_src_fsas = src.Dim0(); // For each element of `state_to_foo`, `num_ostates_for` says how many states // there will be for this (state,foo) in the returned (output) FSA. Here, the // idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the // state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the // original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `index`] // and we set `num_ostates_for[idx01] = max(0, state_num-1)`, where state_num // is the states number of the fsa in `src` that would repalce into this arc, // the final state of this fsa will identify with the dest-state of this arc, // so we minus 1. Array1<int32_t> num_ostates_for(c, foo_size + 1); int32_t *num_ostates_for_data = num_ostates_for.Data(); const Arc *index_arcs_data = index.values.Data(); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(), *index_row_splits2_data = index.RowSplits(2).Data(), *state_to_foo_row_splits1_data = state_to_foo.RowSplits(1).Data(), *state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data(); K2_EVAL( c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void { // note: the idx01, idx0, idx0x are into `state_to_foo`. // This idx0 is a state-index into `index` (an idx01 w.r.t. `index`). int32_t idx0 = state_to_foo_row_ids1_data[idx01], idx0x = state_to_foo_row_splits1_data[idx0], idx1 = idx01 - idx0x; // idx1 is `foo`. int32_t num_ostates; if (idx1 == 0) { num_ostates = 1; // this is a copy of the original state. } else { int32_t index_arc_idx2 = idx1 - 1, index_state_idx01 = idx0, index_arc_idx01x = index_row_splits2_data[index_state_idx01], index_arc_idx012 = index_arc_idx01x + index_arc_idx2, index_label = index_arcs_data[index_arc_idx012].label, src_idx0 = index_label - symbol_range_begin; // will not replace for this arc if (src_idx0 < 0 || src_idx0 >= num_src_fsas) { num_ostates = 0; } else { int32_t src_idx0x = src_row_splits1_data[src_idx0], src_idx0x_next = src_row_splits1_data[src_idx0 + 1], src_len1 = src_idx0x_next - src_idx0x; num_ostates = max(src_len1 - 1, (int32_t)0); } } num_ostates_for_data[idx01] = num_ostates; }); ExclusiveSum(num_ostates_for, &num_ostates_for); Array1<int32_t> &foo_to_ostates_row_splits = num_ostates_for; RaggedShape foo_to_ostates = RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1); // to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate] // where foo is a general-purpose index that ranges over the (num_arcs + 1) of // the original state. RaggedShape to_ostates_shape = ComposeRaggedShapes3( GetLayer(index.shape, 0), state_to_foo, foo_to_ostates); // Below, `tos` means `to_ostates_shape`. const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(), *tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(), *tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(), *tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(), *tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(), *tos_row_ids3_data = to_ostates_shape.RowIds(3).Data(), *src_row_splits2_data = src.RowSplits(2).Data(); // `num_oarcs` gives the number of arcs in the returned (output) FSA for each // `ostate` (i.e. leaving each state in the returned FSA). 
int32_t tot_ostates = to_ostates_shape.NumElements(); Array1<int32_t> num_oarcs(c, tot_ostates + 1); int32_t *num_oarcs_data = num_oarcs.Data(); K2_EVAL( c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void { // All these indexes are into `to_ostates_shape`, indexed // `[fsa][state][foo][ostate].` int32_t idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx01x_next = tos_row_splits2_data[idx01 + 1], len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x, idx3 = idx0123 - idx012x; int32_t num_arcs; if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); // This ostate corresponds to the original state; // The original state had `orig_num_arcs` leaving it, which is the // number of `foo` indexes minus one. int32_t orig_num_arcs = len2 - 1; num_arcs = orig_num_arcs; } else { // All inserted states have the same num of arcs as in the src. // note: the prefix `index_` means it is an idxXXX w.r.t. `index`. // the prefix `src_` means the variable is an idxXXX w.r.t. `src`. int32_t index_arc_idx2 = idx2 - 1, index_arc_idx01x = index_row_splits2_data[idx01], index_arc_idx012 = index_arc_idx01x + index_arc_idx2, index_label = index_arcs_data[index_arc_idx012].label, src_fsa_idx0 = index_label - symbol_range_begin; K2_CHECK_GE(src_fsa_idx0, 0); K2_CHECK_LT(src_fsa_idx0, num_src_fsas); int32_t src_state_idx1 = idx3, src_state_idx0x = src_row_splits1_data[src_fsa_idx0], src_state_idx01 = src_state_idx0x + src_state_idx1, src_arc_idx01x = src_row_splits2_data[src_state_idx01], src_arc_idx01x_next = src_row_splits2_data[src_state_idx01 + 1], src_num_arcs = src_arc_idx01x_next - src_arc_idx01x; num_arcs = src_num_arcs; } num_oarcs_data[idx0123] = num_arcs; }); ExclusiveSum(num_oarcs, &num_oarcs); Array1<int32_t> &ostate_to_oarcs_row_splits = num_oarcs; RaggedShape ostate_to_oarcs = RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1); // `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc] RaggedShape full_shape = ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs); // for the lower-order row-splits and row-ids, use tot_row_{splits,ids}n_data const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(), *full_row_ids4_data = full_shape.RowIds(4).Data(); int32_t tot_oarcs = full_shape.NumElements(); K2_CHECK_GE(tot_oarcs, index.NumElements()); int32_t *arc_map_src_data = nullptr, *arc_map_index_data = nullptr; if (arc_map_src) { *arc_map_src = Array1<int32_t>(c, tot_oarcs); arc_map_src_data = arc_map_src->Data(); } if (arc_map_index) { *arc_map_index = Array1<int32_t>(c, tot_oarcs); arc_map_index_data = arc_map_index->Data(); } Array1<Arc> oarcs(c, tot_oarcs); Arc *oarcs_data = oarcs.Data(); const Arc *src_arcs_data = src.values.Data(); K2_EVAL( c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void { // All these indexes are into `full_shape`, indexed // `[fsa][state][foo][ostate][oarc].` // The prefix `index_` means it is an idxXXX w.r.t. `index`. // the prefix `src_` means the variable is an idxXXX w.r.t. `src`. 
int32_t idx0123 = full_row_ids4_data[idx01234], idx0123x = full_row_splits4_data[idx0123], idx4 = idx01234 - idx0123x, idx012 = tos_row_ids3_data[idx0123], idx012x = tos_row_splits3_data[idx012], idx3 = idx0123 - idx012x, idx01 = tos_row_ids2_data[idx012], idx01x = tos_row_splits2_data[idx01], idx2 = idx012 - idx01x, idx0 = tos_row_ids1_data[idx01], idx0x = tos_row_splits1_data[idx0], idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]]; int32_t index_arc_idx2; // the idx2 (arc-index) into `index` if (idx2 == 0) { K2_CHECK_EQ(idx3, 0); index_arc_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; // the idx4 enumerates the arcs leaving it.. } else { // this is one of the extra `foo` indexes, it's conrespoding index // into `index` is `foo` index minus 1 index_arc_idx2 = idx2 - 1; } int32_t index_arc_idx01x = index_row_splits2_data[idx01]; // index of the arc in source FSA, FSA that we're replaceing.. int32_t index_arc_idx012 = index_arc_idx01x + index_arc_idx2; Arc index_arc = index_arcs_data[index_arc_idx012]; // original destination state-index int32_t dest_state_idx01 = idx0x + index_arc.dest_state, orig_dest_state_idx0123 = tos_row_splits3_data[tos_row_splits2_data[dest_state_idx01]]; Arc src_arc; Arc oarc; oarc.src_state = idx0123 - idx0xxx; // initialize mapping index int32_t arc_src_map_idx = -1, arc_index_map_idx = -1; int32_t src_fsa_idx0 = index_arc.label - symbol_range_begin; // will not replace for this arc // dest state is the dest state of index arc if (src_fsa_idx0 < 0 || src_fsa_idx0 >= num_src_fsas) { K2_CHECK_EQ(idx2, 0); oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; oarc.label = index_arc.label; oarc.score = index_arc.score; arc_index_map_idx = index_arc_idx012; } else { int32_t src_state_idx0x = src_row_splits1_data[src_fsa_idx0], src_state_idx0x_next = src_row_splits1_data[src_fsa_idx0 + 1], num_states = src_state_idx0x_next - src_state_idx0x, src_state_idx1 = idx3, src_state_idx01 = src_state_idx0x + src_state_idx1, src_arc_idx01x = src_row_splits2_data[src_state_idx01], src_arc_idx2 = idx4, src_arc_idx012 = src_arc_idx01x + src_arc_idx2; src_arc = src_arcs_data[src_arc_idx012]; // handle the arcs belongs to index if (idx2 == 0) { // if the fsa to be replaced in is empty, this arc would point to // its original dest-state if (0 == num_states) { oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; } else { // this arc would point to the initial state of the fsa in src, // the state id bias to current state(the src-state) is the count // of all the ostates coresponding to the original state util now, // the idx4 enumerates foo index int32_t idx012_t = idx01x + 0, idx2_t = idx4, idx012x_t = tos_row_splits3_data[idx012_t], idx012x_next_t = tos_row_splits3_data[idx012_t + idx2_t + 1], bias = idx012x_next_t - idx012x_t; oarc.dest_state = idx0123 + bias - idx0xxx; } // set the label of the arc we are replacing to be 0(epsilon) oarc.label = 0; oarc.score = index_arc.score; arc_index_map_idx = index_arc_idx012; } else { // handle the arcs belongs to src // the arc point to the final state of the fsa in src would point to // the dest state of the arc we're replaceing if (src_arc.label == -1) { oarc.dest_state = orig_dest_state_idx0123 - idx0xxx; } else { // this is the inner arc of the fsa in src int32_t dest_state_idx012x = idx0123 - idx3, dest_state_idx0123 = dest_state_idx012x + src_arc.dest_state; oarc.dest_state = dest_state_idx0123 - idx0xxx; } // arcs in src fsas that point to final state would set to epsilon // arc (label from -1 to 0) oarc.label = 
src_arc.label == -1 ? 0 : src_arc.label; oarc.score = src_arc.score; arc_src_map_idx = src_arc_idx012; } } if (arc_map_src_data) arc_map_src_data[idx01234] = arc_src_map_idx; if (arc_map_index_data) arc_map_index_data[idx01234] = arc_index_map_idx; oarcs_data[idx01234] = oarc; }); // remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes // axis 1, so remove axis 1 twice]. RaggedShape temp = RemoveAxis(full_shape, 1); return FsaVec(RemoveAxis(temp, 1), oarcs); } FsaOrVec RemoveEpsilonSelfLoops(FsaOrVec &src, Array1<int32_t> *arc_map /* = nullptr */) { NVTX_RANGE(K2_FUNC); if (src.NumAxes() == 2) { FsaVec temp = FsaToFsaVec(src); return RemoveEpsilonSelfLoops(temp, arc_map).RemoveAxis(0); } K2_CHECK_EQ(src.NumAxes(), 3); ContextPtr &c = src.Context(); int32_t num_arcs = src.NumElements(); Renumbering renumber_lists(c, num_arcs); char *keep_list_data = renumber_lists.Keep().Data(); const Arc *arcs_data = src.values.Data(); K2_EVAL( c, num_arcs, lambda_set_keep, (int32_t i)->void { Arc arc = arcs_data[i]; char keep; if (arc.label == 0 && arc.src_state == arc.dest_state) { // This arc is an epsilon self-loop, so it should be removed keep = 0; } else { keep = 1; } keep_list_data[i] = keep; }); FsaVec ans = Index(src, 2, renumber_lists.New2Old(), arc_map); return ans; } } // namespace k2
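A minimal usage sketch for the Invert() defined above, relying only on the signatures visible in this listing; the include path and the wrapper name InvertSketch are assumptions for illustration, not part of the original file.

#include <cstdint>
#include "k2/csrc/fsa_algo.h"  // assumed location of the declarations above

// Hedged sketch: swap labels and aux_labels of an FSA/FsaVec via Invert().
// The caller owns `fsa` and `aux_labels` (one aux-label sequence per arc of `fsa`).
static void InvertSketch(k2::FsaOrVec &fsa, k2::Ragged<int32_t> &aux_labels) {
  k2::FsaOrVec inverted;
  k2::Ragged<int32_t> inverted_aux_labels;
  k2::Array1<int32_t> arc_map;  // arc i of `inverted` -> source arc in `fsa`,
                                // or -1 for the extra arcs ExpandArcs() creates
  k2::Invert(fsa, aux_labels, &inverted, &inverted_aux_labels, &arc_map);
  // Per the code above, `inverted` now carries the old aux_labels spread out as
  // one label per arc, and `inverted_aux_labels` holds the old labels with
  // epsilons dropped.
}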
b19081090de3175f678c51449d3572b64f177167.hip
// !!! This is a file automatically generated by hipify!!! /*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Created by Pawan Harish. ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #define CUDA_UVM #define MAX_THREADS_PER_BLOCK 512 int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; #include "kernel.hip" #include "kernel2.cu" void BFSGraph(int argc, char** argv); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { no_of_nodes=0; edge_list_size=0; BFSGraph( argc, argv); } void Usage(int argc, char**argv){ fprintf(stderr,"Usage: %s <input_file>\n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// //Apply BFS on a Graph using CUDA //////////////////////////////////////////////////////////////////////////////// void BFSGraph( int argc, char** argv) { char *input_f; if(argc!=2){ Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f,"r"); if(!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp,"%d",&no_of_nodes); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } #ifndef CUDA_UVM // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes); #else Node* h_graph_nodes; hipMallocManaged( (void**) &h_graph_nodes, sizeof(Node)*no_of_nodes) ; bool *h_graph_mask; hipMallocManaged( (void**) &h_graph_mask, sizeof(bool)*no_of_nodes) ; bool *h_updating_graph_mask; hipMallocManaged( (void**) &h_updating_graph_mask, sizeof(bool)*no_of_nodes) ; bool *h_graph_visited; hipMallocManaged( (void**) &h_graph_visited, sizeof(bool)*no_of_nodes) ; #endif int start, edgeno; // initalize the memory for( unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; h_graph_mask[i]=false; h_updating_graph_mask[i]=false; h_graph_visited[i]=false; } //read the source node from the file fscanf(fp,"%d",&source); source=0; //set the source node as true in the 
mask h_graph_mask[source]=true; h_graph_visited[source]=true; fscanf(fp,"%d",&edge_list_size); int id,cost; #ifndef CUDA_UVM int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); #else int* h_graph_edges; hipMallocManaged( (void**) &h_graph_edges, sizeof(int)*edge_list_size) ; #endif for(int i=0; i < edge_list_size ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i] = id; } if(fp) fclose(fp); printf("Read File\n"); #ifndef CUDA_UVM //Copy the Node list to device memory Node* d_graph_nodes; hipMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ; hipMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, hipMemcpyHostToDevice) ; //Copy the Edge List to device Memory int* d_graph_edges; hipMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ; hipMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, hipMemcpyHostToDevice) ; //Copy the Mask to device memory bool* d_graph_mask; hipMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ; hipMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; bool* d_updating_graph_mask; hipMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ; hipMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; //Copy the Visited nodes array to device memory bool* d_graph_visited; hipMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ; hipMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, hipMemcpyHostToDevice) ; #endif // allocate mem for the result on host side #ifndef CUDA_UVM int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes); #else int* h_cost; hipMallocManaged( (void**) &h_cost, sizeof(int)*no_of_nodes); #endif for(int i=0;i<no_of_nodes;i++) h_cost[i]=-1; h_cost[source]=0; #ifndef CUDA_UVM // allocate device memory for result int* d_cost; hipMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes); hipMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, hipMemcpyHostToDevice) ; #endif //make a bool to check if the execution is over bool *d_over; #ifndef CUDA_UVM hipMalloc( (void**) &d_over, sizeof(bool)); #else hipMallocManaged( (void**) &d_over, sizeof(bool)); #endif printf("Copied Everything to GPU memory\n"); // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); int k=0; printf("Start traversing the tree\n"); #ifndef CUDA_UVM bool stop; #endif //Call the Kernel untill all the elements of Frontier are not false do { //if no thread changes this value then the loop stops #ifndef CUDA_UVM stop=false; hipMemcpy( d_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ; #else *d_over=false; #endif #ifndef CUDA_UVM hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes); #else hipLaunchKernelGGL(( Kernel), dim3(grid), dim3(threads), 0 , 0, h_graph_nodes, h_graph_edges, h_graph_mask, h_updating_graph_mask, h_graph_visited, h_cost, no_of_nodes); #endif // check if kernel execution generated and error #ifndef CUDA_UVM hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes); #else hipLaunchKernelGGL(( Kernel2), dim3(grid), dim3(threads), 0 , 0, h_graph_mask, h_updating_graph_mask, h_graph_visited, d_over, no_of_nodes); #endif // check if kernel execution generated and error #ifndef CUDA_UVM hipMemcpy( &stop, d_over, sizeof(bool), 
hipMemcpyDeviceToHost) ; #else hipDeviceSynchronize(); #endif k++; } #ifndef CUDA_UVM while(stop); #else while(*d_over); #endif printf("Kernel Executed %d times\n",k); // copy result from device to host #ifndef CUDA_UVM hipMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, hipMemcpyDeviceToHost) ; #else hipDeviceSynchronize(); #endif //Store the result into a file FILE *fpo = fopen("result.txt","w"); for(int i=0;i<no_of_nodes;i++) fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]); fclose(fpo); printf("Result stored in result.txt\n"); // cleanup memory #ifndef CUDA_UVM free( h_graph_nodes); free( h_graph_edges); free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); hipFree(d_graph_nodes); hipFree(d_graph_edges); hipFree(d_graph_mask); hipFree(d_updating_graph_mask); hipFree(d_graph_visited); hipFree(d_cost); #else hipFree(h_graph_nodes); hipFree(h_graph_edges); hipFree(h_graph_mask); hipFree(h_updating_graph_mask); hipFree(h_graph_visited); hipFree(h_cost); #endif }
b19081090de3175f678c51449d3572b64f177167.cu
/*********************************************************************************** Implementing Breadth first search on CUDA using algorithm given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA" Copyright (c) 2008 International Institute of Information Technology - Hyderabad. All rights reserved. Permission to use, copy, modify and distribute this software and its documentation for educational purpose is hereby granted without fee, provided that the above copyright notice and this permission notice appear in all copies of this software and that you do not sell the software. THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR OTHERWISE. Created by Pawan Harish. ************************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda.h> #define CUDA_UVM #define MAX_THREADS_PER_BLOCK 512 int no_of_nodes; int edge_list_size; FILE *fp; //Structure to hold a node information struct Node { int starting; int no_of_edges; }; #include "kernel.cu" #include "kernel2.cu" void BFSGraph(int argc, char** argv); //////////////////////////////////////////////////////////////////////////////// // Main Program //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { no_of_nodes=0; edge_list_size=0; BFSGraph( argc, argv); } void Usage(int argc, char**argv){ fprintf(stderr,"Usage: %s <input_file>\n", argv[0]); } //////////////////////////////////////////////////////////////////////////////// //Apply BFS on a Graph using CUDA //////////////////////////////////////////////////////////////////////////////// void BFSGraph( int argc, char** argv) { char *input_f; if(argc!=2){ Usage(argc, argv); exit(0); } input_f = argv[1]; printf("Reading File\n"); //Read in Graph from a file fp = fopen(input_f,"r"); if(!fp) { printf("Error Reading graph file\n"); return; } int source = 0; fscanf(fp,"%d",&no_of_nodes); int num_of_blocks = 1; int num_of_threads_per_block = no_of_nodes; //Make execution Parameters according to the number of nodes //Distribute threads across multiple Blocks if necessary if(no_of_nodes>MAX_THREADS_PER_BLOCK) { num_of_blocks = (int)ceil(no_of_nodes/(double)MAX_THREADS_PER_BLOCK); num_of_threads_per_block = MAX_THREADS_PER_BLOCK; } #ifndef CUDA_UVM // allocate host memory Node* h_graph_nodes = (Node*) malloc(sizeof(Node)*no_of_nodes); bool *h_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_updating_graph_mask = (bool*) malloc(sizeof(bool)*no_of_nodes); bool *h_graph_visited = (bool*) malloc(sizeof(bool)*no_of_nodes); #else Node* h_graph_nodes; cudaMallocManaged( (void**) &h_graph_nodes, sizeof(Node)*no_of_nodes) ; bool *h_graph_mask; cudaMallocManaged( (void**) &h_graph_mask, sizeof(bool)*no_of_nodes) ; bool *h_updating_graph_mask; cudaMallocManaged( (void**) &h_updating_graph_mask, sizeof(bool)*no_of_nodes) ; bool *h_graph_visited; cudaMallocManaged( (void**) &h_graph_visited, sizeof(bool)*no_of_nodes) ; #endif int start, edgeno; // initalize the memory for( unsigned int i = 0; i < no_of_nodes; i++) { fscanf(fp,"%d %d",&start,&edgeno); h_graph_nodes[i].starting = start; h_graph_nodes[i].no_of_edges = edgeno; h_graph_mask[i]=false; h_updating_graph_mask[i]=false; h_graph_visited[i]=false; } //read the source node from the file fscanf(fp,"%d",&source); source=0; //set the source node as true in the mask h_graph_mask[source]=true; h_graph_visited[source]=true; 
fscanf(fp,"%d",&edge_list_size); int id,cost; #ifndef CUDA_UVM int* h_graph_edges = (int*) malloc(sizeof(int)*edge_list_size); #else int* h_graph_edges; cudaMallocManaged( (void**) &h_graph_edges, sizeof(int)*edge_list_size) ; #endif for(int i=0; i < edge_list_size ; i++) { fscanf(fp,"%d",&id); fscanf(fp,"%d",&cost); h_graph_edges[i] = id; } if(fp) fclose(fp); printf("Read File\n"); #ifndef CUDA_UVM //Copy the Node list to device memory Node* d_graph_nodes; cudaMalloc( (void**) &d_graph_nodes, sizeof(Node)*no_of_nodes) ; cudaMemcpy( d_graph_nodes, h_graph_nodes, sizeof(Node)*no_of_nodes, cudaMemcpyHostToDevice) ; //Copy the Edge List to device Memory int* d_graph_edges; cudaMalloc( (void**) &d_graph_edges, sizeof(int)*edge_list_size) ; cudaMemcpy( d_graph_edges, h_graph_edges, sizeof(int)*edge_list_size, cudaMemcpyHostToDevice) ; //Copy the Mask to device memory bool* d_graph_mask; cudaMalloc( (void**) &d_graph_mask, sizeof(bool)*no_of_nodes) ; cudaMemcpy( d_graph_mask, h_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; bool* d_updating_graph_mask; cudaMalloc( (void**) &d_updating_graph_mask, sizeof(bool)*no_of_nodes) ; cudaMemcpy( d_updating_graph_mask, h_updating_graph_mask, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; //Copy the Visited nodes array to device memory bool* d_graph_visited; cudaMalloc( (void**) &d_graph_visited, sizeof(bool)*no_of_nodes) ; cudaMemcpy( d_graph_visited, h_graph_visited, sizeof(bool)*no_of_nodes, cudaMemcpyHostToDevice) ; #endif // allocate mem for the result on host side #ifndef CUDA_UVM int* h_cost = (int*) malloc( sizeof(int)*no_of_nodes); #else int* h_cost; cudaMallocManaged( (void**) &h_cost, sizeof(int)*no_of_nodes); #endif for(int i=0;i<no_of_nodes;i++) h_cost[i]=-1; h_cost[source]=0; #ifndef CUDA_UVM // allocate device memory for result int* d_cost; cudaMalloc( (void**) &d_cost, sizeof(int)*no_of_nodes); cudaMemcpy( d_cost, h_cost, sizeof(int)*no_of_nodes, cudaMemcpyHostToDevice) ; #endif //make a bool to check if the execution is over bool *d_over; #ifndef CUDA_UVM cudaMalloc( (void**) &d_over, sizeof(bool)); #else cudaMallocManaged( (void**) &d_over, sizeof(bool)); #endif printf("Copied Everything to GPU memory\n"); // setup execution parameters dim3 grid( num_of_blocks, 1, 1); dim3 threads( num_of_threads_per_block, 1, 1); int k=0; printf("Start traversing the tree\n"); #ifndef CUDA_UVM bool stop; #endif //Call the Kernel untill all the elements of Frontier are not false do { //if no thread changes this value then the loop stops #ifndef CUDA_UVM stop=false; cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ; #else *d_over=false; #endif #ifndef CUDA_UVM Kernel<<< grid, threads, 0 >>>( d_graph_nodes, d_graph_edges, d_graph_mask, d_updating_graph_mask, d_graph_visited, d_cost, no_of_nodes); #else Kernel<<< grid, threads, 0 >>>( h_graph_nodes, h_graph_edges, h_graph_mask, h_updating_graph_mask, h_graph_visited, h_cost, no_of_nodes); #endif // check if kernel execution generated and error #ifndef CUDA_UVM Kernel2<<< grid, threads, 0 >>>( d_graph_mask, d_updating_graph_mask, d_graph_visited, d_over, no_of_nodes); #else Kernel2<<< grid, threads, 0 >>>( h_graph_mask, h_updating_graph_mask, h_graph_visited, d_over, no_of_nodes); #endif // check if kernel execution generated and error #ifndef CUDA_UVM cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ; #else cudaDeviceSynchronize(); #endif k++; } #ifndef CUDA_UVM while(stop); #else while(*d_over); #endif printf("Kernel Executed %d times\n",k); // copy 
result from device to host #ifndef CUDA_UVM cudaMemcpy( h_cost, d_cost, sizeof(int)*no_of_nodes, cudaMemcpyDeviceToHost) ; #else cudaDeviceSynchronize(); #endif //Store the result into a file FILE *fpo = fopen("result.txt","w"); for(int i=0;i<no_of_nodes;i++) fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]); fclose(fpo); printf("Result stored in result.txt\n"); // cleanup memory #ifndef CUDA_UVM free( h_graph_nodes); free( h_graph_edges); free( h_graph_mask); free( h_updating_graph_mask); free( h_graph_visited); free( h_cost); cudaFree(d_graph_nodes); cudaFree(d_graph_edges); cudaFree(d_graph_mask); cudaFree(d_updating_graph_mask); cudaFree(d_graph_visited); cudaFree(d_cost); #else cudaFree(h_graph_nodes); cudaFree(h_graph_edges); cudaFree(h_graph_mask); cudaFree(h_updating_graph_mask); cudaFree(h_graph_visited); cudaFree(h_cost); #endif }
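The loop above launches Kernel and Kernel2 from the included kernel.cu and kernel2.cu, which are not part of this listing. Below is a hedged sketch of kernels matching those launch signatures in a frontier-based BFS; the parameter lists are taken from the launches above, but the bodies are an assumption rather than the project's actual kernels, and the names are changed to make that explicit.

// Expand the current frontier: for every masked node, relax its outgoing edges.
// Reuses the Node struct and MAX_THREADS_PER_BLOCK defined in the file above.
__global__ void KernelSketch(Node* g_graph_nodes, int* g_graph_edges,
                             bool* g_graph_mask, bool* g_updating_graph_mask,
                             bool* g_graph_visited, int* g_cost, int no_of_nodes)
{
    int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < no_of_nodes && g_graph_mask[tid])
    {
        g_graph_mask[tid] = false;                  // node leaves the frontier
        int first = g_graph_nodes[tid].starting;
        int last  = first + g_graph_nodes[tid].no_of_edges;
        for (int i = first; i < last; i++)
        {
            int id = g_graph_edges[i];
            if (!g_graph_visited[id])
            {
                g_cost[id] = g_cost[tid] + 1;       // one hop further from source
                g_updating_graph_mask[id] = true;   // candidate for next frontier
            }
        }
    }
}

// Promote the candidates to the new frontier and report whether to iterate again.
__global__ void Kernel2Sketch(bool* g_graph_mask, bool* g_updating_graph_mask,
                              bool* g_graph_visited, bool* g_over, int no_of_nodes)
{
    int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < no_of_nodes && g_updating_graph_mask[tid])
    {
        g_graph_mask[tid] = true;
        g_graph_visited[tid] = true;
        *g_over = true;                             // at least one node was added
        g_updating_graph_mask[tid] = false;
    }
}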
5a601ce0bc1d6567768a230949c11e88e0bd7510.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * intList.cu */ #include "header/intList.h" __device__ void copyTabDev(uint64_t *src, uint64_t *dest, int size) { if (blockIdx.x == 0) { int tid = threadIdx.x; if (tid < size) { dest[tid] = src[tid]; } } } //1 block ; size thread __global__ void copyTabGPU(uint64_t *src, uint64_t *dest, int size) { int tid = threadIdx.x; if (tid < size) { dest[tid] = src[tid]; } } __host__ Int_List_GPU *createIntList() { Int_List_GPU *l = new Int_List_GPU[1]; l->Size = 0; l->List = NULL; return l; } __host__ void addInt(Int_List_GPU **list, int v) { Int_List_GPU *l = new Int_List_GPU[1]; l->List = new uint64_t[(*list)->Size + 1]; l->Size = (*list)->Size + 1; uint64_t *dev_list_dest, *dev_list_src; hipMalloc((void **)&dev_list_src, (*list)->Size * sizeof(uint64_t)); hipMemcpy(dev_list_src, (*list)->List, (*list)->Size * sizeof(uint64_t), hipMemcpyHostToDevice); hipMalloc((void **)&dev_list_dest, ((*list)->Size + 1) * sizeof(uint64_t)); hipLaunchKernelGGL(( copyTabGPU), dim3(1), dim3((*list)->Size), 0, 0, dev_list_src, dev_list_dest, (*list)->Size); hipMemcpy(l->List, dev_list_dest, (*list)->Size * sizeof(uint64_t), hipMemcpyDeviceToHost); hipFree(dev_list_src); hipFree(dev_list_dest); l->List[(*list)->Size] = v; delete[]((list[0])->List); delete[](list[0]); list[0] = l; } __device__ void addIntGPU(uint64_t **list, int size, int v) { if (blockIdx.x == 0) { __shared__ uint64_t *l; if (threadIdx.x == 0) { l = new uint64_t[size + 1]; } copyTabDev((*list), l, size); l[size] = v; *list = l; } } __host__ uint64_t getVal(Int_List_GPU l, int index) { return l.List[index]; } __device__ uint64_t getValGPU(uint64_t *l, int index) { return l[index]; } __host__ void removeLastInt(Int_List_GPU **list) { Int_List_GPU *l = new Int_List_GPU[1]; l->List = new uint64_t[(*list)->Size - 1]; int *dev_list_dest, *dev_list_src; hipMalloc((void **)&dev_list_src, (*list)->Size * sizeof(int)); hipMemcpy(dev_list_src, (*list)->List, (*list)->Size * sizeof(int), hipMemcpyHostToDevice); hipMalloc((void **)&dev_list_dest, ((*list)->Size - 1) * sizeof(int)); hipLaunchKernelGGL(( copyTabGPU), dim3(1), dim3((*list)->Size - 1), 0, 0, (*list)->List, l->List, (*list)->Size - 1); hipMemcpy(l->List, dev_list_dest, ((*list)->Size - 1) * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_list_src); hipFree(dev_list_dest); l->Size = (*list)->Size - 1; *list = l; } __device__ void removeLastInt(uint64_t **list, uint64_t size) { if (blockIdx.x == 0) { __shared__ uint64_t *l; if (threadIdx.x == 0) { l = new uint64_t[size - 1]; } copyTabDev((*list), l, size - 1); *list = l; } } __host__ void resetIntList(Int_List_GPU **list) { while ((*list)->Size > 0) { removeLastInt(list); } } __device__ void resetIntListGPU(uint64_t **list, uint64_t size) { if (blockIdx.x == 0) { for (int i = 0; i < size; i++) { removeLastInt(list, size); } } } __host__ void printIntList(Int_List_GPU l) { printf("%i\n", l.Size); char* tmp = (char *) malloc (1000*sizeof(char)); char *tmptmp = (char *) malloc (1000*sizeof(char)); sprintf(tmp,"%s ","facteurs"); for (int i = 0; i < l.Size; i++) { sprintf(tmptmp,"%lu ", getVal(l, i)); strcat(tmp, tmptmp); sprintf(tmptmp,"%s", ""); } printf("%s\n",tmp); free(tmptmp); free(tmp); }
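// Note on the removeLastInt(Int_List_GPU **) overload above (the .cu version of
// this file, shown below, has the same code): the staging buffers dev_list_src
// and dev_list_dest are declared as int* although the list payload is uint64_t;
// the copyTabGPU launch is given the host pointers (*list)->List and l->List
// rather than the device buffers; and the final hipMemcpy reads dev_list_dest,
// which no kernel has written. Compare with addInt() above, which shows the
// intended round trip: copy the old list to dev_list_src, run copyTabGPU on the
// device buffers, then copy dev_list_dest back into the new host list.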
5a601ce0bc1d6567768a230949c11e88e0bd7510.cu
/** * intList.cu */ #include "header/intList.h" __device__ void copyTabDev(uint64_t *src, uint64_t *dest, int size) { if (blockIdx.x == 0) { int tid = threadIdx.x; if (tid < size) { dest[tid] = src[tid]; } } } //1 block ; size thread __global__ void copyTabGPU(uint64_t *src, uint64_t *dest, int size) { int tid = threadIdx.x; if (tid < size) { dest[tid] = src[tid]; } } __host__ Int_List_GPU *createIntList() { Int_List_GPU *l = new Int_List_GPU[1]; l->Size = 0; l->List = NULL; return l; } __host__ void addInt(Int_List_GPU **list, int v) { Int_List_GPU *l = new Int_List_GPU[1]; l->List = new uint64_t[(*list)->Size + 1]; l->Size = (*list)->Size + 1; uint64_t *dev_list_dest, *dev_list_src; cudaMalloc((void **)&dev_list_src, (*list)->Size * sizeof(uint64_t)); cudaMemcpy(dev_list_src, (*list)->List, (*list)->Size * sizeof(uint64_t), cudaMemcpyHostToDevice); cudaMalloc((void **)&dev_list_dest, ((*list)->Size + 1) * sizeof(uint64_t)); copyTabGPU<<<1, (*list)->Size>>>(dev_list_src, dev_list_dest, (*list)->Size); cudaMemcpy(l->List, dev_list_dest, (*list)->Size * sizeof(uint64_t), cudaMemcpyDeviceToHost); cudaFree(dev_list_src); cudaFree(dev_list_dest); l->List[(*list)->Size] = v; delete[]((list[0])->List); delete[](list[0]); list[0] = l; } __device__ void addIntGPU(uint64_t **list, int size, int v) { if (blockIdx.x == 0) { __shared__ uint64_t *l; if (threadIdx.x == 0) { l = new uint64_t[size + 1]; } copyTabDev((*list), l, size); l[size] = v; *list = l; } } __host__ uint64_t getVal(Int_List_GPU l, int index) { return l.List[index]; } __device__ uint64_t getValGPU(uint64_t *l, int index) { return l[index]; } __host__ void removeLastInt(Int_List_GPU **list) { Int_List_GPU *l = new Int_List_GPU[1]; l->List = new uint64_t[(*list)->Size - 1]; int *dev_list_dest, *dev_list_src; cudaMalloc((void **)&dev_list_src, (*list)->Size * sizeof(int)); cudaMemcpy(dev_list_src, (*list)->List, (*list)->Size * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void **)&dev_list_dest, ((*list)->Size - 1) * sizeof(int)); copyTabGPU<<<1, (*list)->Size - 1>>>((*list)->List, l->List, (*list)->Size - 1); cudaMemcpy(l->List, dev_list_dest, ((*list)->Size - 1) * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_list_src); cudaFree(dev_list_dest); l->Size = (*list)->Size - 1; *list = l; } __device__ void removeLastInt(uint64_t **list, uint64_t size) { if (blockIdx.x == 0) { __shared__ uint64_t *l; if (threadIdx.x == 0) { l = new uint64_t[size - 1]; } copyTabDev((*list), l, size - 1); *list = l; } } __host__ void resetIntList(Int_List_GPU **list) { while ((*list)->Size > 0) { removeLastInt(list); } } __device__ void resetIntListGPU(uint64_t **list, uint64_t size) { if (blockIdx.x == 0) { for (int i = 0; i < size; i++) { removeLastInt(list, size); } } } __host__ void printIntList(Int_List_GPU l) { printf("%i\n", l.Size); char* tmp = (char *) malloc (1000*sizeof(char)); char *tmptmp = (char *) malloc (1000*sizeof(char)); sprintf(tmp,"%s ","facteurs"); for (int i = 0; i < l.Size; i++) { sprintf(tmptmp,"%lu ", getVal(l, i)); strcat(tmp, tmptmp); sprintf(tmptmp,"%s", ""); } printf("%s\n",tmp); free(tmptmp); free(tmp); }
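A small, hedged usage sketch of the host-side API defined above; it is not part of the original file, it only uses the signatures shown here, and the driver name listSketch is made up. It assumes header/intList.h declares Int_List_GPU with the Size and List members used above.

#include "header/intList.h"   // the header the file above already includes

// Hypothetical driver exercising the host-side list API.
int listSketch()
{
    Int_List_GPU *factors = createIntList();
    addInt(&factors, 2);       // each addInt stages the old contents on the GPU,
    addInt(&factors, 3);       // copies them with copyTabGPU, then appends the
    addInt(&factors, 5);       // new value on the host
    printIntList(*factors);    // prints the size, then "facteurs 2 3 5"
    resetIntList(&factors);    // pops elements one by one via removeLastInt
    return 0;
}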
87a74e1c8c82f4b4d0879873dbd5c09c3d5e6ad3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <basicOps.cuh> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <float.h> const int NUM_THREADS = 32; __global__ void kGetNonZeroElements(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) atomicAdd(&out[0],A[i] != 0.0f ? 1.0f : 0.0f); } __global__ void kGetNonZeroColumns(float *A, float *out, int rows, int cols) { const int myCol = (blockIdx.x * blockDim.x) + threadIdx.x; float result = 0.0f; if(myCol < cols) { for (unsigned int i = 0;i < rows; i++) { if(A[(myCol*rows) + i] != 0.0f) result = 1.0f; } atomicAdd(&out[0],result); } } __global__ void kRenormalizeWeights(float *w, float *unit_sums, float limit, int rows, int cols) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const int size = rows*cols; int myCol = 0; float rel_diff = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { myCol = i/rows; if(unit_sums[myCol] > limit) { rel_diff = 1.0f/unit_sums[myCol]; w[i] *= rel_diff; } else{ continue; } } } __global__ void kFill_with(float *m, float fill_value, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) m[i] = fill_value; } __global__ void kFill_with(int *m, int fill_value, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) m[i] = fill_value; } __global__ void kRdmNumbers(float *seed, int size, float *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned long long s[ 2 ]; //s[0] = (long long)seed[(gridDim.x*blockIdx.x) + threadIdx.x]; //s[1] = (long long)seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x]; s[0] = 17; s[1] = 83; unsigned long long s1 = s[ 0 ]; unsigned long long s0 = s[ 1 ]; unsigned long long rdm64 = 23459867034598355; if(idx == 0) { printf("rdm: %i\n", rdm64); printf("rdm1: %i\n", (unsigned int)(rdm64&0xffffffff)); printf("rdm2: %i\n", (unsigned int)((rdm64>>32)&0xffffffff)); } unsigned int rdm32_1 = 0; unsigned int rdm32_2 = 0; //printf("seed 1: %i\n", seed[(gridDim.x*blockIdx.x) + threadIdx.x]); //printf("seed 2: %i\n", seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x]); //printf("idx: %i\n", idx); for(int i = idx*2; i < size; i+=numThreads*2) { s1 = s[0]; s0 = s[1]; s[0] = s0; s1 ^= s1 << 23; // a rdm64 = (s[1 ] = (s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26))) + s0; // b, c rdm32_1 = (rdm64&0xffffffff); rdm32_2 = ((rdm64>>32)&0xffffffff); out[i] = rdm32_1; out[i+1] = rdm32_2; } seed[(gridDim.x*blockIdx.x) + threadIdx.x] = s[0]; seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x] = s[1]; } __global__ void kCreateRdmSqrtWeight_Logistic(float *A, int in, int out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const float lower_limit = -4.0f*sqrtf(6.0f/((float)in + out)); const float upper_limit = 4.0f*sqrtf(6.0f/((float)in + out)); const float range = upper_limit-lower_limit; for (unsigned int i = idx;i < size; i += numThreads) { A[i] = lower_limit + (A[i]*range); } } __global__ void kCreateSparseRdmWeight(float *rdm, float* indicies, float *out, int 
rows, int cols, int connections) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int connection_idx = 0; float rdm_value = 0.0f; int size = connections*cols; int current_col = 0; //each thread fills one row for (unsigned int i = idx; i < size; i += numThreads) { connection_idx = (int)indicies[i]; rdm_value = rdm[i]; current_col = i/(connections); out[(current_col*rows)+connection_idx] = rdm_value; } } __global__ void kRandInt(float *A, int lower_limit, int upper_limit, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const int range = upper_limit-lower_limit + 1; for (unsigned int i = idx;i < size; i += numThreads) { //use uniform random sample to get integers A[i] = (float)(((int)((A[i]*range))) + lower_limit); } } //vertical stack for column major format __global__ void vStack(float *A, float *B, float *out, int size_out, int rows_a, int rows, int cols) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_col = 0; int current_row = 0; int offset = 0; const int rows_b = rows - rows_a; for (unsigned int i = idx;i < size_out; i += numThreads) { current_col = i / rows; //int arithmetic offset = (current_col*rows); current_row = i - offset; if(current_row >= rows_a) { //fetch b value out[i] = B[(current_col*rows_b) + current_row - rows_a]; } else { //fetch a value out[i] = A[(current_col*rows_a) + current_row]; } } } __global__ void hStack(float *A, float *B, float *out, int size_out, int size_a) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for(unsigned int i = idx; i < size_out; i+=numThreads) { if(i >= size_a) { //append B out[i] = B[i - size_a]; } else { //append A out[i] = A[i]; } } } __global__ void hStackN(float **arrA, int general_size, float *out, int size_out, int matrices_count) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_matrix = 0; for(unsigned int i = idx; i < size_out; i+=numThreads) { current_matrix = i / general_size; current_matrix = current_matrix == matrices_count ? current_matrix - 1 : current_matrix; out[i] = arrA[current_matrix][i - (current_matrix*general_size)]; } } __global__ void vStackN(float **arrA, float *out, int rows, int cols) { int size = rows*cols; int offset = rows*cols*blockIdx.x; for(unsigned int i = threadIdx.x; i < size; i+=blockDim.x) out[offset + i] = arrA[blockIdx.x][i]; } __global__ void AddGradientsN(float **arrA, int size, int myrank, int matrix_count, float multiplier) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for(int matrix_idx = 0; matrix_idx < matrix_count; matrix_idx++) { if(matrix_idx == myrank){ continue; } for(unsigned int i = idx; i < size; i+=numThreads) arrA[myrank][i] += arrA[matrix_idx][i]; } //better numerical stability to do it afterwards for(unsigned int i = idx; i < size; i+=numThreads) arrA[myrank][i] *=multiplier; } __global__ void hStackN(Matrix **arrA, int general_size, float *out, int size_out, int matrices_count) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_matrix = 0; for(unsigned int i = idx; i < size_out; i+=numThreads) { current_matrix = i / general_size; current_matrix = current_matrix == matrices_count ? 
current_matrix - 1 : current_matrix; out[i] = arrA[current_matrix]->data[i - (current_matrix*general_size)]; } } __global__ void kAdd_to_z(float *z, float *z1, float *y, float *y_count, int rows, int cols, float *out) { float value = 0; for(int row = blockIdx.x; row < rows; row +=gridDim.x) { int cls = (int)y[row]; if(threadIdx.x == 0) atomicAdd(&y_count[cls],1.0f); for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x) { value = z1[row + (col*rows)]; atomicAdd(&out[cls+(col*rows)],value); } } __syncthreads(); for(int row = blockIdx.x; row < rows; row +=gridDim.x) { int cls = (int)y[row]; for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x) { if(y_count[cls] > 0) out[cls+(col*rows)] /= y_count[cls]; } } } __global__ void kAdd(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i] + B[i]; } __global__ void kMul(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i] * B[i]; } __global__ void kSub(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i] - B[i]; } __global__ void kSub_Sparse(float *A, float *data, int *ptr_rows, int *idx_cols, float *out, int rows, int cols, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int row_idx = 0; for (unsigned int i = idx;i < rows*cols; i += numThreads) out[i] = A[i]; for (unsigned int i = idx;i < size; i += numThreads) { for(int j = 0; j < rows + 1; j++) { if(ptr_rows[j] > i) { row_idx = j-1; break; } } out[(idx_cols[i] * rows) + row_idx] = A[(idx_cols[i] * rows) + row_idx] - data[i]; } } __global__ void kDiv(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = fdividef(A[i],B[i]); } __global__ void kExp(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = expf(A[i]); } __global__ void kLogistic(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = 1.0f / (1.0 + expf(-A[i])); } __global__ void kLogisticGrad(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i]*(1 - A[i]); } __global__ void kSqrt(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = sqrtf(A[i]); } __global__ void kLog(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += 
numThreads) out[i] = logf(A[i]); } __global__ void kSquare(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = powf(A[i], 2.0f); } __global__ void kAbs(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = fabsf(A[i]); } __global__ void kScalarMul(float *A, float scalar, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = scalar*A[i]; } __global__ void kScalarAdd(float *A, float scalar, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i]+scalar; } __global__ void kTranspose(float *A, float *out, int width, int height) { __shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1]; // read the Matrix *tile into shared memory unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = A[index_in]; } __syncthreads(); // write the transposed Matrix *tile to global memory xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; out[index_out] = block[threadIdx.x][threadIdx.y]; } } //for column major data __global__ void slice_rows(float *A, float *out, int size_out, int rows_A, int start, int end) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_col = 0; int current_row = 0; int offset = 0; int rows_out = (end - start) + 1; for (unsigned int i = idx;i < size_out; i += numThreads) { current_col = i / rows_out; //note: int arithmetic current_row = i - (current_col*rows_out); offset = rows_A*current_col; out[i] = A[offset + start + current_row]; } } //for column major data __global__ void slice_cols(float *A, float *out, int start, int rows, int size_out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx; i < size_out; i += numThreads) { out[i] = A[i+(start*rows)]; } } __device__ void reduceToMax(float* sdata, unsigned int tid) { //Synchronize threads to share shared memory data __syncthreads(); float mySum = sdata[tid]; // do reduction in shared mem if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); } if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); } if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); } if (NUM_THREADS == 32){ if (tid < 16) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smem = sdata; if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); } if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); } if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); } if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); } if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); } } } else { if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smem = sdata; if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); } if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); } if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); } if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); } if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); } if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); } } } } __device__ void reduceToMaxAndArgMax(float* sdataMax, float* sdataArgMax, unsigned int tid, int threads) { //Synchronize threads to share shared memory data __syncthreads(); float mySum = sdataMax[tid]; if(threads == 32) { if (tid < 16) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smemMax = sdataMax; volatile float* smemArgMax = sdataArgMax; if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; } if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; } if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; } if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; } if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; } } } else { if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smemMax = sdataMax; volatile float* smemArgMax = sdataArgMax; if (NUM_THREADS >= 64) if(mySum < smemMax[tid + 32]){smemMax[tid] = mySum = smemMax[tid + 32]; smemArgMax[tid] = smemArgMax[tid + 32]; } if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; } if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; } if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; } if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; } if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; } } } } __device__ void reduceToSumLocal(float* sdata, unsigned int tid) { //Synchronize threads to share shared memory data __syncthreads(); float mySum = sdata[tid]; // do reduction in shared mem if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (NUM_THREADS == 32){ if (tid < 16) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smem = sdata; if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } } else { if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smem = sdata;
			if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
			if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
			if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
			if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
			if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
			if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
		}
	}
}

__global__ void kSoftMax(float* A, float* out, unsigned int rows, unsigned int cols)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float col_value = 0.0f;

	//per-block shared buffers: index them with threadIdx.x, since the global idx
	//can exceed THREADS_PER_BLOCKS as soon as more than one block is launched
	__shared__ float max_values[THREADS_PER_BLOCKS];
	__shared__ float row_sums[THREADS_PER_BLOCKS];

	for (unsigned int row = idx; row < rows; row += numThreads)
	{
		//fill with min values
		max_values[threadIdx.x] = -FLT_MAX;
		row_sums[threadIdx.x] = 0.0f;

		//calc max value of the row
		for (unsigned int i = 0; i < cols; i++)
		{
			col_value = A[(i*rows) + row];
			if(col_value > max_values[threadIdx.x])
				max_values[threadIdx.x] = col_value;
		}

		//calc the row sum
		for (unsigned int i = 0; i < cols; i++)
			row_sums[threadIdx.x] += __expf(A[(i*rows) + row] - max_values[threadIdx.x]);

		//calc the value of each element in the row
		for (unsigned int i = 0; i < cols; i++)
			out[(i*rows) + row] = __expf(A[(i*rows) + row] - max_values[threadIdx.x])/row_sums[threadIdx.x];
	}
}

//for column major data
__global__ void kSubMatrixVector(float *A, float *v, float *out, int rows, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	//offset = current_column * rows; v is a column vector with one entry per row
	int offset = 0;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		offset = (i / rows)*rows; //note: int arithmetic
		out[i] = A[i] - v[i - offset];
	}
}

//for column major data
__global__ void kAddMatrixVector(float *A, float *v, float *out, int rows, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	//offset = current column; v is a row vector with one entry per column
	int offset = 0;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		offset = (i / rows); //note: int arithmetic
		out[i] = A[i] + v[offset];
	}
}

//for column major data
__global__ void kAddScaledMatrixVector(float *A, float *v, float weight, float *out, int rows, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	//offset = current column
	int offset = 0;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		offset = (i / rows); //note: int arithmetic
		out[i] = A[i] + (v[offset]*weight);
	}
}

//for column major data
__global__ void kMulMatrixVector(float *A, float *v, float *out, int rows, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	//offset = current column
	int offset = 0;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		offset = (i / rows); //note: int arithmetic
		out[i] = A[i] * v[offset];
	}
}

__global__ void kArgmax(float* A, float* out, unsigned int rows, unsigned int cols)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float max_value = -FLT_MAX;
	float max_i = 0;
	float col_value = 0.0f;

	for (unsigned int row = idx; row < rows; row += numThreads)
	{
		//reset for each row handled by this thread
		max_value = -FLT_MAX;
		max_i = 0;

		for (unsigned int i = 0; i < cols; i++)
		{
			col_value = A[(i*rows) + row];
			if(col_value > max_value)
			{
				max_value = col_value;
				max_i = i;
			}
		}
		out[row] = max_i;
	}
}

__global__ void
kCreate_t_matrix(float *labels, float *out, int rows, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	int label = 0;
	int offset = 0;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		label = (int)(labels[i]);
		//offset = (label*rows) gives the current column; i gives the current row
		offset = (label*rows) + i;
		out[offset] = 1.0f;
	}
}

__global__ void kEqual(float *A, float *B, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

	for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = (float)(A[i] == B[i]);
}

__global__ void kRectifiedLinear(float *A, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

	for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = A[i] > 0.0f ? A[i] : 0.0f;
}

__global__ void kRectifiedLinear_Derivative(float *A, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

	for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = A[i] > 0.0f ? 1.0f : 0.0f;
}

__global__ void kDoubleRectifiedLinear(float *A, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float value = 0.0f;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		//clamp to [0, 1]
		value = (A[i] > 0.0f) ? A[i] : 0.0f;
		out[i] = (value < 1.0f) ? value : 1.0f;
	}
}

__global__ void kLinear(float *A, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

	for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = A[i];
}

__global__ void kDoubleRectifiedLinear_Derivative(float *A, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

	for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = (A[i] <= 0.0f) || (A[i] >= 1.0f) ? 0.0f : 1.0f;
}

__global__ void kHardTanH(float *A, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float value = 0.0f;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		//clamp to [-1, 1]
		value = (A[i] < 1.0f) ? A[i] : 1.0f;
		out[i] = (value > -1.0f) ? value : -1.0f;
	}
}

__global__ void kPairwise_ranking(float *A, float *B, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
	float value = 0.0f;

	for (unsigned int i = idx;i < size; i += numThreads)
	{
		//hinge: max(0, 1 - A + B)
		value = 1.0f - A[i] + B[i];
		out[i] = value < 0.0f ? 0.0f : value;
	}
}

__global__ void kPairwise_ranking_derivative(float *A, float *B, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

	for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = (1.0f - A[i] + B[i]) > 0.0f ? 1.0f : 0.0f;
}

__global__ void kHardTanH_Derivative(float *A, float *out, int size)
{
	const unsigned int numThreads = blockDim.x * gridDim.x;
	const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;

	for (unsigned int i = idx;i < size; i += numThreads)
		out[i] = (A[i] < -1.0f) || (A[i] > 1.0f) ?
0.0f : 1.0f; } __global__ void kSquaredError(float *A, float *t, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = powf(A[i] -t[i],2.0f); } __global__ void kSum(float *v, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; out[0] = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { atomicAdd(&out[0],v[i]); } } __global__ void kArange(float *out, int start, int rows, int cols, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { offset = (i % rows)*cols; out[i] = (float)(offset + (i/rows) + start); } } __global__ void kDropout(float *A, float *rdm, float dropout, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) rdm[i] = rdm[i] > dropout ? A[i] : 0.0f; } __global__ void kDropout_cached(float *A, float *dropout, float *out, int current_idx, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = ((blockIdx.x * blockDim.x) + threadIdx.x); int shifted_idx = 0; int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { shifted_idx = i +current_idx; offset = shifted_idx/10000; out[i] = dropout[shifted_idx - (offset*10000)] == 1.0f ? A[i] : 0.0f; } } __global__ void kRMSprop(float *RMS, float *grad, float RMS_multiplier, float learning_rate, int batch_size, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; } } __global__ void kRMSprop_with_momentum_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; float momentum_matrix_value = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); momentum_matrix_value = m[i]; momentum_matrix_value -= grad_value; RMS[i] = RMS_value; m[i] = momentum_matrix_value; } } __global__ void kLocalGrad (float *z, float *w, float *y, float *m, float learning_rate, int batch_size, int size, float momentum) { } __global__ void kRMSprop_with_momentum_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - 
RMS_multiplier; float momentum_matrix_value = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); momentum_matrix_value = m[i] = (momentum*momentum_matrix_value) - grad_value; RMS[i] = RMS_value; w[i] += momentum_matrix_value; } } __global__ void kRMSprop_with_nesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); m[i] = (momentum*m[i]) - (learning_rate*grad_value); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; w[i] -= grad_value; /* grad_value = learning_rate*fdividef(grad[i],(float)batch_size); m[i] = (momentum*m[i]) - grad_value; w[i] -= grad_value; */ } } __global__ void kNesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = learning_rate*fdividef(grad[i],(float)batch_size); m[i] = (momentum*m[i]) - grad_value; w[i] -= grad_value; } } __global__ void kCompression_8bit_test(float *tbl, float *A, float precision, int size, float *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float absnumber = 0.0; float multiplier = 0.1f/precision; float threshold = precision/1.e6f; __shared__ float tbl_values[128]; if(threadIdx.x < 126) tbl_values[threadIdx.x] = tbl[threadIdx.x]; __syncthreads(); for (int i = idx;i < size; i += numThreads) { int isNegative = 0; int pivot = 63; int upper_pivot = 125; int lower_pivot = 0; absnumber = A[i]*multiplier; if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; } if(absnumber < threshold){ out[i] = 0.0f; continue; } for(int j = 32; j > 0; j>>=1) { if(absnumber > tbl_values[pivot]) { lower_pivot = pivot; pivot+=j; } else { upper_pivot = pivot; pivot-=j; } } if(lower_pivot == pivot) if(fabsf(tbl_values[pivot]-absnumber) < (tbl_values[upper_pivot]-absnumber)) out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier); else out[i] = tbl_values[upper_pivot]/(isNegative == 1 ? -multiplier : multiplier); else if((tbl_values[pivot]-absnumber) < fabsf(tbl_values[lower_pivot]-absnumber)) out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier); else out[i] = tbl_values[lower_pivot]/(isNegative == 1 ? 
-multiplier : multiplier); } } __global__ void kDecompression_8bit(float *flt_tbl, unsigned char *A, float precision, int size, float *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; __shared__ float tbl_floats[256]; if(threadIdx.x < 126) { tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x]*precision; tbl_floats[threadIdx.x+128] = -tbl_floats[threadIdx.x]; } tbl_floats[126] = 0.0f; tbl_floats[254] = -0.0f; tbl_floats[127] = precision; tbl_floats[255] = -precision; __syncthreads(); for (int i = idx;i < size; i += numThreads) { out[i] = tbl_floats[A[i]]; } } __global__ void kCompression_8bit(float *flt_tbl, float *A, float precision, int size, unsigned char *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float absnumber = 0.0f; float threshold_lower = 0.0000015; float threshold_upper = 0.995703; int isNegative = 0; int pivot = 63; int upper_pivot = 125; int lower_pivot = 0; __shared__ float tbl_floats[128]; if(threadIdx.x < 126) tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x]; __syncthreads(); for (int i = idx;i < size; i += numThreads) { isNegative = 0; pivot = 63; upper_pivot = 125; lower_pivot = 0; absnumber = A[i]/precision; if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; } if(absnumber < threshold_lower){ out[i] = (unsigned char)126; continue; } if(absnumber > threshold_upper){ out[i] = (isNegative == 0 ? (unsigned char)127 : (unsigned char)255); continue; } for(int j = 32; j > 0; j>>=1) { if(absnumber > tbl_floats[pivot]) { lower_pivot = pivot; pivot+=j; } else { upper_pivot = pivot; pivot-=j; } } if(lower_pivot == pivot) if(fabsf(tbl_floats[pivot]-absnumber) < (tbl_floats[upper_pivot]-absnumber)) if(isNegative == 1) out[i] = pivot | 1 << 7; else out[i] = pivot; else if(isNegative == 1) out[i] = upper_pivot | 1 << 7; else out[i] = upper_pivot; else if((tbl_floats[pivot]-absnumber) < fabsf(tbl_floats[lower_pivot]-absnumber)) if(isNegative == 1) out[i] = (pivot | 1 << 7); else out[i] = pivot; else if(isNegative == 1) out[i] = lower_pivot | 1 << 7; else out[i] = lower_pivot; } } __global__ void kRMSprop_with_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size) ; RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; w[i] -= grad_value; } } __global__ void kRMSprop_with_weight_update_8bit(float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; } } __global__ void 
kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < m && col < n) { /* for(int i = 0; i < indptr[m+1];i++) if(indices[i] > 23) { printf("ERROR: \n"); printf("%i \n", indices[i]); printf("col: %i \n", col); printf("row: %i \n", row); } */ int max_idx = indptr[m+1]; for(int i = 0; i < m+1;i++) if(indptr[i] > max_idx) { printf("ERROR: \n"); printf("%i \n", indptr[i]); printf("max_idx: %i \n", max_idx); } const int start = indptr[row]; const int end = indptr[row + 1]; float sum = 0.f; for (int i = start; i < end; i++) { /* for(int a = start; a < end;a++) if(indices[a] > 23) { printf("ERROR: \n"); printf("%i \n", indices[a]); printf("a: %i \n", a); } */ sum += data[i] * dense_data[(col * k) + indices[i]]; if(sum > 500000 || sum < -500000) { printf("start: %i ", start); printf("end: %i ", end); printf("i: %i ", i); printf("k: %i ", k); printf("col: %i ", col); printf("data idx %i ", indices[i]); printf("full idx %i ", (col * k) + indices[i]); printf("data sparse %f ", data[i]); printf("data dense %f ", dense_data[col * k + indices[i]]); printf("data point %f ", data[i] * dense_data[col * k + indices[i]]); printf(" sum %f\n", sum); return; } } const int pos = col * m + row; target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]); } } __global__ void kPrintData(float *A, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; __syncthreads(); if(idx == 0) printf("["); for (unsigned int i = idx;i < size; i += numThreads) printf("%f ",A[i]); __syncthreads(); if(idx == 0) printf("]\n"); } __global__ void kMaxout(float *A, float *out, float *outargmax, int maxout_level, unsigned int cols, unsigned int rows) { __shared__ float max_values[32]; __shared__ float argmax_values[32]; float const min_value = -FLT_MAX; for(int row = blockIdx.x; row < rows; row +=blockDim.x) { int softout_block_idx = row + (blockIdx.y*maxout_level*rows); if(threadIdx.x < maxout_level) { max_values[threadIdx.x] = A[softout_block_idx+(threadIdx.x*rows)]; argmax_values[threadIdx.x] = (float)((blockIdx.y*maxout_level)+threadIdx.x); } else { max_values[threadIdx.x] = min_value; argmax_values[threadIdx.x] = -1.0f; } //reduceToMax(max_values, threadIdx.x); reduceToMaxAndArgMax(max_values, argmax_values, threadIdx.x, 32); __syncthreads(); if(threadIdx.x == 0) out[row + (blockIdx.y*rows)] = max_values[0]; if(threadIdx.x == 1) outargmax[row + (blockIdx.y*rows)] = argmax_values[0]; } } __global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { extern __shared__ float max_vals[]; float cur_max = -FLT_MAX; float val = 0; const int column = gridDim.x * blockIdx.y + blockIdx.x; if (column < width) { float *cur_data = &mat[column * height] ; for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) { val = cur_data[i]; if (val > cur_max) cur_max = val; } max_vals[threadIdx.x] = cur_max; reduceToMax(max_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) target[column] = max_vals[0]; } } __global__ void kExpandToMaxoutGrad(float* error, float* indexes, float *out, int error_size, int error_rows, int maxout_level) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const int grad_size = maxout_level*error_size; for (unsigned int i = 
idx;i < grad_size; i += numThreads) out[i] = 0.0f; for (unsigned int i = idx;i < error_size; i += numThreads) { int row_idx = idx - ((idx / error_rows)*error_rows); out[row_idx + (((int)indexes[idx])*error_rows)] = error[i]; } } __global__ void kConstructVocabMatrix(float *vocab_idx, float *vocab_idx_y, float* vocab, float *rdm_idx, float *batch_X, float *batch_Y) { int middleIdx = (gridDim.y/2); int myIdx = 0; int myRdmIdx = 0; //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y //middle index is replaced by rdm word for batch_Y, but we still need to write the correct word into batch_X! if(blockIdx.y != middleIdx) { myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myIdx; } else { myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; myRdmIdx = (int)rdm_idx[blockIdx.x]; vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myRdmIdx; } int myVocabIdx = blockDim.x*myIdx; int myVocabRdmIdx = blockDim.x*myRdmIdx; if(blockIdx.y != middleIdx) { batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x]; batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x]; } else { batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x]; batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabRdmIdx + threadIdx.x]; } } __global__ void concat_batches(float **batch_X, float **batch_Y, float *out_X, float *out_Y) { //gridDim.z = matrix_count //gridDim.y = batch size //gridDim.x = window_size //blockDim.x = partial vocab size int full_vocab_size = gridDim.z*blockDim.x; int cols = gridDim.x*full_vocab_size; int partial_cols = blockDim.x*gridDim.x; //full_size times current row = current row idx //current window position times partial_threads times current matrix = current word idx //threadIdx.x current parameter within a word out_X[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_X[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x]; out_Y[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_Y[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x]; } /* //numerically unstable? 
__global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int myIdx = 0; float multiplier = -fdividef(learning_rate,float(gridDim.x)); myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; //printf("%f ",grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier); //printf("%f ",vocab[myVocabIdx + threadIdx.x]); //printf("%f ",vocab[myVocabIdx + threadIdx.x]+ (grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier)); if(myIdx > 10000) atomicAdd(&vocab[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier); //vocab[myVocabIdx + threadIdx.x] +=grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]; //printf("%s ",!isfinite(grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier)); } */ //numerically unstable? __global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab[myVocabIdx + threadIdx.x],-grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*learning_rate); } __global__ void kExpandDoubleVocabGradient(float *gradX, float *gradY, float *vocab_idx_X, float *vocab_idx_Y, float* vocab, float *vocab_grad, float *vocab_grad_idx, float learning_rate, int grad_size) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y //float multiplier = fdividef(learning_rate,(float)(gridDim.x*2)); int myIdx_X = (int)vocab_idx_X[blockIdx.x+(blockIdx.y*gridDim.x)]; int myIdx_Y = (int)vocab_idx_Y[blockIdx.x+(blockIdx.y*gridDim.x)]; //int grad_cols = grad_size/blockDim.x; int myVocabIdx_X = blockDim.x*myIdx_X; int myVocabIdx_Y = blockDim.x*myIdx_Y; atomicAdd(&vocab_grad[myVocabIdx_X + threadIdx.x],gradX[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); atomicAdd(&vocab_grad[myVocabIdx_Y + threadIdx.x],gradY[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); /* vocab_grad_idx[myIdx_X] = 1.0f; vocab_grad_idx[myIdx_Y] = 1.0f; __syncthreads(); int block_idx = (blockIdx.y*gridDim.x) + blockIdx.x; int threads_blocks = gridDim.x*gridDim.y; for(int i = block_idx; i < grad_cols; i+=threads_blocks) { if(vocab_grad_idx[i] == 1.0f) { vocab[(i*blockDim.x) + threadIdx.x] -= vocab_grad[(i*blockDim.x) + threadIdx.x]*multiplier; } } */ } /* __global__ void kExpandVocabGradient_sharedMemory(float *grad, float *vocab_idx, float *vocab_grad, float *sorted_vocab_idx, vocab_idx_size) { //vocab_vector_size = blockDim.x; //batch_size = gridDim.x //try different configs for gridDim.x, e.g 16, 32 etc. //will have vocab_vector_size = blockDim.x elements e.g. 
64 extern __shared__ float sGrads[]; float myWordIdx = 0.0f; float last_word = 0.0f; float currentIdx = 0.0f; sGrads[threadIdx.x] = 0.0f; for(int word = blockIdx.x; currentIdx < vocab_idx_size; word++) { for(int i = currentIdx; i < vocab_idx_size; i++, currentIdx++) { } } } */ __global__ void kExpandVocabGradient(float *grad, float *vocab_idx, float *vocab_grad) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); } __global__ void kExpandPartialVocabGradient(float *grad, float *vocab_idx, float *vocab_grad, int matrix_idx, int matrix_count) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int offset = matrix_idx*gridDim.x*blockDim.x; int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*(blockDim.x*matrix_count)*gridDim.x) + (threadIdx.x*gridDim.x) + offset]); } __global__ void kExpandVocabGradientMiddleWord(float *grad, float *vocab_idx, float *vocab_grad) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y if(blockIdx.x+(blockIdx.y*gridDim.x) == gridDim.y/2) { int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); } } __global__ void kDot8bit(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB) { const unsigned int threads_per_block = blockDim.x*blockDim.y; const int mygrid = blockIdx.x; const int myidx = (threadIdx.y*blockDim.x)+threadIdx.x; __shared__ float tbl_floatsA[256]; __shared__ float tbl_floatsB[256]; for(int i = myidx; i < 126; i++) { tbl_floatsA[i] = flt_tbl[i]*precisionA; tbl_floatsA[i+128] = -tbl_floatsA[i]; tbl_floatsB[i] = flt_tbl[i]*precisionB; tbl_floatsB[i+128] = -tbl_floatsB[i]; } tbl_floatsA[126] = 0.0f; tbl_floatsB[126] = 0.0f; tbl_floatsA[127] = precisionA; tbl_floatsB[127] = -precisionA; tbl_floatsA[254] = -0.0f; tbl_floatsB[254] = -0.0f; tbl_floatsA[255] = precisionB; tbl_floatsB[255] = -precisionB; __syncthreads(); for(int Arow = mygrid; Arow < rowsA; Arow+=gridDim.x) { for(int Bcol = myidx; Bcol < colsB; Bcol+=threads_per_block) { int idxout = (Bcol*rowsA) + Arow; for(int Acol = 0; Acol < colsA; Acol++) out[idxout] += tbl_floatsA[A[(Acol*rowsA)+Arow]] * tbl_floatsB[B[(colsA*Bcol) + Acol]]; } } } __global__ void kDot8bit_shared(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB) { int myidx = (threadIdx.y*blockDim.x)+threadIdx.x; __shared__ unsigned char A_tile[64][256]; //64x32 banks __shared__ unsigned char B_tile[64][256];//256x8 banks __shared__ float tbl_floatsA[256]; __shared__ float tbl_floatsB[256]; for(int i = myidx; i < 126; i++) { tbl_floatsA[i] = flt_tbl[i]*precisionA; tbl_floatsA[i+128] = -tbl_floatsA[i]; tbl_floatsB[i] = flt_tbl[i]*precisionB; tbl_floatsB[i+128] = -tbl_floatsB[i]; } tbl_floatsA[126] = 0.0f; tbl_floatsB[126] = 0.0f; tbl_floatsA[127] = precisionA; 
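// Table layout (as built by kDecompression_8bit above): entries 0-125 hold the
// quantization magnitudes from flt_tbl scaled by the per-matrix precision,
// entry 126 encodes zero and entry 127 the saturation value; the high bit of an
// 8-bit code (offset +128) selects the sign-flipped counterpart.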
tbl_floatsB[127] = -precisionA; tbl_floatsA[254] = -0.0f; tbl_floatsB[254] = -0.0f; tbl_floatsA[255] = precisionB; tbl_floatsB[255] = -precisionB; __syncthreads(); myidx = threadIdx.y*16; for(int Arow = threadIdx.x; Arow < rowsA; Arow+=64)//threadDim.x = 64 { for(int Acol = threadIdx.y*16; Acol < colsA; Acol+=256)//threadDim.y = 16 { for(int i = 0; i < 16; i++) A_tile[Arow][Acol+i] = A[((Acol+i)*rowsA)+ Arow]; for(int i = 0; i < 16; i++) B_tile[Arow][Acol+i] = B[(Arow*colsA)+ Acol+i];//B_tile is transposed to avoid bank conflicts with 64 threads __syncthreads(); for(int Bcol = 0; Bcol < 64; Bcol++) for (int i = 0; i < 16; ++i)// atomicAdd(&out[((Bcol)*rowsA) + Arow],tbl_floatsA[A_tile[threadIdx.x][myidx + i]] * tbl_floatsB[B_tile[Bcol][myidx + i]]); } } } __global__ void MatMul(float* A, float* B, float* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) { float CValue = 0; int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ float As[TILE_DIM][TILE_DIM]; __shared__ float Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows) As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x]; else As[threadIdx.y][threadIdx.x] = 0.0; if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols) Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else Bs[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; __syncthreads(); } if (Row < CRows && Col < CCols) C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols)+(blockIdx.x*blockDim.x)+threadIdx.x]=CValue; } static __device__ void saxpy(float alpha, const float* b, float* c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } __global__ void sgemm_kernel_N_N_64_16_16_16_4(float* C,const float* A,const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { __shared__ float Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int ibx = blockIdx.x * 64; int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; /* Taking care of invalid memory access in dimension M */ if ( ibx+idt >= m ) A += ibx+0; else A += ibx + idt; C += ibx + idt + __mul24(iby, ldc); B += tx+__mul24(iby, ldb); /* These variables guide the threads to avoid invalid memory accesses in dimension N. Simply it's the stopping criterion. or you can say that access index wraps around to a valid memory location. 
*/ int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb; if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; } if ( s1 == 0 ) B += __mul24(ty, ldb); else s1=0; const float *Bend = B + k - k % 16; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 15 ) { do { float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); A += 4 * lda; saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[12][0], Cb ); saxpy( Ab[1], &Bb[13][0], Cb ); saxpy( Ab[2], &Bb[14][0], Cb ); saxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); } /* Common sub expression elimination. */ ibx = ibx + idt - m; /* remembering k dimension */ ldb = m = k; /* k changed to support the generic case and reuse valuable registers */ k = k % 16; m -= k; /* Here we are taking care of k % dim_k portions */ if ( k != 0 ) { /* Avoid Invalid Memory access in dimension K If some thread enters this if ( ) block first access to B should be valid as K isn't divisible by blk_K Note that dimension N has been taken care of by s1, s2, s3, s4 But depending upon K and thread index tx, some memory access may be still invalid, so take care of them now by setting s1, s2, s3, s4 = 0 B might have been advanced in the previous loop, take care of that, this is about right bottom corner. 
*/ if ( m + tx >= ldb ) { s1 = s2 = s3 = s4 = 0; B -= tx; } Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); for(int i=0; i < k; i++) { saxpy( A[0], &Bb[i+0][0], Cb ); A += lda; } } /* Now taking care of dimension M, N that doesnt fit into blocks */ if ( (iby+16) >= n ) { lda = n - iby; } else { lda = 16; } if ( ibx >= 0 ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + 
beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } } __global__ void sgemmNN( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta ) { const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int id = inx + iny*16; A 
+= ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const float *Blast = B + k; float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; __shared__ float bs[16][17]; do { #pragma unroll for( int i = 0; i < 16; i += 4 ) bs[inx][iny+i] = B[i*ldb]; __syncthreads(); #pragma unroll for( int i = 0; i < 16; i++, A += lda ) saxpy( A[0], &bs[i][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++, C += ldc ) C[0] = alpha*c[i] + beta*C[0]; } __global__ void sgemm_kernel_N_T_64_16_4_16_4(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; if ( iby + tx >= n ) B += iby + 0; else B += iby + tx; /* Taking care of boundary cases where K < 4. */ if ( ty >= k ) B += __mul24( 0, ldb ); else B += __mul24( ty, ldb ); if ( ibx + idt >= m ) A += ibx + 0; else A += ibx + idt; int s2=lda, s3=2*lda, s4=3*lda; switch (k) { case 1: s2=0; s3=0; s4=0; break; case 2: s2=lda; s3=0; s4=0; break; case 3: s2=lda; s3=2*lda; s4=0; break; } C += ibx + idt + __mul24( iby, ldc ); float Ap[4] = { A[0], A[s2], A[s3], A[s4] }; float b = B[0]; const float *Bend = B + ldb*(k - k % 4); B += 4*ldb; A += 4*lda; __shared__ float Bb[4][16]; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 7 ) { do { float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]}; Bb[ty][tx]=b; __syncthreads(); Ap[0] = A[0]; Ap[1] = A[s2]; Ap[2] = A[s3]; Ap[3] = A[s4]; b=B[0]; saxpy( Ab[0], &Bb[0][0], Cb ); saxpy( Ab[1], &Bb[1][0], Cb ); saxpy( Ab[2], &Bb[2][0], Cb ); saxpy( Ab[3], &Bb[3][0], Cb ); A += 4*lda; B += 4*ldb; __syncthreads(); } while (B < Bend); } if ( k > 3 ) { Bb[ty][tx]=b; int k1 = k - k % 4; if ( (k1+ty) >= k ) B -= 4*ldb; else B -= 0*ldb; if ( (k1+0) >= k ) {s2=0; s3=0*lda; s4=0; A -= 4*lda; } else if ( (k1+1) >= k ) {s2=0; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; } __syncthreads(); b=B[0]; saxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0]; saxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2]; saxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3]; saxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4]; } k = k % 4; if ( k != 0 ) { __syncthreads(); Bb[ty][tx]=b; __syncthreads(); for(int i=0; i < k; i++) { saxpy( Ap[i], &Bb[i][0], Cb ); } } if ( (iby+16)>=n) { lda = n-iby; } else{ lda = 16; } if ( (ibx+idt) >= m ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = 
alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + 
beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } } __global__ void sgemm_kernel_T_N_32_32_8_8_8(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { const int ibx = blockIdx.x * 32; const int iby = blockIdx.y * 32; const int tx = threadIdx.y; const int ty = threadIdx.x; int idt = tx*8 + ty; if ( ty >= k ) A += __mul24(ibx, lda) + 0; else A += __mul24(ibx, lda) + ty; if ( (ibx + tx) >= m ) A += __mul24(0, lda); else A += __mul24(tx, lda); if ( (iby+tx) >= n ) B += __mul24(iby+0, ldb); else B += __mul24(iby+tx, ldb); if ( ty >= k ) B += 0; else B += ty; C += ibx + idt % 32 + __mul24( iby + 16*(idt/32), ldc ); lda = lda * 8; ldb = ldb * 8; int as1=0, as2=lda, as3=2*lda, as4=3*lda; int bs1=0, bs2=ldb, bs3=2*ldb, bs4=3*ldb; switch(k) { case 1: as2=0; as3=0*lda; as4=0; bs2=0; bs3=0*ldb; bs4=0; break; case 2: as2=lda; as3=0*lda; as4=0; bs2=ldb; bs3=0*ldb; bs4=0; break; case 3: as2=lda; as3=2*lda; as4=0; bs2=ldb; bs3=2*ldb; bs4=0; break; } if ( (ibx + tx ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else if ( (ibx + tx + 8 ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else if ( (ibx + tx + 16) >= m ) { as1=0; as2=1*lda; as3=0*lda; as4=0*lda; } else if ( (ibx + tx + 24) >= m ) { as1=0; as2=1*lda; as3=2*lda; as4=0*lda; } if ( (iby + tx ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else if ( (iby + tx + 8 ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else if ( (iby + tx + 16) >= n ) { bs1=0; bs2=1*ldb; bs3=0*ldb; bs4=0*ldb; } else if ( (iby + tx + 24) >= n ) { bs1=0; bs2=1*ldb; bs3=2*ldb; bs4=0*ldb; } float b = B[bs1]; float b1 = B[bs2]; float b2 = B[bs3]; float b3 = B[bs4]; float Ap[4] = { A[as1], A[as2], A[as3], A[as4] }; const float *Bend = B + (k - k % 8); B 
+= 8; A += 8; __shared__ float Bb[8][33]; __shared__ float ABb[32][9]; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; const int l = 17*(idt/32); int idt1 = idt; idt = idt % 32; if ( k > 15 ) { do { Bb[ty][tx ] = b; Bb[ty][tx+8 ] = b1; Bb[ty][tx+17] = b2; Bb[ty][tx+25] = b3; ABb[tx ][ty] = Ap[0]; ABb[tx+8 ][ty] = Ap[1]; ABb[tx+16][ty] = Ap[2]; ABb[tx+24][ty] = Ap[3]; __syncthreads(); saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1]; saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2]; saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3]; saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4]; saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1]; saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2]; saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3]; saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4]; B += 8; A += 8; __syncthreads(); } while (B < Bend); } if ( k > 7 ) { Bb[ty][tx ] = b; Bb[ty][tx+8 ] = b1; Bb[ty][tx+17] = b2; Bb[ty][tx+25] = b3; ABb[tx ][ty] = Ap[0]; ABb[tx+8 ][ty] = Ap[1]; ABb[tx+16][ty] = Ap[2]; ABb[tx+24][ty] = Ap[3]; __syncthreads(); as1 = k - k % 8; if ( as1+ty >= k ) { bs1=0*ldb; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; B -= 8; } if ( as1+ty >= k ) { as1=0*lda; as2=0*lda; as3=0*lda; as4=0*lda; A -= 8; } as1=0; saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1]; saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2]; saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3]; saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4]; saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1]; saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2]; saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3]; saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4]; } k = k % 8; if ( k != 0 ) { __syncthreads(); Bb[ty][tx ] = b; Bb[ty][tx+8 ] = b1; Bb[ty][tx+17] = b2; Bb[ty][tx+25] = b3; ABb[tx ][ty] = Ap[0]; ABb[tx+8 ][ty] = Ap[1]; ABb[tx+16][ty] = Ap[2]; ABb[tx+24][ty] = Ap[3]; __syncthreads(); for(int i=0; i < k; i++) { saxpy( ABb[idt][i], &Bb[i][l], Cb ); } } if ( (iby+16*(idt1/32+1)) >= n ) { lda = n - iby - 16*(idt1/32); } else { lda = 16; } if ( (ibx+idt) >= m ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = 
alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 
7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } }
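/*
   A minimal host-side launch sketch for the transposed-A (T,N) kernel above,
   sgemm_kernel_T_N_32_32_8_8_8: its indexing (idt = threadIdx.y*8 + threadIdx.x,
   ibx = blockIdx.x*32, iby = blockIdx.y*32) implies an 8x8 thread block and one
   block per 32x32 tile of C. The wrapper name and the use of the default stream
   are illustrative assumptions, not part of the original interface.
*/
static void launch_sgemm_T_N_32_32_8_8_8(float *d_C, const float *d_A, const float *d_B,
                                         int m, int n, int k, int lda, int ldb, int ldc,
                                         float alpha, float beta)
{
    dim3 threads(8, 8);                         // matches idt = threadIdx.y*8 + threadIdx.x
    dim3 grid((m + 31) / 32, (n + 31) / 32);    // one block per 32x32 tile of C
    sgemm_kernel_T_N_32_32_8_8_8<<<grid, threads>>>(d_C, d_A, d_B, m, n, k,
                                                    lda, ldb, ldc, alpha, beta);
}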
87a74e1c8c82f4b4d0879873dbd5c09c3d5e6ad3.cu
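/*
   The elementwise kernels in this file (kFill_with, kAdd, kMul, kScalarMul, ...) share one
   grid-stride loop pattern, so a fixed launch configuration can cover arrays of any size.
   The sketch below only illustrates that pattern: the demo kernel, the launcher name, and
   the 32-thread / capped-grid launch configuration are assumptions, not taken from this file.
*/
#include <cuda_runtime.h>

__global__ void demo_grid_stride_scale(float *data, float scalar, int size)
{
    const unsigned int numThreads = blockDim.x * gridDim.x;   // total threads in the grid
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;  // this thread's global index
    // each thread strides over the array, so the grid never has to match 'size' exactly
    for (unsigned int i = idx; i < size; i += numThreads)
        data[i] *= scalar;
}

void demo_launch_grid_stride(float *d_data, float scalar, int size)
{
    int blocks = (size + 31) / 32;
    if (blocks > 4096) blocks = 4096;   // cap the grid; the stride loop covers the remainder
    demo_grid_stride_scale<<<blocks, 32>>>(d_data, scalar, size);
}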
#include <basicOps.cuh> #include <curand.h> #include <curand_kernel.h> #include <float.h> const int NUM_THREADS = 32; __global__ void kGetNonZeroElements(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) atomicAdd(&out[0],A[i] != 0.0f ? 1.0f : 0.0f); } __global__ void kGetNonZeroColumns(float *A, float *out, int rows, int cols) { const int myCol = (blockIdx.x * blockDim.x) + threadIdx.x; float result = 0.0f; if(myCol < cols) { for (unsigned int i = 0;i < rows; i++) { if(A[(myCol*rows) + i] != 0.0f) result = 1.0f; } atomicAdd(&out[0],result); } } __global__ void kRenormalizeWeights(float *w, float *unit_sums, float limit, int rows, int cols) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const int size = rows*cols; int myCol = 0; float rel_diff = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { myCol = i/rows; if(unit_sums[myCol] > limit) { rel_diff = 1.0f/unit_sums[myCol]; w[i] *= rel_diff; } else{ continue; } } } __global__ void kFill_with(float *m, float fill_value, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) m[i] = fill_value; } __global__ void kFill_with(int *m, int fill_value, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) m[i] = fill_value; } __global__ void kRdmNumbers(float *seed, int size, float *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned long long s[ 2 ]; //s[0] = (long long)seed[(gridDim.x*blockIdx.x) + threadIdx.x]; //s[1] = (long long)seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x]; s[0] = 17; s[1] = 83; unsigned long long s1 = s[ 0 ]; unsigned long long s0 = s[ 1 ]; unsigned long long rdm64 = 23459867034598355; if(idx == 0) { printf("rdm: %i\n", rdm64); printf("rdm1: %i\n", (unsigned int)(rdm64&0xffffffff)); printf("rdm2: %i\n", (unsigned int)((rdm64>>32)&0xffffffff)); } unsigned int rdm32_1 = 0; unsigned int rdm32_2 = 0; //printf("seed 1: %i\n", seed[(gridDim.x*blockIdx.x) + threadIdx.x]); //printf("seed 2: %i\n", seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x]); //printf("idx: %i\n", idx); for(int i = idx*2; i < size; i+=numThreads*2) { s1 = s[0]; s0 = s[1]; s[0] = s0; s1 ^= s1 << 23; // a rdm64 = (s[1 ] = (s1 ^ s0 ^ (s1 >> 17) ^ (s0 >> 26))) + s0; // b, c rdm32_1 = (rdm64&0xffffffff); rdm32_2 = ((rdm64>>32)&0xffffffff); out[i] = rdm32_1; out[i+1] = rdm32_2; } seed[(gridDim.x*blockIdx.x) + threadIdx.x] = s[0]; seed[(gridDim.x*(blockIdx.x+1)) + threadIdx.x] = s[1]; } __global__ void kCreateRdmSqrtWeight_Logistic(float *A, int in, int out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const float lower_limit = -4.0f*sqrtf(6.0f/((float)in + out)); const float upper_limit = 4.0f*sqrtf(6.0f/((float)in + out)); const float range = upper_limit-lower_limit; for (unsigned int i = idx;i < size; i += numThreads) { A[i] = lower_limit + (A[i]*range); } } __global__ void kCreateSparseRdmWeight(float *rdm, float* indicies, float *out, int rows, int cols, int connections) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = 
(blockIdx.x * blockDim.x) + threadIdx.x; int connection_idx = 0; float rdm_value = 0.0f; int size = connections*cols; int current_col = 0; //each thread fills one row for (unsigned int i = idx; i < size; i += numThreads) { connection_idx = (int)indicies[i]; rdm_value = rdm[i]; current_col = i/(connections); out[(current_col*rows)+connection_idx] = rdm_value; } } __global__ void kRandInt(float *A, int lower_limit, int upper_limit, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const int range = upper_limit-lower_limit + 1; for (unsigned int i = idx;i < size; i += numThreads) { //use uniform random sample to get integers A[i] = (float)(((int)((A[i]*range))) + lower_limit); } } //vertical stack for column major format __global__ void vStack(float *A, float *B, float *out, int size_out, int rows_a, int rows, int cols) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_col = 0; int current_row = 0; int offset = 0; const int rows_b = rows - rows_a; for (unsigned int i = idx;i < size_out; i += numThreads) { current_col = i / rows; //int arithmetic offset = (current_col*rows); current_row = i - offset; if(current_row >= rows_a) { //fetch b value out[i] = B[(current_col*rows_b) + current_row - rows_a]; } else { //fetch a value out[i] = A[(current_col*rows_a) + current_row]; } } } __global__ void hStack(float *A, float *B, float *out, int size_out, int size_a) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for(unsigned int i = idx; i < size_out; i+=numThreads) { if(i >= size_a) { //append B out[i] = B[i - size_a]; } else { //append A out[i] = A[i]; } } } __global__ void hStackN(float **arrA, int general_size, float *out, int size_out, int matrices_count) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_matrix = 0; for(unsigned int i = idx; i < size_out; i+=numThreads) { current_matrix = i / general_size; current_matrix = current_matrix == matrices_count ? current_matrix - 1 : current_matrix; out[i] = arrA[current_matrix][i - (current_matrix*general_size)]; } } __global__ void vStackN(float **arrA, float *out, int rows, int cols) { int size = rows*cols; int offset = rows*cols*blockIdx.x; for(unsigned int i = threadIdx.x; i < size; i+=blockDim.x) out[offset + i] = arrA[blockIdx.x][i]; } __global__ void AddGradientsN(float **arrA, int size, int myrank, int matrix_count, float multiplier) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for(int matrix_idx = 0; matrix_idx < matrix_count; matrix_idx++) { if(matrix_idx == myrank){ continue; } for(unsigned int i = idx; i < size; i+=numThreads) arrA[myrank][i] += arrA[matrix_idx][i]; } //better numerical stability to do it afterwards for(unsigned int i = idx; i < size; i+=numThreads) arrA[myrank][i] *=multiplier; } __global__ void hStackN(Matrix **arrA, int general_size, float *out, int size_out, int matrices_count) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_matrix = 0; for(unsigned int i = idx; i < size_out; i+=numThreads) { current_matrix = i / general_size; current_matrix = current_matrix == matrices_count ? 
current_matrix - 1 : current_matrix; out[i] = arrA[current_matrix]->data[i - (current_matrix*general_size)]; } } __global__ void kAdd_to_z(float *z, float *z1, float *y, float *y_count, int rows, int cols, float *out) { float value = 0; for(int row = blockIdx.x; row < rows; row +=gridDim.x) { int cls = (int)y[row]; if(threadIdx.x == 0) atomicAdd(&y_count[cls],1.0f); for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x) { value = z1[row + (col*rows)]; atomicAdd(&out[cls+(col*rows)],value); } } __syncthreads(); for(int row = blockIdx.x; row < rows; row +=gridDim.x) { int cls = (int)y[row]; for (unsigned int col = threadIdx.x; col < cols; col += blockDim.x) { if(y_count[cls] > 0) out[cls+(col*rows)] /= y_count[cls]; } } } __global__ void kAdd(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i] + B[i]; } __global__ void kMul(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i] * B[i]; } __global__ void kSub(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i] - B[i]; } __global__ void kSub_Sparse(float *A, float *data, int *ptr_rows, int *idx_cols, float *out, int rows, int cols, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int row_idx = 0; for (unsigned int i = idx;i < rows*cols; i += numThreads) out[i] = A[i]; for (unsigned int i = idx;i < size; i += numThreads) { for(int j = 0; j < rows + 1; j++) { if(ptr_rows[j] > i) { row_idx = j-1; break; } } out[(idx_cols[i] * rows) + row_idx] = A[(idx_cols[i] * rows) + row_idx] - data[i]; } } __global__ void kDiv(float *A, float *B, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = fdividef(A[i],B[i]); } __global__ void kExp(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = expf(A[i]); } __global__ void kLogistic(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = 1.0f / (1.0 + expf(-A[i])); } __global__ void kLogisticGrad(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i]*(1 - A[i]); } __global__ void kSqrt(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = sqrtf(A[i]); } __global__ void kLog(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += 
numThreads) out[i] = logf(A[i]); } __global__ void kSquare(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = powf(A[i], 2.0f); } __global__ void kAbs(float *A, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = fabsf(A[i]); } __global__ void kScalarMul(float *A, float scalar, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = scalar*A[i]; } __global__ void kScalarAdd(float *A, float scalar, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = A[i]+scalar; } __global__ void kTranspose(float *A, float *out, int width, int height) { __shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1]; // read the Matrix *tile into shared memory unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = A[index_in]; } __syncthreads(); // write the transposed Matrix *tile to global memory xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; out[index_out] = block[threadIdx.x][threadIdx.y]; } } //for column major data __global__ void slice_rows(float *A, float *out, int size_out, int rows_A, int start, int end) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int current_col = 0; int current_row = 0; int offset = 0; int rows_out = (end - start) + 1; for (unsigned int i = idx;i < size_out; i += numThreads) { current_col = i / rows_out; //note: int arithmetic current_row = i - (current_col*rows_out); offset = rows_A*current_col; out[i] = A[offset + start + current_row]; } } //for column major data __global__ void slice_cols(float *A, float *out, int start, int rows, int size_out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx; i < size_out; i += numThreads) { out[i] = A[i+(start*rows)]; } } __device__ void reduceToMax(float* sdata, unsigned int tid) { //Synchronize threads to share shared memory data __syncthreads(); float mySum = sdata[tid]; // do reduction in shared mem if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 256]); } __syncthreads(); } if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 128]); } __syncthreads(); } if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = fmaxf(mySum, sdata[tid + 64]); } __syncthreads(); } if (NUM_THREADS == 32){ if (tid < 16) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smem = sdata; if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); } if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); } if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); } if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); } if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); } } } else { if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smem = sdata; if (NUM_THREADS >= 64) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 32]); } if (NUM_THREADS >= 32) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 16]); } if (NUM_THREADS >= 16) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 8]); } if (NUM_THREADS >= 8) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 4]); } if (NUM_THREADS >= 4) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 2]); } if (NUM_THREADS >= 2) { smem[tid] = mySum = fmaxf(mySum, smem[tid + 1]); } } } } __device__ void reduceToMaxAndArgMax(float* sdataMax, float* sdataArgMax, unsigned int tid, int threads) { //Synchronize threads to share shared memory data __syncthreads(); float mySum = sdataMax[tid]; if(threads == 32) { if (tid < 16) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smemMax = sdataMax; volatile float* smemArgMax = sdataArgMax; if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; } if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; } if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; } if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; } if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; } } } else { if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smemMax = sdataMax; volatile float* smemArgMax = sdataArgMax; if (NUM_THREADS >= 64) if(mySum < smemMax[tid + 32]){smemMax[tid] = mySum = smemMax[tid + 32]; smemArgMax[tid] = smemArgMax[tid + 32]; } if (NUM_THREADS >= 32) if(mySum < smemMax[tid + 16]){smemMax[tid] = mySum = smemMax[tid + 16]; smemArgMax[tid] = smemArgMax[tid + 16]; } if (NUM_THREADS >= 16) if(mySum < smemMax[tid + 8]){smemMax[tid] = mySum = smemMax[tid + 8]; smemArgMax[tid] = smemArgMax[tid + 8]; } if (NUM_THREADS >= 8) if(mySum < smemMax[tid + 4]){smemMax[tid] = mySum = smemMax[tid + 4]; smemArgMax[tid] = smemArgMax[tid + 4]; } if (NUM_THREADS >= 4) if(mySum < smemMax[tid + 2]){smemMax[tid] = mySum = smemMax[tid + 2]; smemArgMax[tid] = smemArgMax[tid + 2]; } if (NUM_THREADS >= 2) if(mySum < smemMax[tid + 1]){smemMax[tid] = mySum = smemMax[tid + 1]; smemArgMax[tid] = smemArgMax[tid + 1]; } } } } __device__ void reduceToSumLocal(float* sdata, unsigned int tid) { //Synchronize threads to share shared memory data __syncthreads(); float mySum = sdata[tid]; // do reduction in shared mem if (NUM_THREADS >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (NUM_THREADS >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (NUM_THREADS >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (NUM_THREADS == 32){ if (tid < 16) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile float* smem = sdata; if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } } else { if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile float* smem = sdata; if (NUM_THREADS >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (NUM_THREADS >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (NUM_THREADS >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (NUM_THREADS >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (NUM_THREADS >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (NUM_THREADS >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } } } __global__ void kSoftMax(float* A, float* out, unsigned int rows, unsigned int cols) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float col_value = 0.0f; __shared__ float max_values[THREADS_PER_BLOCKS]; __shared__ float row_sums[THREADS_PER_BLOCKS]; for (unsigned int row = idx; row < rows; row += numThreads) { //fill with min values max_values[idx] = -FLT_MAX; row_sums[idx] = 0.0f; //calc max value of the row for (unsigned int i = 0; i < cols; i++) { col_value = A[(i*rows) + row]; if(col_value > max_values[idx]) { max_values[idx] = col_value; } } //calc the row sum for (unsigned int i = 0; i < cols; i++) { row_sums[idx] += __expf(A[(i*rows) + row] - max_values[idx]); } //calc the value of each element in the row for (unsigned int i = 0; i < cols; i++) { out[(i*rows) + row] = __expf(A[(i*rows) + row] - max_values[idx])/row_sums[idx]; } } } //for column major data __global__ void kSubMatrixVector(float *A, float *v, float *out, int rows, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; //offset = current_column * rows int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { offset = (i / rows)*rows; //note: int arithmetic out[i] = A[i] - v[i - offset]; } } //for column major data __global__ void kAddMatrixVector(float *A, float *v, float *out, int rows, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; //offset = current_column * rows int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { offset = (i / rows); //note: int arithmetic out[i] = A[i] + v[offset]; } } //for column major data __global__ void kAddScaledMatrixVector(float *A, float *v, float weight, float *out, int rows, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; //offset = current_column * rows int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { offset = (i / rows); //note: int arithmetic out[i] = A[i] + (v[offset]*weight); } } //for column major data __global__ void kMulMatrixVector(float *A, float *v, float *out, int rows, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; //offset = current_column * rows int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { offset = (i / rows); //note: int arithmetic out[i] = A[i] * v[offset]; } } __global__ void kArgmax(float* A, float* out, unsigned int rows, unsigned int cols) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float max_value = -FLT_MAX; float max_i = 0; float col_value = 0.0f; for (unsigned int row = idx; row < rows; row += numThreads) { for (unsigned int i = 0; i < cols; i++) { col_value = A[(i*rows) + row]; if(col_value > max_value) { max_value = col_value; max_i = i; } } out[row] = max_i; } } __global__ void 
kCreate_t_matrix(float *labels, float *out, int rows, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  int label = 0;
  int offset = 0;
  for (unsigned int i = idx;i < size; i += numThreads)
  {
      label = (int)(labels[i]);
      //offset = (label*rows) gives the current column; i gives the current row
      offset = (label*rows) + i;
      out[offset] = 1.0f;
  }
}
__global__ void kEqual(float *A, float *B, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  for (unsigned int i = idx;i < size; i += numThreads)
  {
      out[i] = (float)(A[i] == B[i]);
  }
}
__global__ void kRectifiedLinear(float *A, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  for (unsigned int i = idx;i < size; i += numThreads)
      out[i] = A[i] > 0.0f ? A[i] : 0.0f;
}
__global__ void kRectifiedLinear_Derivative(float *A, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  for (unsigned int i = idx;i < size; i += numThreads)
      out[i] = A[i] > 0.0f ? 1.0f : 0.0f;
}
__global__ void kDoubleRectifiedLinear(float *A, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  float value = 0.0f;
  for (unsigned int i = idx;i < size; i += numThreads)
  {
      value = (A[i] > 0.0f) ? A[i] : 0.0f;
      out[i] = (value < 1.0f) ? value : 1.0f;
  }
}
__global__ void kLinear(float *A, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  for (unsigned int i = idx;i < size; i += numThreads)
      out[i] = A[i];
}
__global__ void kDoubleRectifiedLinear_Derivative(float *A, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  for (unsigned int i = idx;i < size; i += numThreads)
  {
      out[i] = (A[i] <= 0.0f) || (A[i] >=1.0f) ? 0.0f : 1.0f;
  }
}
__global__ void kHardTanH(float *A, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  float value = 0.0f;
  for (unsigned int i = idx;i < size; i += numThreads)
  {
      //clamp A[i] to the range [-1, 1]
      value = (A[i] > 1.0f) ? 1.0f : A[i];
      out[i] = (value < -1.0f) ? -1.0f : value;
  }
}
__global__ void kPairwise_ranking(float *A, float *B, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  float value = 0.0f;
  for (unsigned int i = idx;i < size; i += numThreads)
  {
      value = 1.0f - A[i] + B[i];
      out[i] = value < 0.0f ? 0.0f : value;
  }
}
__global__ void kPairwise_ranking_derivative(float *A, float *B, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  for (unsigned int i = idx;i < size; i += numThreads)
      out[i] = (1.0f - A[i] + B[i]) > 0.0f ? 1.0f : 0.0f;
}
__global__ void kHardTanH_Derivative(float *A, float *out, int size)
{
  const unsigned int numThreads = blockDim.x * gridDim.x;
  const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  for (unsigned int i = idx;i < size; i += numThreads)
      out[i] = (A[i] < -1.0f) || (A[i] >1.0f) ?
0.0f : 1.0f; } __global__ void kSquaredError(float *A, float *t, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) out[i] = powf(A[i] -t[i],2.0f); } __global__ void kSum(float *v, float *out, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; out[0] = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { atomicAdd(&out[0],v[i]); } } __global__ void kArange(float *out, int start, int rows, int cols, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { offset = (i % rows)*cols; out[i] = (float)(offset + (i/rows) + start); } } __global__ void kDropout(float *A, float *rdm, float dropout, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; for (unsigned int i = idx;i < size; i += numThreads) rdm[i] = rdm[i] > dropout ? A[i] : 0.0f; } __global__ void kDropout_cached(float *A, float *dropout, float *out, int current_idx, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = ((blockIdx.x * blockDim.x) + threadIdx.x); int shifted_idx = 0; int offset = 0; for (unsigned int i = idx;i < size; i += numThreads) { shifted_idx = i +current_idx; offset = shifted_idx/10000; out[i] = dropout[shifted_idx - (offset*10000)] == 1.0f ? A[i] : 0.0f; } } __global__ void kRMSprop(float *RMS, float *grad, float RMS_multiplier, float learning_rate, int batch_size, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; } } __global__ void kRMSprop_with_momentum_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; float momentum_matrix_value = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); momentum_matrix_value = m[i]; momentum_matrix_value -= grad_value; RMS[i] = RMS_value; m[i] = momentum_matrix_value; } } __global__ void kLocalGrad (float *z, float *w, float *y, float *m, float learning_rate, int batch_size, int size, float momentum) { } __global__ void kRMSprop_with_momentum_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - 
RMS_multiplier; float momentum_matrix_value = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); momentum_matrix_value = m[i] = (momentum*momentum_matrix_value) - grad_value; RMS[i] = RMS_value; w[i] += momentum_matrix_value; } } __global__ void kRMSprop_with_nesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); m[i] = (momentum*m[i]) - (learning_rate*grad_value); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; w[i] -= grad_value; /* grad_value = learning_rate*fdividef(grad[i],(float)batch_size); m[i] = (momentum*m[i]) - grad_value; w[i] -= grad_value; */ } } __global__ void kNesterov_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = learning_rate*fdividef(grad[i],(float)batch_size); m[i] = (momentum*m[i]) - grad_value; w[i] -= grad_value; } } __global__ void kCompression_8bit_test(float *tbl, float *A, float precision, int size, float *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float absnumber = 0.0; float multiplier = 0.1f/precision; float threshold = precision/1.e6f; __shared__ float tbl_values[128]; if(threadIdx.x < 126) tbl_values[threadIdx.x] = tbl[threadIdx.x]; __syncthreads(); for (int i = idx;i < size; i += numThreads) { int isNegative = 0; int pivot = 63; int upper_pivot = 125; int lower_pivot = 0; absnumber = A[i]*multiplier; if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; } if(absnumber < threshold){ out[i] = 0.0f; continue; } for(int j = 32; j > 0; j>>=1) { if(absnumber > tbl_values[pivot]) { lower_pivot = pivot; pivot+=j; } else { upper_pivot = pivot; pivot-=j; } } if(lower_pivot == pivot) if(fabsf(tbl_values[pivot]-absnumber) < (tbl_values[upper_pivot]-absnumber)) out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier); else out[i] = tbl_values[upper_pivot]/(isNegative == 1 ? -multiplier : multiplier); else if((tbl_values[pivot]-absnumber) < fabsf(tbl_values[lower_pivot]-absnumber)) out[i] = tbl_values[pivot]/(isNegative == 1 ? -multiplier : multiplier); else out[i] = tbl_values[lower_pivot]/(isNegative == 1 ? 
-multiplier : multiplier); } } __global__ void kDecompression_8bit(float *flt_tbl, unsigned char *A, float precision, int size, float *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; __shared__ float tbl_floats[256]; if(threadIdx.x < 126) { tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x]*precision; tbl_floats[threadIdx.x+128] = -tbl_floats[threadIdx.x]; } tbl_floats[126] = 0.0f; tbl_floats[254] = -0.0f; tbl_floats[127] = precision; tbl_floats[255] = -precision; __syncthreads(); for (int i = idx;i < size; i += numThreads) { out[i] = tbl_floats[A[i]]; } } __global__ void kCompression_8bit(float *flt_tbl, float *A, float precision, int size, unsigned char *out) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float absnumber = 0.0f; float threshold_lower = 0.0000015; float threshold_upper = 0.995703; int isNegative = 0; int pivot = 63; int upper_pivot = 125; int lower_pivot = 0; __shared__ float tbl_floats[128]; if(threadIdx.x < 126) tbl_floats[threadIdx.x] = flt_tbl[threadIdx.x]; __syncthreads(); for (int i = idx;i < size; i += numThreads) { isNegative = 0; pivot = 63; upper_pivot = 125; lower_pivot = 0; absnumber = A[i]/precision; if(absnumber < 0.0f){isNegative = 1; absnumber=-absnumber; } if(absnumber < threshold_lower){ out[i] = (unsigned char)126; continue; } if(absnumber > threshold_upper){ out[i] = (isNegative == 0 ? (unsigned char)127 : (unsigned char)255); continue; } for(int j = 32; j > 0; j>>=1) { if(absnumber > tbl_floats[pivot]) { lower_pivot = pivot; pivot+=j; } else { upper_pivot = pivot; pivot-=j; } } if(lower_pivot == pivot) if(fabsf(tbl_floats[pivot]-absnumber) < (tbl_floats[upper_pivot]-absnumber)) if(isNegative == 1) out[i] = pivot | 1 << 7; else out[i] = pivot; else if(isNegative == 1) out[i] = upper_pivot | 1 << 7; else out[i] = upper_pivot; else if((tbl_floats[pivot]-absnumber) < fabsf(tbl_floats[lower_pivot]-absnumber)) if(isNegative == 1) out[i] = (pivot | 1 << 7); else out[i] = pivot; else if(isNegative == 1) out[i] = lower_pivot | 1 << 7; else out[i] = lower_pivot; } } __global__ void kRMSprop_with_weight_update (float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size) ; RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad_value = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; w[i] -= grad_value; } } __global__ void kRMSprop_with_weight_update_8bit(float *RMS, float *grad, float *w, float *m, float RMS_multiplier, float learning_rate, int batch_size, int size, float momentum) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; float grad_value = 0.0f; float RMS_value = 0.0f; float rms_reciprocal = 1.0f - RMS_multiplier; for (unsigned int i = idx;i < size; i += numThreads) { grad_value = fdividef(grad[i],(float)batch_size); RMS_value = (RMS_multiplier*RMS[i]) + (powf(grad_value,2.0f)*rms_reciprocal); grad[i] = learning_rate*fdividef(grad_value,(sqrtf(RMS_value)+1.0e-08f)); RMS[i] = RMS_value; } } __global__ void 
kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < m && col < n) { /* for(int i = 0; i < indptr[m+1];i++) if(indices[i] > 23) { printf("ERROR: \n"); printf("%i \n", indices[i]); printf("col: %i \n", col); printf("row: %i \n", row); } */ int max_idx = indptr[m+1]; for(int i = 0; i < m+1;i++) if(indptr[i] > max_idx) { printf("ERROR: \n"); printf("%i \n", indptr[i]); printf("max_idx: %i \n", max_idx); } const int start = indptr[row]; const int end = indptr[row + 1]; float sum = 0.f; for (int i = start; i < end; i++) { /* for(int a = start; a < end;a++) if(indices[a] > 23) { printf("ERROR: \n"); printf("%i \n", indices[a]); printf("a: %i \n", a); } */ sum += data[i] * dense_data[(col * k) + indices[i]]; if(sum > 500000 || sum < -500000) { printf("start: %i ", start); printf("end: %i ", end); printf("i: %i ", i); printf("k: %i ", k); printf("col: %i ", col); printf("data idx %i ", indices[i]); printf("full idx %i ", (col * k) + indices[i]); printf("data sparse %f ", data[i]); printf("data dense %f ", dense_data[col * k + indices[i]]); printf("data point %f ", data[i] * dense_data[col * k + indices[i]]); printf(" sum %f\n", sum); return; } } const int pos = col * m + row; target[pos] = alpha * sum + ((beta == 0) ? 0 : beta * target[pos]); } } __global__ void kPrintData(float *A, int size) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; __syncthreads(); if(idx == 0) printf("["); for (unsigned int i = idx;i < size; i += numThreads) printf("%f ",A[i]); __syncthreads(); if(idx == 0) printf("]\n"); } __global__ void kMaxout(float *A, float *out, float *outargmax, int maxout_level, unsigned int cols, unsigned int rows) { __shared__ float max_values[32]; __shared__ float argmax_values[32]; float const min_value = -FLT_MAX; for(int row = blockIdx.x; row < rows; row +=blockDim.x) { int softout_block_idx = row + (blockIdx.y*maxout_level*rows); if(threadIdx.x < maxout_level) { max_values[threadIdx.x] = A[softout_block_idx+(threadIdx.x*rows)]; argmax_values[threadIdx.x] = (float)((blockIdx.y*maxout_level)+threadIdx.x); } else { max_values[threadIdx.x] = min_value; argmax_values[threadIdx.x] = -1.0f; } //reduceToMax(max_values, threadIdx.x); reduceToMaxAndArgMax(max_values, argmax_values, threadIdx.x, 32); __syncthreads(); if(threadIdx.x == 0) out[row + (blockIdx.y*rows)] = max_values[0]; if(threadIdx.x == 1) outargmax[row + (blockIdx.y*rows)] = argmax_values[0]; } } __global__ void kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { extern __shared__ float max_vals[]; float cur_max = -FLT_MAX; float val = 0; const int column = gridDim.x * blockIdx.y + blockIdx.x; if (column < width) { float *cur_data = &mat[column * height] ; for (unsigned int i = threadIdx.x; i < height; i += blockDim.x) { val = cur_data[i]; if (val > cur_max) cur_max = val; } max_vals[threadIdx.x] = cur_max; reduceToMax(max_vals, threadIdx.x); __syncthreads(); if (threadIdx.x == 0) target[column] = max_vals[0]; } } __global__ void kExpandToMaxoutGrad(float* error, float* indexes, float *out, int error_size, int error_rows, int maxout_level) { const unsigned int numThreads = blockDim.x * gridDim.x; const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; const int grad_size = maxout_level*error_size; for (unsigned int i = 
idx;i < grad_size; i += numThreads) out[i] = 0.0f; for (unsigned int i = idx;i < error_size; i += numThreads) { int row_idx = idx - ((idx / error_rows)*error_rows); out[row_idx + (((int)indexes[idx])*error_rows)] = error[i]; } } __global__ void kConstructVocabMatrix(float *vocab_idx, float *vocab_idx_y, float* vocab, float *rdm_idx, float *batch_X, float *batch_Y) { int middleIdx = (gridDim.y/2); int myIdx = 0; int myRdmIdx = 0; //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y //middle index is replaced by rdm word for batch_Y, but we still need to write the correct word into batch_X! if(blockIdx.y != middleIdx) { myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myIdx; } else { myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; myRdmIdx = (int)rdm_idx[blockIdx.x]; vocab_idx_y[blockIdx.x+(blockIdx.y*gridDim.x)] = (float)myRdmIdx; } int myVocabIdx = blockDim.x*myIdx; int myVocabRdmIdx = blockDim.x*myRdmIdx; if(blockIdx.y != middleIdx) { batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x]; batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x]; } else { batch_X[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabIdx + threadIdx.x]; batch_Y[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)] = vocab[myVocabRdmIdx + threadIdx.x]; } } __global__ void concat_batches(float **batch_X, float **batch_Y, float *out_X, float *out_Y) { //gridDim.z = matrix_count //gridDim.y = batch size //gridDim.x = window_size //blockDim.x = partial vocab size int full_vocab_size = gridDim.z*blockDim.x; int cols = gridDim.x*full_vocab_size; int partial_cols = blockDim.x*gridDim.x; //full_size times current row = current row idx //current window position times partial_threads times current matrix = current word idx //threadIdx.x current parameter within a word out_X[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_X[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x]; out_Y[(blockIdx.y *cols) + (blockIdx.x*full_vocab_size) + (blockIdx.z*blockDim.x) +threadIdx.x] = batch_Y[blockIdx.z][(blockIdx.y *partial_cols) + (blockIdx.x*blockDim.x) + threadIdx.x]; } /* //numerically unstable? 
__global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int myIdx = 0; float multiplier = -fdividef(learning_rate,float(gridDim.x)); myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; //printf("%f ",grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier); //printf("%f ",vocab[myVocabIdx + threadIdx.x]); //printf("%f ",vocab[myVocabIdx + threadIdx.x]+ (grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier)); if(myIdx > 10000) atomicAdd(&vocab[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier); //vocab[myVocabIdx + threadIdx.x] +=grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]; //printf("%s ",!isfinite(grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*multiplier)); } */ //numerically unstable? __global__ void kUpdateVocabWithGradient(float *grad, float *vocab_idx, float* vocab, float learning_rate) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab[myVocabIdx + threadIdx.x],-grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]*learning_rate); } __global__ void kExpandDoubleVocabGradient(float *gradX, float *gradY, float *vocab_idx_X, float *vocab_idx_Y, float* vocab, float *vocab_grad, float *vocab_grad_idx, float learning_rate, int grad_size) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y //float multiplier = fdividef(learning_rate,(float)(gridDim.x*2)); int myIdx_X = (int)vocab_idx_X[blockIdx.x+(blockIdx.y*gridDim.x)]; int myIdx_Y = (int)vocab_idx_Y[blockIdx.x+(blockIdx.y*gridDim.x)]; //int grad_cols = grad_size/blockDim.x; int myVocabIdx_X = blockDim.x*myIdx_X; int myVocabIdx_Y = blockDim.x*myIdx_Y; atomicAdd(&vocab_grad[myVocabIdx_X + threadIdx.x],gradX[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); atomicAdd(&vocab_grad[myVocabIdx_Y + threadIdx.x],gradY[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); /* vocab_grad_idx[myIdx_X] = 1.0f; vocab_grad_idx[myIdx_Y] = 1.0f; __syncthreads(); int block_idx = (blockIdx.y*gridDim.x) + blockIdx.x; int threads_blocks = gridDim.x*gridDim.y; for(int i = block_idx; i < grad_cols; i+=threads_blocks) { if(vocab_grad_idx[i] == 1.0f) { vocab[(i*blockDim.x) + threadIdx.x] -= vocab_grad[(i*blockDim.x) + threadIdx.x]*multiplier; } } */ } /* __global__ void kExpandVocabGradient_sharedMemory(float *grad, float *vocab_idx, float *vocab_grad, float *sorted_vocab_idx, vocab_idx_size) { //vocab_vector_size = blockDim.x; //batch_size = gridDim.x //try different configs for gridDim.x, e.g 16, 32 etc. //will have vocab_vector_size = blockDim.x elements e.g. 
64 extern __shared__ float sGrads[]; float myWordIdx = 0.0f; float last_word = 0.0f; float currentIdx = 0.0f; sGrads[threadIdx.x] = 0.0f; for(int word = blockIdx.x; currentIdx < vocab_idx_size; word++) { for(int i = currentIdx; i < vocab_idx_size; i++, currentIdx++) { } } } */ __global__ void kExpandVocabGradient(float *grad, float *vocab_idx, float *vocab_grad) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); } __global__ void kExpandPartialVocabGradient(float *grad, float *vocab_idx, float *vocab_grad, int matrix_idx, int matrix_count) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y int offset = matrix_idx*gridDim.x*blockDim.x; int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*(blockDim.x*matrix_count)*gridDim.x) + (threadIdx.x*gridDim.x) + offset]); } __global__ void kExpandVocabGradientMiddleWord(float *grad, float *vocab_idx, float *vocab_grad) { //vocab_vector_size = blockDim.x; //vocab_idx_rows = batch_size = gridDim.x //vocab_idx_cols = window_size = gridDim.y if(blockIdx.x+(blockIdx.y*gridDim.x) == gridDim.y/2) { int myIdx = (int)vocab_idx[blockIdx.x+(blockIdx.y*gridDim.x)]; int myVocabIdx = blockDim.x*myIdx; atomicAdd(&vocab_grad[myVocabIdx + threadIdx.x],grad[blockIdx.x + (blockIdx.y*blockDim.x*gridDim.x) + (threadIdx.x*gridDim.x)]); } } __global__ void kDot8bit(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB) { const unsigned int threads_per_block = blockDim.x*blockDim.y; const int mygrid = blockIdx.x; const int myidx = (threadIdx.y*blockDim.x)+threadIdx.x; __shared__ float tbl_floatsA[256]; __shared__ float tbl_floatsB[256]; for(int i = myidx; i < 126; i++) { tbl_floatsA[i] = flt_tbl[i]*precisionA; tbl_floatsA[i+128] = -tbl_floatsA[i]; tbl_floatsB[i] = flt_tbl[i]*precisionB; tbl_floatsB[i+128] = -tbl_floatsB[i]; } tbl_floatsA[126] = 0.0f; tbl_floatsB[126] = 0.0f; tbl_floatsA[127] = precisionA; tbl_floatsB[127] = -precisionA; tbl_floatsA[254] = -0.0f; tbl_floatsB[254] = -0.0f; tbl_floatsA[255] = precisionB; tbl_floatsB[255] = -precisionB; __syncthreads(); for(int Arow = mygrid; Arow < rowsA; Arow+=gridDim.x) { for(int Bcol = myidx; Bcol < colsB; Bcol+=threads_per_block) { int idxout = (Bcol*rowsA) + Arow; for(int Acol = 0; Acol < colsA; Acol++) out[idxout] += tbl_floatsA[A[(Acol*rowsA)+Arow]] * tbl_floatsB[B[(colsA*Bcol) + Acol]]; } } } __global__ void kDot8bit_shared(unsigned char *A, unsigned char *B, float *out, int rowsA, int colsA, int colsB, float *flt_tbl, float precisionA, float precisionB) { int myidx = (threadIdx.y*blockDim.x)+threadIdx.x; __shared__ unsigned char A_tile[64][256]; //64x32 banks __shared__ unsigned char B_tile[64][256];//256x8 banks __shared__ float tbl_floatsA[256]; __shared__ float tbl_floatsB[256]; for(int i = myidx; i < 126; i++) { tbl_floatsA[i] = flt_tbl[i]*precisionA; tbl_floatsA[i+128] = -tbl_floatsA[i]; tbl_floatsB[i] = flt_tbl[i]*precisionB; tbl_floatsB[i+128] = -tbl_floatsB[i]; } tbl_floatsA[126] = 0.0f; tbl_floatsB[126] = 0.0f; tbl_floatsA[127] = precisionA; 
tbl_floatsB[127] = -precisionA; tbl_floatsA[254] = -0.0f; tbl_floatsB[254] = -0.0f; tbl_floatsA[255] = precisionB; tbl_floatsB[255] = -precisionB; __syncthreads(); myidx = threadIdx.y*16; for(int Arow = threadIdx.x; Arow < rowsA; Arow+=64)//threadDim.x = 64 { for(int Acol = threadIdx.y*16; Acol < colsA; Acol+=256)//threadDim.y = 16 { for(int i = 0; i < 16; i++) A_tile[Arow][Acol+i] = A[((Acol+i)*rowsA)+ Arow]; for(int i = 0; i < 16; i++) B_tile[Arow][Acol+i] = B[(Arow*colsA)+ Acol+i];//B_tile is transposed to avoid bank conflicts with 64 threads __syncthreads(); for(int Bcol = 0; Bcol < 64; Bcol++) for (int i = 0; i < 16; ++i)// atomicAdd(&out[((Bcol)*rowsA) + Arow],tbl_floatsA[A_tile[threadIdx.x][myidx + i]] * tbl_floatsB[B_tile[Bcol][myidx + i]]); } } } __global__ void MatMul(float* A, float* B, float* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) { float CValue = 0; int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ float As[TILE_DIM][TILE_DIM]; __shared__ float Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows) As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x]; else As[threadIdx.y][threadIdx.x] = 0.0; if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols) Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else Bs[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; __syncthreads(); } if (Row < CRows && Col < CCols) C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols)+(blockIdx.x*blockDim.x)+threadIdx.x]=CValue; } static __device__ void saxpy(float alpha, const float* b, float* c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } __global__ void sgemm_kernel_N_N_64_16_16_16_4(float* C,const float* A,const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { __shared__ float Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int ibx = blockIdx.x * 64; int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; /* Taking care of invalid memory access in dimension M */ if ( ibx+idt >= m ) A += ibx+0; else A += ibx + idt; C += ibx + idt + __mul24(iby, ldc); B += tx+__mul24(iby, ldb); /* These variables guide the threads to avoid invalid memory accesses in dimension N. Simply it's the stopping criterion. or you can say that access index wraps around to a valid memory location. 
*/ int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb; if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; } if ( s1 == 0 ) B += __mul24(ty, ldb); else s1=0; const float *Bend = B + k - k % 16; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 15 ) { do { float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); A += 4 * lda; saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; saxpy( Ab[0], &Bb[12][0], Cb ); saxpy( Ab[1], &Bb[13][0], Cb ); saxpy( Ab[2], &Bb[14][0], Cb ); saxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); } /* Common sub expression elimination. */ ibx = ibx + idt - m; /* remembering k dimension */ ldb = m = k; /* k changed to support the generic case and reuse valuable registers */ k = k % 16; m -= k; /* Here we are taking care of k % dim_k portions */ if ( k != 0 ) { /* Avoid Invalid Memory access in dimension K If some thread enters this if ( ) block first access to B should be valid as K isn't divisible by blk_K Note that dimension N has been taken care of by s1, s2, s3, s4 But depending upon K and thread index tx, some memory access may be still invalid, so take care of them now by setting s1, s2, s3, s4 = 0 B might have been advanced in the previous loop, take care of that, this is about right bottom corner. 
*/ if ( m + tx >= ldb ) { s1 = s2 = s3 = s4 = 0; B -= tx; } Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); for(int i=0; i < k; i++) { saxpy( A[0], &Bb[i+0][0], Cb ); A += lda; } } /* Now taking care of dimension M, N that doesnt fit into blocks */ if ( (iby+16) >= n ) { lda = n - iby; } else { lda = 16; } if ( ibx >= 0 ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + 
beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } } __global__ void sgemmNN( const float *A, int lda, const float *B, int ldb, float* C, int ldc, int k, float alpha, float beta ) { const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int id = inx + iny*16; A 
+= ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const float *Blast = B + k; float c[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; __shared__ float bs[16][17]; do { #pragma unroll for( int i = 0; i < 16; i += 4 ) bs[inx][iny+i] = B[i*ldb]; __syncthreads(); #pragma unroll for( int i = 0; i < 16; i++, A += lda ) saxpy( A[0], &bs[i][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++, C += ldc ) C[0] = alpha*c[i] + beta*C[0]; } __global__ void sgemm_kernel_N_T_64_16_4_16_4(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int ibx = blockIdx.x * 64; const int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; if ( iby + tx >= n ) B += iby + 0; else B += iby + tx; /* Taking care of boundary cases where K < 4. */ if ( ty >= k ) B += __mul24( 0, ldb ); else B += __mul24( ty, ldb ); if ( ibx + idt >= m ) A += ibx + 0; else A += ibx + idt; int s2=lda, s3=2*lda, s4=3*lda; switch (k) { case 1: s2=0; s3=0; s4=0; break; case 2: s2=lda; s3=0; s4=0; break; case 3: s2=lda; s3=2*lda; s4=0; break; } C += ibx + idt + __mul24( iby, ldc ); float Ap[4] = { A[0], A[s2], A[s3], A[s4] }; float b = B[0]; const float *Bend = B + ldb*(k - k % 4); B += 4*ldb; A += 4*lda; __shared__ float Bb[4][16]; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 7 ) { do { float Ab[4] = {Ap[0], Ap[1], Ap[2], Ap[3]}; Bb[ty][tx]=b; __syncthreads(); Ap[0] = A[0]; Ap[1] = A[s2]; Ap[2] = A[s3]; Ap[3] = A[s4]; b=B[0]; saxpy( Ab[0], &Bb[0][0], Cb ); saxpy( Ab[1], &Bb[1][0], Cb ); saxpy( Ab[2], &Bb[2][0], Cb ); saxpy( Ab[3], &Bb[3][0], Cb ); A += 4*lda; B += 4*ldb; __syncthreads(); } while (B < Bend); } if ( k > 3 ) { Bb[ty][tx]=b; int k1 = k - k % 4; if ( (k1+ty) >= k ) B -= 4*ldb; else B -= 0*ldb; if ( (k1+0) >= k ) {s2=0; s3=0*lda; s4=0; A -= 4*lda; } else if ( (k1+1) >= k ) {s2=0; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+2) >= k ) {s2=lda; s3=0*lda; s4=0; A -= 0*lda; } else if ( (k1+3) >= k ) {s2=lda; s3=2*lda; s4=0; A -= 0*lda; } __syncthreads(); b=B[0]; saxpy( Ap[0], &Bb[0][0], Cb ); Ap[0] = A[0]; saxpy( Ap[1], &Bb[1][0], Cb ); Ap[1] = A[s2]; saxpy( Ap[2], &Bb[2][0], Cb ); Ap[2] = A[s3]; saxpy( Ap[3], &Bb[3][0], Cb ); Ap[3] = A[s4]; } k = k % 4; if ( k != 0 ) { __syncthreads(); Bb[ty][tx]=b; __syncthreads(); for(int i=0; i < k; i++) { saxpy( Ap[i], &Bb[i][0], Cb ); } } if ( (iby+16)>=n) { lda = n-iby; } else{ lda = 16; } if ( (ibx+idt) >= m ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = 
alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + 
beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } } __global__ void sgemm_kernel_T_N_32_32_8_8_8(float* C, const float* A, const float* B, int m, int n, int k, int lda, int ldb, int ldc, float alpha, float beta ) { const int ibx = blockIdx.x * 32; const int iby = blockIdx.y * 32; const int tx = threadIdx.y; const int ty = threadIdx.x; int idt = tx*8 + ty; if ( ty >= k ) A += __mul24(ibx, lda) + 0; else A += __mul24(ibx, lda) + ty; if ( (ibx + tx) >= m ) A += __mul24(0, lda); else A += __mul24(tx, lda); if ( (iby+tx) >= n ) B += __mul24(iby+0, ldb); else B += __mul24(iby+tx, ldb); if ( ty >= k ) B += 0; else B += ty; C += ibx + idt % 32 + __mul24( iby + 16*(idt/32), ldc ); lda = lda * 8; ldb = ldb * 8; int as1=0, as2=lda, as3=2*lda, as4=3*lda; int bs1=0, bs2=ldb, bs3=2*ldb, bs4=3*ldb; switch(k) { case 1: as2=0; as3=0*lda; as4=0; bs2=0; bs3=0*ldb; bs4=0; break; case 2: as2=lda; as3=0*lda; as4=0; bs2=ldb; bs3=0*ldb; bs4=0; break; case 3: as2=lda; as3=2*lda; as4=0; bs2=ldb; bs3=2*ldb; bs4=0; break; } if ( (ibx + tx ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else if ( (ibx + tx + 8 ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else if ( (ibx + tx + 16) >= m ) { as1=0; as2=1*lda; as3=0*lda; as4=0*lda; } else if ( (ibx + tx + 24) >= m ) { as1=0; as2=1*lda; as3=2*lda; as4=0*lda; } if ( (iby + tx ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else if ( (iby + tx + 8 ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else if ( (iby + tx + 16) >= n ) { bs1=0; bs2=1*ldb; bs3=0*ldb; bs4=0*ldb; } else if ( (iby + tx + 24) >= n ) { bs1=0; bs2=1*ldb; bs3=2*ldb; bs4=0*ldb; } float b = B[bs1]; float b1 = B[bs2]; float b2 = B[bs3]; float b3 = B[bs4]; float Ap[4] = { A[as1], A[as2], A[as3], A[as4] }; const float *Bend = B + (k - k % 8); B 
+= 8; A += 8; __shared__ float Bb[8][33]; __shared__ float ABb[32][9]; float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; const int l = 17*(idt/32); int idt1 = idt; idt = idt % 32; if ( k > 15 ) { do { Bb[ty][tx ] = b; Bb[ty][tx+8 ] = b1; Bb[ty][tx+17] = b2; Bb[ty][tx+25] = b3; ABb[tx ][ty] = Ap[0]; ABb[tx+8 ][ty] = Ap[1]; ABb[tx+16][ty] = Ap[2]; ABb[tx+24][ty] = Ap[3]; __syncthreads(); saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1]; saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2]; saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3]; saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4]; saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1]; saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2]; saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3]; saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4]; B += 8; A += 8; __syncthreads(); } while (B < Bend); } if ( k > 7 ) { Bb[ty][tx ] = b; Bb[ty][tx+8 ] = b1; Bb[ty][tx+17] = b2; Bb[ty][tx+25] = b3; ABb[tx ][ty] = Ap[0]; ABb[tx+8 ][ty] = Ap[1]; ABb[tx+16][ty] = Ap[2]; ABb[tx+24][ty] = Ap[3]; __syncthreads(); as1 = k - k % 8; if ( as1+ty >= k ) { bs1=0*ldb; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; B -= 8; } if ( as1+ty >= k ) { as1=0*lda; as2=0*lda; as3=0*lda; as4=0*lda; A -= 8; } as1=0; saxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1]; saxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2]; saxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3]; saxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4]; saxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1]; saxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2]; saxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3]; saxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4]; } k = k % 8; if ( k != 0 ) { __syncthreads(); Bb[ty][tx ] = b; Bb[ty][tx+8 ] = b1; Bb[ty][tx+17] = b2; Bb[ty][tx+25] = b3; ABb[tx ][ty] = Ap[0]; ABb[tx+8 ][ty] = Ap[1]; ABb[tx+16][ty] = Ap[2]; ABb[tx+24][ty] = Ap[3]; __syncthreads(); for(int i=0; i < k; i++) { saxpy( ABb[idt][i], &Bb[i][l], Cb ); } } if ( (iby+16*(idt1/32+1)) >= n ) { lda = n - iby - 16*(idt1/32); } else { lda = 16; } if ( (ibx+idt) >= m ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = 
alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 
7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } }
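/*
 * A minimal host-side launch sketch for sgemm_kernel_N_N_64_16_16_16_4 above.
 * The 64x16 output tile per block and the idt = ty*16 + tx indexing imply a
 * dim3(16, 4) block (64 threads) and a grid of ceil(m/64) x ceil(n/16) blocks;
 * this configuration is inferred from the kernel body, not taken from an
 * official launcher, so treat it as an assumption. Matrices are column-major
 * with leading dimensions lda/ldb/ldc, as in the kernels themselves.
 */
static inline int sgemmDivUp(int a, int b) { return (a + b - 1) / b; }

void launch_sgemm_NN(float* dC, const float* dA, const float* dB,
                     int m, int n, int k, int lda, int ldb, int ldc,
                     float alpha, float beta)
{
    dim3 block(16, 4);                               // tx in [0,16), ty in [0,4)
    dim3 grid(sgemmDivUp(m, 64), sgemmDivUp(n, 16)); // one block per 64x16 tile of C
    sgemm_kernel_N_N_64_16_16_16_4<<<grid, block>>>(
        dC, dA, dB, m, n, k, lda, ldb, ldc, alpha, beta);
}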
bb96edd9bedd5a15c6937b8038707cfe5e62a49b.hip
// !!! This is a file automatically generated by hipify!!! /* Sample code for Sparse-Matrix-Vector multiplication.*/ #include <iostream> #include <cstdlib> #include <time.h> #include <hip/hip_runtime.h> #include <minigun/minigun.h> #include "../samples_utils.h" #include "../samples_io.h" struct GData { float* cur{nullptr}; float* next{nullptr}; float* weight{nullptr}; int* eid_mapping{nullptr}; }; struct SPMVFunctor { static __device__ __forceinline__ void ApplyEdge( int32_t src, int32_t dst, int32_t eid, GData* gdata) {} static __device__ __forceinline__ void ApplyEdgeReduce( int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float* val, GData* gdata) { *val += gdata->cur[src] * gdata->weight[gdata->eid_mapping[eid]]; } static __device__ __forceinline__ int32_t GetFeatSize(GData *gdata) { return 1; } static __device__ __forceinline__ float* GetOutBuf(GData* gdata) { return gdata->next; } static __device__ __forceinline__ int32_t GetOutOffset(int32_t idx, GData* gdata) { return idx; } }; std::vector<float> GroundTruth( const std::vector<int32_t>& row_offsets, const std::vector<int32_t>& column_indices, const std::vector<float>& vdata, const std::vector<float>& edata) { std::vector<float> ret(vdata.size(), 0); for (size_t u = 0; u < row_offsets.size() - 1; ++u) { for (int32_t eid = row_offsets[u]; eid < row_offsets[u+1]; ++eid) { int32_t v = column_indices[eid]; ret[v] += vdata[u] * edata[eid]; } } return ret; } int main(int argc, char** argv) { srand(42); std::vector<int32_t> row_offsets, column_indices; utils::CreateNPGraph(10000, 0.01, row_offsets, column_indices); const int32_t N = row_offsets.size() - 1; const int32_t M = column_indices.size(); std::cout << "#nodes: " << N << " #edges: " << M << std::endl; CUDA_CALL(hipSetDevice(0)); minigun::IntCsr csr; csr.row_offsets.length = row_offsets.size(); CUDA_CALL(hipMalloc(&csr.row_offsets.data, sizeof(int32_t) * row_offsets.size())); CUDA_CALL(hipMemcpy(csr.row_offsets.data, &row_offsets[0], sizeof(int32_t) * row_offsets.size(), hipMemcpyHostToDevice)); csr.column_indices.length = column_indices.size(); CUDA_CALL(hipMalloc(&csr.column_indices.data, sizeof(int32_t) * column_indices.size())); CUDA_CALL(hipMemcpy(csr.column_indices.data, &column_indices[0], sizeof(int32_t) * column_indices.size(), hipMemcpyHostToDevice)); csr.num_rows = N; csr.num_cols = N; // Create raw eid_mapping minigun::IntArray csr_mapping = utils::arange(0, M, kDLGPU); // Create csr_t and coo minigun::IntCsr csr_t; auto pack = utils::ToReverseCsr(csr, csr_mapping, kDLGPU); csr_t = pack.first; minigun::IntArray csr_t_mapping = pack.second; minigun::IntCoo coo; coo = utils::ToCoo(csr, kDLGPU); minigun::IntSpMat spmat = {&csr, &csr_t, &coo}; // Create stream minigun::advance::RuntimeConfig config; config.ctx = {kDLGPU, 0}; config.data_num_blocks = 1; config.data_num_threads = 1; CUDA_CALL(hipStreamCreate(&config.stream)); // Create vdata, edata and copy to GPU std::vector<float> vvec(N), evec(M); for (int32_t i = 0; i < N; ++i) { vvec[i] = (float)rand() / RAND_MAX; } for (int32_t i = 0; i < M; ++i) { evec[i] = (float)rand() / RAND_MAX; } GData gdata; CUDA_CALL(hipMalloc(&gdata.cur, sizeof(float) * N)); CUDA_CALL(hipMemcpy(gdata.cur, &vvec[0], sizeof(float) * N, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&gdata.next, sizeof(float) * N)); CUDA_CALL(hipMemset(gdata.next, 0, sizeof(float) * N)); CUDA_CALL(hipMalloc(&gdata.weight, sizeof(float) * M)); CUDA_CALL(hipMemcpy(gdata.weight, &evec[0], sizeof(float) * M, hipMemcpyHostToDevice)); gdata.eid_mapping = 
csr_t_mapping.data; CUDA_CALL(hipDeviceSynchronize()); // Compute ground truth std::vector<float> truth = GroundTruth(row_offsets, column_indices, vvec, evec); typedef minigun::advance::Config<minigun::advance::kDst> Config; minigun::advance::Advance<kDLGPU, int32_t, float, Config, GData, SPMVFunctor>( config, spmat, &gdata); CUDA_CALL(hipDeviceSynchronize()); // verify output std::vector<float> rst(N); CUDA_CALL(hipMemcpy(&rst[0], gdata.next, sizeof(float) * N, hipMemcpyDeviceToHost)); //utils::VecPrint(rst); std::cout << "Correct? " << utils::VecEqual(truth, rst) << std::endl; const int K = 10; timeval t0, t1; gettimeofday(&t0, nullptr); for (int i = 0; i < K; ++i) { minigun::advance::Advance<kDLGPU, int32_t, float, Config, GData, SPMVFunctor>( config, spmat, &gdata); } CUDA_CALL(hipDeviceSynchronize()); gettimeofday(&t1, nullptr); std::cout << "Time(ms): " << (double)(t1.tv_usec - t0.tv_usec) / K / 1000.0 << std::endl; // free return 0; }
bb96edd9bedd5a15c6937b8038707cfe5e62a49b.cu
/* Sample code for Sparse-Matrix-Vector multiplication.*/ #include <iostream> #include <cstdlib> #include <time.h> #include <cuda_runtime.h> #include <minigun/minigun.h> #include "../samples_utils.h" #include "../samples_io.h" struct GData { float* cur{nullptr}; float* next{nullptr}; float* weight{nullptr}; int* eid_mapping{nullptr}; }; struct SPMVFunctor { static __device__ __forceinline__ void ApplyEdge( int32_t src, int32_t dst, int32_t eid, GData* gdata) {} static __device__ __forceinline__ void ApplyEdgeReduce( int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float* val, GData* gdata) { *val += gdata->cur[src] * gdata->weight[gdata->eid_mapping[eid]]; } static __device__ __forceinline__ int32_t GetFeatSize(GData *gdata) { return 1; } static __device__ __forceinline__ float* GetOutBuf(GData* gdata) { return gdata->next; } static __device__ __forceinline__ int32_t GetOutOffset(int32_t idx, GData* gdata) { return idx; } }; std::vector<float> GroundTruth( const std::vector<int32_t>& row_offsets, const std::vector<int32_t>& column_indices, const std::vector<float>& vdata, const std::vector<float>& edata) { std::vector<float> ret(vdata.size(), 0); for (size_t u = 0; u < row_offsets.size() - 1; ++u) { for (int32_t eid = row_offsets[u]; eid < row_offsets[u+1]; ++eid) { int32_t v = column_indices[eid]; ret[v] += vdata[u] * edata[eid]; } } return ret; } int main(int argc, char** argv) { srand(42); std::vector<int32_t> row_offsets, column_indices; utils::CreateNPGraph(10000, 0.01, row_offsets, column_indices); const int32_t N = row_offsets.size() - 1; const int32_t M = column_indices.size(); std::cout << "#nodes: " << N << " #edges: " << M << std::endl; CUDA_CALL(cudaSetDevice(0)); minigun::IntCsr csr; csr.row_offsets.length = row_offsets.size(); CUDA_CALL(cudaMalloc(&csr.row_offsets.data, sizeof(int32_t) * row_offsets.size())); CUDA_CALL(cudaMemcpy(csr.row_offsets.data, &row_offsets[0], sizeof(int32_t) * row_offsets.size(), cudaMemcpyHostToDevice)); csr.column_indices.length = column_indices.size(); CUDA_CALL(cudaMalloc(&csr.column_indices.data, sizeof(int32_t) * column_indices.size())); CUDA_CALL(cudaMemcpy(csr.column_indices.data, &column_indices[0], sizeof(int32_t) * column_indices.size(), cudaMemcpyHostToDevice)); csr.num_rows = N; csr.num_cols = N; // Create raw eid_mapping minigun::IntArray csr_mapping = utils::arange(0, M, kDLGPU); // Create csr_t and coo minigun::IntCsr csr_t; auto pack = utils::ToReverseCsr(csr, csr_mapping, kDLGPU); csr_t = pack.first; minigun::IntArray csr_t_mapping = pack.second; minigun::IntCoo coo; coo = utils::ToCoo(csr, kDLGPU); minigun::IntSpMat spmat = {&csr, &csr_t, &coo}; // Create stream minigun::advance::RuntimeConfig config; config.ctx = {kDLGPU, 0}; config.data_num_blocks = 1; config.data_num_threads = 1; CUDA_CALL(cudaStreamCreate(&config.stream)); // Create vdata, edata and copy to GPU std::vector<float> vvec(N), evec(M); for (int32_t i = 0; i < N; ++i) { vvec[i] = (float)rand() / RAND_MAX; } for (int32_t i = 0; i < M; ++i) { evec[i] = (float)rand() / RAND_MAX; } GData gdata; CUDA_CALL(cudaMalloc(&gdata.cur, sizeof(float) * N)); CUDA_CALL(cudaMemcpy(gdata.cur, &vvec[0], sizeof(float) * N, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&gdata.next, sizeof(float) * N)); CUDA_CALL(cudaMemset(gdata.next, 0, sizeof(float) * N)); CUDA_CALL(cudaMalloc(&gdata.weight, sizeof(float) * M)); CUDA_CALL(cudaMemcpy(gdata.weight, &evec[0], sizeof(float) * M, cudaMemcpyHostToDevice)); gdata.eid_mapping = csr_t_mapping.data; CUDA_CALL(cudaDeviceSynchronize()); 
// Compute ground truth std::vector<float> truth = GroundTruth(row_offsets, column_indices, vvec, evec); typedef minigun::advance::Config<minigun::advance::kDst> Config; minigun::advance::Advance<kDLGPU, int32_t, float, Config, GData, SPMVFunctor>( config, spmat, &gdata); CUDA_CALL(cudaDeviceSynchronize()); // verify output std::vector<float> rst(N); CUDA_CALL(cudaMemcpy(&rst[0], gdata.next, sizeof(float) * N, cudaMemcpyDeviceToHost)); //utils::VecPrint(rst); std::cout << "Correct? " << utils::VecEqual(truth, rst) << std::endl; const int K = 10; timeval t0, t1; gettimeofday(&t0, nullptr); for (int i = 0; i < K; ++i) { minigun::advance::Advance<kDLGPU, int32_t, float, Config, GData, SPMVFunctor>( config, spmat, &gdata); } CUDA_CALL(cudaDeviceSynchronize()); gettimeofday(&t1, nullptr); std::cout << "Time(ms): " << (double)(t1.tv_usec - t0.tv_usec) / K / 1000.0 << std::endl; // free return 0; }
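// A tiny worked example of the CSR layout consumed by GroundTruth above (purely
// illustrative; spmv_csr_toy_example is not part of the benchmark). Three nodes
// with edges 0->1, 0->2 and 1->2: because the advance reduces over destinations
// (Config<kDst>), each output entry accumulates over its incoming edges.
void spmv_csr_toy_example() {
  std::vector<int32_t> row_offsets = {0, 2, 3, 3};   // node 0 has 2 out-edges, node 1 has 1, node 2 has 0
  std::vector<int32_t> column_indices = {1, 2, 2};   // edge list in CSR order: 0->1, 0->2, 1->2
  std::vector<float> vdata = {1.0f, 2.0f, 3.0f};     // per-node input vector
  std::vector<float> edata = {0.5f, 0.25f, 2.0f};    // per-edge weights
  std::vector<float> y = GroundTruth(row_offsets, column_indices, vdata, edata);
  // y[0] = 0 (no in-edges), y[1] = 1.0*0.5 = 0.5, y[2] = 1.0*0.25 + 2.0*2.0 = 4.25
  (void)y;
}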
f2e98459274f88ddd86fa3ab3796e844f9a3f509.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "DomaineMathGPUs.h" #include "DomaineMaths.h" #include "IndiceXY.h" #include "cudaTools.h" #include "Sphere.h" #include "ColorToolCuda.h" #include "FonctionsRaytracing.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void kernelFillImageRaytracing(uchar4* ptrDevImageGL, int w, int h, DomaineMathGPUs domaineMathGPUs, float t, int N, int nbSphere, Sphere* ptrDev_tabSphere); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ static void setPixel(uchar4& pixel, int i, int j, int w, int h, float t, int N, IndiceXY indiceXY, int nbSphere, Sphere* ptrDev_tabSphere); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ void launchKernelFillImageRaytracing(uchar4* ptrDevImageGL, int w, int h, float t, const DomaineMaths& domaineMath, int N, int nbSphere, Sphere* tabSphere, Sphere* ptrDev_tabSphere) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); DomaineMathGPUs domaineMathGPUs(domaineMath); hipLaunchKernelGGL(( kernelFillImageRaytracing), dim3(dg),dim3(db), 0, 0, ptrDevImageGL, w, h, domaineMathGPUs, t, N, nbSphere, ptrDev_tabSphere); } void allocMemory(Sphere* &ptrDev_tabSphere, Sphere* tabSphere, int nbSphere) { size_t sizeSphere = sizeof(Sphere) * nbSphere; // Device memory allocation (*) HANDLE_ERROR(hipMalloc((void**) &ptrDev_tabSphere, sizeSphere)); // Host -> Device HANDLE_ERROR(hipMemcpy(ptrDev_tabSphere, tabSphere, sizeSphere, hipMemcpyHostToDevice)); } void freeMemory(Sphere* ptrDev_tabSphere) { HANDLE_ERROR(hipFree(ptrDev_tabSphere)); } __global__ void kernelFillImageRaytracing(uchar4* ptrDevImageGL, int w, int h, DomaineMathGPUs domaineMathGPUs, float t, int N, int nbSphere, Sphere* ptrDev_tabSphere) { IndiceXY indiceXY(w, h, &domaineMathGPUs); const int n = w * h; int tid = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); int s = tid; int i; int j; while (s < n) { Indice2D::pixelIJ(s, w, i, j); setPixel(ptrDevImageGL[s], i, j, w, h, t, N, indiceXY, nbSphere, ptrDev_tabSphere); ptrDevImageGL[s].w = 255; s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void setPixel(uchar4& pixelIJ, int i, int j, int w, int h, float t, int N, IndiceXY indiceXY, int nbSphere, Sphere* ptrDev_tabSphere) { float x; float y; indiceXY.toXY(i, j, x, y); float2 p; p.x = x; p.y = y; float dz; float b; float hue; float s; for (int q = 0; q < nbSphere; q++) { float hCarre = ptrDev_tabSphere[q].hCarre(p); if (ptrDev_tabSphere[q].isEnDessous(hCarre)) { dz = ptrDev_tabSphere[q].dz(hCarre); b = ptrDev_tabSphere[q].brightness(dz); hue = hueSphere(q, t, ptrDev_tabSphere[q].getHue()); s = 1.0; ColorToolCuda::HSB_TO_RVB(hue, s, b, pixelIJ.x, pixelIJ.y, pixelIJ.z); return; } } ColorToolCuda::HSB_TO_RVB(0.0, 0.0, 0.0, pixelIJ.x, pixelIJ.y, pixelIJ.z); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
f2e98459274f88ddd86fa3ab3796e844f9a3f509.cu
#include "Indice2D.h" #include "DomaineMathGPUs.h" #include "DomaineMaths.h" #include "IndiceXY.h" #include "cudaTools.h" #include "Sphere.h" #include "ColorToolCuda.h" #include "FonctionsRaytracing.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void kernelFillImageRaytracing(uchar4* ptrDevImageGL, int w, int h, DomaineMathGPUs domaineMathGPUs, float t, int N, int nbSphere, Sphere* ptrDev_tabSphere); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ static void setPixel(uchar4& pixel, int i, int j, int w, int h, float t, int N, IndiceXY indiceXY, int nbSphere, Sphere* ptrDev_tabSphere); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ void launchKernelFillImageRaytracing(uchar4* ptrDevImageGL, int w, int h, float t, const DomaineMaths& domaineMath, int N, int nbSphere, Sphere* tabSphere, Sphere* ptrDev_tabSphere) { dim3 dg = dim3(8, 8); dim3 db = dim3(16, 16, 1); DomaineMathGPUs domaineMathGPUs(domaineMath); kernelFillImageRaytracing<<<dg,db>>>(ptrDevImageGL, w, h, domaineMathGPUs, t, N, nbSphere, ptrDev_tabSphere); } void allocMemory(Sphere* &ptrDev_tabSphere, Sphere* tabSphere, int nbSphere) { size_t sizeSphere = sizeof(Sphere) * nbSphere; // Device memory allocation (*) HANDLE_ERROR(cudaMalloc((void**) &ptrDev_tabSphere, sizeSphere)); // Host -> Device HANDLE_ERROR(cudaMemcpy(ptrDev_tabSphere, tabSphere, sizeSphere, cudaMemcpyHostToDevice)); } void freeMemory(Sphere* ptrDev_tabSphere) { HANDLE_ERROR(cudaFree(ptrDev_tabSphere)); } __global__ void kernelFillImageRaytracing(uchar4* ptrDevImageGL, int w, int h, DomaineMathGPUs domaineMathGPUs, float t, int N, int nbSphere, Sphere* ptrDev_tabSphere) { IndiceXY indiceXY(w, h, &domaineMathGPUs); const int n = w * h; int tid = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); int s = tid; int i; int j; while (s < n) { Indice2D::pixelIJ(s, w, i, j); setPixel(ptrDevImageGL[s], i, j, w, h, t, N, indiceXY, nbSphere, ptrDev_tabSphere); ptrDevImageGL[s].w = 255; s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void setPixel(uchar4& pixelIJ, int i, int j, int w, int h, float t, int N, IndiceXY indiceXY, int nbSphere, Sphere* ptrDev_tabSphere) { float x; float y; indiceXY.toXY(i, j, x, y); float2 p; p.x = x; p.y = y; float dz; float b; float hue; float s; for (int q = 0; q < nbSphere; q++) { float hCarre = ptrDev_tabSphere[q].hCarre(p); if (ptrDev_tabSphere[q].isEnDessous(hCarre)) { dz = ptrDev_tabSphere[q].dz(hCarre); b = ptrDev_tabSphere[q].brightness(dz); hue = hueSphere(q, t, ptrDev_tabSphere[q].getHue()); s = 1.0; ColorToolCuda::HSB_TO_RVB(hue, s, b, pixelIJ.x, pixelIJ.y, pixelIJ.z); return; } } ColorToolCuda::HSB_TO_RVB(0.0, 0.0, 0.0, pixelIJ.x, pixelIJ.y, pixelIJ.z); } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
1a081687cf859bfd2f4cb02233364e8401b67e28.hip
// !!! This is a file automatically generated by hipify!!! #include "LinearSearch.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <random> #include <math.h> #include <memory> namespace { __global__ void searchWithCuda(int64_t * arr, size_t N, int64_t x) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < N; i += stride) { if (arr[i] == x) { break; } } } } LinearSearch::LinearSearch(size_t size, size_t arraySize) : arraySize(arraySize), Benchmarker(size) { } LinearSearch::~LinearSearch() { } std::chrono::high_resolution_clock::duration LinearSearch::runCpu() { auto array = std::make_unique<int64_t[]>(arraySize); std::mt19937_64 rand; for (auto i = 0; i < arraySize; i++) { array.get()[i] = rand(); } int randIndex = abs(static_cast<long>(rand())) % arraySize; auto x = array.get()[randIndex]; auto ptr = array.get(); auto startTime = std::chrono::high_resolution_clock::now(); for (auto i = 0; i < arraySize; i++) { if (ptr[i] == x) { break; } } auto endTime = std::chrono::high_resolution_clock::now(); return endTime - startTime; } std::chrono::high_resolution_clock::duration LinearSearch::runGpu() { auto arr = std::make_unique<int64_t[]>(arraySize); std::mt19937_64 rand; for (auto i = 0; i < arraySize; i++) { arr.get()[i] = rand(); } int64_t* gpuArray; hipMalloc(&gpuArray, sizeof(int64_t) * arraySize); hipMemcpy(gpuArray, arr.get(), sizeof(int64_t) * arraySize, hipMemcpyHostToDevice); size_t threadsPerBlock, blocksPerGrid; if (arraySize < 512) { threadsPerBlock = arraySize; blocksPerGrid = 1; } else { threadsPerBlock = 512; blocksPerGrid = (size_t)ceil(double(arraySize) / double(threadsPerBlock)); } int randIndex = std::abs(static_cast<long>(rand())) % arraySize; hipDeviceSynchronize(); auto start = std::chrono::high_resolution_clock::now(); ::searchWithCuda << <blocksPerGrid, threadsPerBlock >> > (gpuArray, arraySize, arr.get()[randIndex]); hipDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); hipFree(gpuArray); return end - start; }
1a081687cf859bfd2f4cb02233364e8401b67e28.cu
#include "LinearSearch.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <random> #include <math.h> #include <memory> namespace { __global__ void searchWithCuda(int64_t * arr, size_t N, int64_t x) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < N; i += stride) { if (arr[i] == x) { break; } } } } LinearSearch::LinearSearch(size_t size, size_t arraySize) : arraySize(arraySize), Benchmarker(size) { } LinearSearch::~LinearSearch() { } std::chrono::high_resolution_clock::duration LinearSearch::runCpu() { auto array = std::make_unique<int64_t[]>(arraySize); std::mt19937_64 rand; for (auto i = 0; i < arraySize; i++) { array.get()[i] = rand(); } int randIndex = abs(static_cast<long>(rand())) % arraySize; auto x = array.get()[randIndex]; auto ptr = array.get(); auto startTime = std::chrono::high_resolution_clock::now(); for (auto i = 0; i < arraySize; i++) { if (ptr[i] == x) { break; } } auto endTime = std::chrono::high_resolution_clock::now(); return endTime - startTime; } std::chrono::high_resolution_clock::duration LinearSearch::runGpu() { auto arr = std::make_unique<int64_t[]>(arraySize); std::mt19937_64 rand; for (auto i = 0; i < arraySize; i++) { arr.get()[i] = rand(); } int64_t* gpuArray; cudaMalloc(&gpuArray, sizeof(int64_t) * arraySize); cudaMemcpy(gpuArray, arr.get(), sizeof(int64_t) * arraySize, cudaMemcpyHostToDevice); size_t threadsPerBlock, blocksPerGrid; if (arraySize < 512) { threadsPerBlock = arraySize; blocksPerGrid = 1; } else { threadsPerBlock = 512; blocksPerGrid = (size_t)ceil(double(arraySize) / double(threadsPerBlock)); } int randIndex = std::abs(static_cast<long>(rand())) % arraySize; cudaDeviceSynchronize(); auto start = std::chrono::high_resolution_clock::now(); ::searchWithCuda << <blocksPerGrid, threadsPerBlock >> > (gpuArray, arraySize, arr.get()[randIndex]); cudaDeviceSynchronize(); auto end = std::chrono::high_resolution_clock::now(); cudaFree(gpuArray); return end - start; }
244ab14cf63924a5438b4819e8b9491596b5a9ba.hip
// !!! This is a file automatically generated by hipify!!! #include "GoL.h" #include <iostream> #include <fstream> #include <iomanip> int max_generations = 1000; int n_small = 9; int n_large = 20; // Perform a single run void timedRun(int sz, bool small, std::ofstream &myfile, hipEvent_t start, hipEvent_t stop){ // Set input parameters auto rows = sz*10; auto cols = sz*10; if(!small){ rows = sz*100; cols = sz*100; std::cout << std::setw(5) << (n_small+sz) ; } else{ std::cout << std::setw(5) << (sz) ; } auto N = (size_t)rows * (size_t)cols; myfile << N << ","; std::cout << std::setw(15) << rows << std::setw(15) << cols << std::setw(15) << N << std::flush; /* Time CPU execution */ GoL o_c(rows,cols,false); hipEventRecord(start,0); o_c.setRandomInitialState(); for(auto i=0;i<max_generations;++i){ o_c.updateState(); } hipEventRecord(stop,0); hipEventSynchronize(stop); float cpu_time; hipEventElapsedTime(&cpu_time,start,stop); myfile << cpu_time << ","; std::cout << std::setw(15) << cpu_time << std::flush; /* Time GPU execution */ GoL o_g(rows,cols,true); hipEventRecord(start,0); o_g.setRandomInitialState(); for(auto i=0;i<max_generations; ++i){ o_g.updateState(); } hipEventRecord(stop,0); hipEventSynchronize(stop); float gpu_time; hipEventElapsedTime(&gpu_time,start,stop); myfile << gpu_time << std::endl; std::cout << std::setw(15) << gpu_time << std::endl; } int main(int argc, char* argv[]){ std::string csv_name = "times.csv"; // default file for storing results // Parse cmd line arguments for(int i=0;i<argc;++i){ if(argv[i] == "-g" || argv[i] == "--gen"){ max_generations = std::stoi(argv[i+1]); i++; } else if(argv[i] == "--csv"){ csv_name = argv[i+1]; i++; } } if(max_generations <= 1){ std::cerr << "Invalid no of max_generations = " << max_generations << std::endl; std::cerr << "Terminating benchmarking" << std::endl; return 1; } std::cout << "Running Compute-Only Benchmarks for GameOfLife (CPU vs GPU)" << std::endl; std::cout << "# of generations for each grid = " << max_generations << std::endl; // Define stream for output csv file std::ofstream myfile; myfile.open (csv_name); if(!myfile.is_open()){ std::cerr << "Error in opening" << csv_name << std::endl; std::cerr << "Terminating benchmarking" << std::endl; return 1; } myfile << "Number of Cells,CPU Time(ms),GPU Time(ms)" << std::endl; std::cout << std::setw(5) << "Id" << std::setw(15) << "n_rows" << std::setw(15) << "n_cols" << std::setw(15) << "n_cells" << std::setw(15) << "CPU Time(ms)" << std::setw(15) << "GPU Time(ms)" << std::endl; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); // Small random inputs for(auto t = 1; t < 10 ; ++t){ timedRun(t,true,myfile,start,stop); } // Big random inputs for(auto t = 1; t <= 20 ; ++t){ timedRun(t,false,myfile,start,stop); } // teardown hipEventDestroy(start); hipEventDestroy(stop); myfile.close(); return 0; }
244ab14cf63924a5438b4819e8b9491596b5a9ba.cu
#include "GoL.h" #include <iostream> #include <fstream> #include <iomanip> int max_generations = 1000; int n_small = 9; int n_large = 20; // Perform a single run void timedRun(int sz, bool small, std::ofstream &myfile, cudaEvent_t start, cudaEvent_t stop){ // Set input parameters auto rows = sz*10; auto cols = sz*10; if(!small){ rows = sz*100; cols = sz*100; std::cout << std::setw(5) << (n_small+sz) ; } else{ std::cout << std::setw(5) << (sz) ; } auto N = (size_t)rows * (size_t)cols; myfile << N << ","; std::cout << std::setw(15) << rows << std::setw(15) << cols << std::setw(15) << N << std::flush; /* Time CPU execution */ GoL o_c(rows,cols,false); cudaEventRecord(start,0); o_c.setRandomInitialState(); for(auto i=0;i<max_generations;++i){ o_c.updateState(); } cudaEventRecord(stop,0); cudaEventSynchronize(stop); float cpu_time; cudaEventElapsedTime(&cpu_time,start,stop); myfile << cpu_time << ","; std::cout << std::setw(15) << cpu_time << std::flush; /* Time GPU execution */ GoL o_g(rows,cols,true); cudaEventRecord(start,0); o_g.setRandomInitialState(); for(auto i=0;i<max_generations; ++i){ o_g.updateState(); } cudaEventRecord(stop,0); cudaEventSynchronize(stop); float gpu_time; cudaEventElapsedTime(&gpu_time,start,stop); myfile << gpu_time << std::endl; std::cout << std::setw(15) << gpu_time << std::endl; } int main(int argc, char* argv[]){ std::string csv_name = "times.csv"; // default file for storing results // Parse cmd line arguments for(int i=0;i<argc;++i){ if(argv[i] == "-g" || argv[i] == "--gen"){ max_generations = std::stoi(argv[i+1]); i++; } else if(argv[i] == "--csv"){ csv_name = argv[i+1]; i++; } } if(max_generations <= 1){ std::cerr << "Invalid no of max_generations = " << max_generations << std::endl; std::cerr << "Terminating benchmarking" << std::endl; return 1; } std::cout << "Running Compute-Only Benchmarks for GameOfLife (CPU vs GPU)" << std::endl; std::cout << "# of generations for each grid = " << max_generations << std::endl; // Define stream for output csv file std::ofstream myfile; myfile.open (csv_name); if(!myfile.is_open()){ std::cerr << "Error in opening" << csv_name << std::endl; std::cerr << "Terminating benchmarking" << std::endl; return 1; } myfile << "Number of Cells,CPU Time(ms),GPU Time(ms)" << std::endl; std::cout << std::setw(5) << "Id" << std::setw(15) << "n_rows" << std::setw(15) << "n_cols" << std::setw(15) << "n_cells" << std::setw(15) << "CPU Time(ms)" << std::setw(15) << "GPU Time(ms)" << std::endl; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Small random inputs for(auto t = 1; t < 10 ; ++t){ timedRun(t,true,myfile,start,stop); } // Big random inputs for(auto t = 1; t <= 20 ; ++t){ timedRun(t,false,myfile,start,stop); } // teardown cudaEventDestroy(start); cudaEventDestroy(stop); myfile.close(); return 0; }
51c2e4e8267ddc08cd84a060140692254a5cb2ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "task_filljtr.cuh" template<typename T> __global__ void d_fillJTr( DeviceMemory<T>* mem ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if( idx >= mem->nParamPoints ) return; if( !mem->paramsUsed[idx] ) return; T sumx = 0; T sumy = 0; T sumx2 = 0; T sumy2 = 0; int idxm = idx % mem->frameW; if( idx != mem->fframeW ) { for( int row = 0; row < mem->frameH; row++ ) { int idxy = IDX2R( row, idx, mem->fframeW ); #ifdef FRAC_AS_MAT T frac = mem->frac[IDX2R( row, idxm, mem->frameW )]; #else T frac = mem->frac[row]; #endif T fdi = mem->differenceImage[idxy] * ( 1 - frac ); sumx += fdi * mem->wxgrad[idxy]; sumy += fdi * mem->wygrad[idxy]; } } __syncthreads(); if( idx != 0 ) { if( idxm == 0 ) idxm = mem->frameW - 1; else idxm -= 1; for( int row = 0; row < mem->frameH; row++ ) { int idxy = IDX2R( row, idx - 1, mem->fframeW ); #ifdef FRAC_AS_MAT T frac = mem->frac[IDX2R( row, idxm, mem->frameW )]; #else T frac = mem->frac[row]; #endif T fdi = mem->differenceImage[idxy] * frac; sumx2 += fdi * mem->wxgrad[idxy]; sumy2 += fdi * mem->wygrad[idxy]; } } __syncthreads(); mem->jtr[idx - mem->subSparseOffsets[idx]] = sumx + sumx2; idx += mem->nParamPoints; mem->jtr[idx - mem->subSparseOffsets[idx]] = sumy + sumy2; } template<typename T> void hd_fillJtr( DeviceMemory<T>& mem ) { int numBlocks = ( mem.nParamPoints + ( THREADS_PER_BLOCK - 1 ) ) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( d_fillJTr<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, mem.d_mem ); } template void hd_fillJtr( DeviceMemory<float>& d_Ptr ); template void hd_fillJtr( DeviceMemory<double>& d_Ptr );
51c2e4e8267ddc08cd84a060140692254a5cb2ce.cu
#include "task_filljtr.cuh" template<typename T> __global__ void d_fillJTr( DeviceMemory<T>* mem ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if( idx >= mem->nParamPoints ) return; if( !mem->paramsUsed[idx] ) return; T sumx = 0; T sumy = 0; T sumx2 = 0; T sumy2 = 0; int idxm = idx % mem->frameW; if( idx != mem->fframeW ) { for( int row = 0; row < mem->frameH; row++ ) { int idxy = IDX2R( row, idx, mem->fframeW ); #ifdef FRAC_AS_MAT T frac = mem->frac[IDX2R( row, idxm, mem->frameW )]; #else T frac = mem->frac[row]; #endif T fdi = mem->differenceImage[idxy] * ( 1 - frac ); sumx += fdi * mem->wxgrad[idxy]; sumy += fdi * mem->wygrad[idxy]; } } __syncthreads(); if( idx != 0 ) { if( idxm == 0 ) idxm = mem->frameW - 1; else idxm -= 1; for( int row = 0; row < mem->frameH; row++ ) { int idxy = IDX2R( row, idx - 1, mem->fframeW ); #ifdef FRAC_AS_MAT T frac = mem->frac[IDX2R( row, idxm, mem->frameW )]; #else T frac = mem->frac[row]; #endif T fdi = mem->differenceImage[idxy] * frac; sumx2 += fdi * mem->wxgrad[idxy]; sumy2 += fdi * mem->wygrad[idxy]; } } __syncthreads(); mem->jtr[idx - mem->subSparseOffsets[idx]] = sumx + sumx2; idx += mem->nParamPoints; mem->jtr[idx - mem->subSparseOffsets[idx]] = sumy + sumy2; } template<typename T> void hd_fillJtr( DeviceMemory<T>& mem ) { int numBlocks = ( mem.nParamPoints + ( THREADS_PER_BLOCK - 1 ) ) / THREADS_PER_BLOCK; d_fillJTr<T><<< numBlocks, THREADS_PER_BLOCK >>>( mem.d_mem ); } template void hd_fillJtr( DeviceMemory<float>& d_Ptr ); template void hd_fillJtr( DeviceMemory<double>& d_Ptr );
984c7a679f1500f4d3534214ae122e9ee04ecde8.hip
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/triplet_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void TripletLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  CollectTripets(*bottom[1]);
  Dtype alpha = this->layer_param().triplet_loss_param().margin();
  int num = bottom[0]->num();
  int dim = bottom[0]->count() / num;
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Blob<Dtype> temp_sub_blob(1, dim, 1, 1);
  Dtype* temp_sub = temp_sub_blob.mutable_gpu_data();
  map<uint64_t, Dtype> map_dist;
  Dtype loss = 0;
  for (int i = 0; i < triplets_.size(); i++) {
    Triplet<Dtype> &tri = triplets_[i];
    const Dtype *x_a = bottom_data + tri.a * dim;
    const Dtype *x_p = bottom_data + tri.p * dim;
    const Dtype *x_n = bottom_data + tri.n * dim;
    Dtype dis_ap, dis_an;
    uint64_t pair_ap = ((uint64_t)tri.a) << 32 | (uint32_t)tri.p;
    uint64_t pair_an = ((uint64_t)tri.a) << 32 | (uint32_t)tri.n;
    if (map_dist.find(pair_ap) != map_dist.end())
      dis_ap = map_dist[pair_ap];
    else {
      caffe_gpu_sub(dim, x_a, x_p, temp_sub);
      caffe_gpu_dot(dim, temp_sub, temp_sub, &dis_ap);
      map_dist[pair_ap] = dis_ap;
    }
    if (map_dist.find(pair_an) != map_dist.end())
      dis_an = map_dist[pair_an];
    else {
      caffe_gpu_sub(dim, x_a, x_n, temp_sub);
      caffe_gpu_dot(dim, temp_sub, temp_sub, &dis_an);
      map_dist[pair_an] = dis_an;
    }
    tri.loss = ::max(Dtype(0), dis_ap - dis_an + alpha);
    loss += tri.loss;
  }
  top[0]->mutable_cpu_data()[0] = loss / triplets_.size();
}

template <typename Dtype>
void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    int num = bottom[0]->num();
    int dim = bottom[0]->count() / num;
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_set(num * dim, Dtype(0), bottom_diff);
    Blob<Dtype> temp_sub_blob(1, dim, 1, 1);
    Dtype* temp_sub = temp_sub_blob.mutable_gpu_data();
    map<uint64_t, vector<int> > m_np, m_pa, m_an;
    // group n & p to get list a
    // group p & a to get list n
    // group a & n to get list p
    for (int i = 0; i < triplets_.size(); i++) {
      Triplet<Dtype> &tri = triplets_[i];
      if (tri.loss == 0)
        continue;
      uint64_t pair;
      pair = ((uint64_t)tri.n) << 32 | (uint32_t)tri.p;
      m_np[pair].push_back(tri.a);
      pair = ((uint64_t)tri.p) << 32 | (uint32_t)tri.a;
      m_pa[pair].push_back(tri.n);
      pair = ((uint64_t)tri.a) << 32 | (uint32_t)tri.n;
      m_an[pair].push_back(tri.p);
    }
    map<uint64_t, vector<int> >::iterator iter;
    // dx_a += x_n - x_p
    for (iter = m_np.begin(); iter != m_np.end(); iter++) {
      uint64_t pair = iter->first;
      vector<int>& list = iter->second;
      int _n = pair >> 32;
      int _p = pair & 0xffffffff;
      caffe_gpu_sub(dim, bottom_data + _n * dim, bottom_data + _p * dim, temp_sub);
      for (int i = 0; i < list.size(); i++)
        caffe_gpu_axpy(dim, (Dtype)1, temp_sub, bottom_diff + list[i] * dim);
    }
    // dx_p += x_p - x_a
    for (iter = m_pa.begin(); iter != m_pa.end(); iter++) {
      uint64_t pair = iter->first;
      vector<int>& list = iter->second;
      int _p = pair >> 32;
      int _a = pair & 0xffffffff;
      caffe_gpu_sub(dim, bottom_data + _p * dim, bottom_data + _a * dim, temp_sub);
      caffe_gpu_axpy(dim, (Dtype)list.size(), temp_sub, bottom_diff + _p * dim);
    }
    // dx_n += x_a - x_n
    for (iter = m_an.begin(); iter != m_an.end(); iter++) {
      uint64_t pair = iter->first;
      vector<int>& list = iter->second;
      int _a = pair >> 32;
      int _n = pair & 0xffffffff;
      caffe_gpu_sub(dim, bottom_data + _a * dim, bottom_data + _n * dim, temp_sub);
      caffe_gpu_axpy(dim, (Dtype)list.size(), temp_sub, bottom_diff + _n * dim);
    }
    // Scale gradient
    Dtype loss_weight = top[0]->cpu_diff()[0] * 2 / triplets_.size();  // 2 is from dL_dxa, dL_dxp, dL_dxn
    caffe_gpu_scal(num * dim, loss_weight, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer);

}  // namespace caffe
984c7a679f1500f4d3534214ae122e9ee04ecde8.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/triplet_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
void TripletLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  CollectTripets(*bottom[1]);
  Dtype alpha = this->layer_param().triplet_loss_param().margin();
  int num = bottom[0]->num();
  int dim = bottom[0]->count() / num;
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Blob<Dtype> temp_sub_blob(1, dim, 1, 1);
  Dtype* temp_sub = temp_sub_blob.mutable_gpu_data();
  map<uint64_t, Dtype> map_dist;
  Dtype loss = 0;
  for (int i = 0; i < triplets_.size(); i++) {
    Triplet<Dtype> &tri = triplets_[i];
    const Dtype *x_a = bottom_data + tri.a * dim;
    const Dtype *x_p = bottom_data + tri.p * dim;
    const Dtype *x_n = bottom_data + tri.n * dim;
    Dtype dis_ap, dis_an;
    uint64_t pair_ap = ((uint64_t)tri.a) << 32 | (uint32_t)tri.p;
    uint64_t pair_an = ((uint64_t)tri.a) << 32 | (uint32_t)tri.n;
    if (map_dist.find(pair_ap) != map_dist.end())
      dis_ap = map_dist[pair_ap];
    else {
      caffe_gpu_sub(dim, x_a, x_p, temp_sub);
      caffe_gpu_dot(dim, temp_sub, temp_sub, &dis_ap);
      map_dist[pair_ap] = dis_ap;
    }
    if (map_dist.find(pair_an) != map_dist.end())
      dis_an = map_dist[pair_an];
    else {
      caffe_gpu_sub(dim, x_a, x_n, temp_sub);
      caffe_gpu_dot(dim, temp_sub, temp_sub, &dis_an);
      map_dist[pair_an] = dis_an;
    }
    tri.loss = std::max(Dtype(0), dis_ap - dis_an + alpha);
    loss += tri.loss;
  }
  top[0]->mutable_cpu_data()[0] = loss / triplets_.size();
}

template <typename Dtype>
void TripletLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    int num = bottom[0]->num();
    int dim = bottom[0]->count() / num;
    const Dtype* bottom_data = bottom[0]->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_set(num * dim, Dtype(0), bottom_diff);
    Blob<Dtype> temp_sub_blob(1, dim, 1, 1);
    Dtype* temp_sub = temp_sub_blob.mutable_gpu_data();
    map<uint64_t, vector<int> > m_np, m_pa, m_an;
    // group n & p to get list a
    // group p & a to get list n
    // group a & n to get list p
    for (int i = 0; i < triplets_.size(); i++) {
      Triplet<Dtype> &tri = triplets_[i];
      if (tri.loss == 0)
        continue;
      uint64_t pair;
      pair = ((uint64_t)tri.n) << 32 | (uint32_t)tri.p;
      m_np[pair].push_back(tri.a);
      pair = ((uint64_t)tri.p) << 32 | (uint32_t)tri.a;
      m_pa[pair].push_back(tri.n);
      pair = ((uint64_t)tri.a) << 32 | (uint32_t)tri.n;
      m_an[pair].push_back(tri.p);
    }
    map<uint64_t, vector<int> >::iterator iter;
    // dx_a += x_n - x_p
    for (iter = m_np.begin(); iter != m_np.end(); iter++) {
      uint64_t pair = iter->first;
      vector<int>& list = iter->second;
      int _n = pair >> 32;
      int _p = pair & 0xffffffff;
      caffe_gpu_sub(dim, bottom_data + _n * dim, bottom_data + _p * dim, temp_sub);
      for (int i = 0; i < list.size(); i++)
        caffe_gpu_axpy(dim, (Dtype)1, temp_sub, bottom_diff + list[i] * dim);
    }
    // dx_p += x_p - x_a
    for (iter = m_pa.begin(); iter != m_pa.end(); iter++) {
      uint64_t pair = iter->first;
      vector<int>& list = iter->second;
      int _p = pair >> 32;
      int _a = pair & 0xffffffff;
      caffe_gpu_sub(dim, bottom_data + _p * dim, bottom_data + _a * dim, temp_sub);
      caffe_gpu_axpy(dim, (Dtype)list.size(), temp_sub, bottom_diff + _p * dim);
    }
    // dx_n += x_a - x_n
    for (iter = m_an.begin(); iter != m_an.end(); iter++) {
      uint64_t pair = iter->first;
      vector<int>& list = iter->second;
      int _a = pair >> 32;
      int _n = pair & 0xffffffff;
      caffe_gpu_sub(dim, bottom_data + _a * dim, bottom_data + _n * dim, temp_sub);
      caffe_gpu_axpy(dim, (Dtype)list.size(), temp_sub, bottom_diff + _n * dim);
    }
    // Scale gradient
    Dtype loss_weight = top[0]->cpu_diff()[0] * 2 / triplets_.size();  // 2 is from dL_dxa, dL_dxp, dL_dxn
    caffe_gpu_scal(num * dim, loss_weight, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(TripletLossLayer);

}  // namespace caffe
b18b55eecada4380003854e32ed485a6964f1f4a.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "ContinuousConvTransposeOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvTranspose.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TReal, class TIndex> class ContinuousConvTransposeOpKernelCUDA : public ContinuousConvTransposeOpKernel<TIndex> { public: explicit ContinuousConvTransposeOpKernelCUDA( OpKernelConstruction* construction) : ContinuousConvTransposeOpKernel<TIndex>(construction) { texture_alignment = GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& out_positions, const tensorflow::Tensor& out_importance, const tensorflow::Tensor& extents, const tensorflow::Tensor& offset, const tensorflow::Tensor& inp_positions, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_neighbors_importance_sum, const tensorflow::Tensor& inp_neighbors_row_splits, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const std::vector<int>& filter_dims, const bool individual_extents, const bool isotropic_extents, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& out_features) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvTransposeComputeFeaturesCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? 
inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); temp_size = ::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation CConvTransposeComputeFeaturesCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, realtype, indextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DContinuousConvTranspose") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<realtype>("TReal") \ .TypeConstraint<indextype>("TIndex"), \ ContinuousConvTransposeOpKernelCUDA<feattype, outtype, realtype, \ indextype>); REG_KB(float, float, float, int32) #undef REG_KB
b18b55eecada4380003854e32ed485a6964f1f4a.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "ContinuousConvTransposeOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/impl/continuous_conv/ContinuousConvTranspose.cuh" using namespace open3d; using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TReal, class TIndex> class ContinuousConvTransposeOpKernelCUDA : public ContinuousConvTransposeOpKernel<TIndex> { public: explicit ContinuousConvTransposeOpKernelCUDA( OpKernelConstruction* construction) : ContinuousConvTransposeOpKernel<TIndex>(construction) { texture_alignment = GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& out_positions, const tensorflow::Tensor& out_importance, const tensorflow::Tensor& extents, const tensorflow::Tensor& offset, const tensorflow::Tensor& inp_positions, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_neighbors_importance_sum, const tensorflow::Tensor& inp_neighbors_row_splits, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const std::vector<int>& filter_dims, const bool individual_extents, const bool isotropic_extents, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& out_features) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size CConvTransposeComputeFeaturesCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? 
inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); temp_size = std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation CConvTransposeComputeFeaturesCUDA<TFeat, TOut, TReal, TIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), out_positions.shape().dim_size(0), out_positions.flat<TReal>().data(), point_importances ? out_importance.flat<TFeat>().data() : nullptr, inp_positions.shape().dim_size(0), inp_positions.flat<TReal>().data(), inp_features.flat<TFeat>().data(), has_neighbors_importances ? inp_neighbors_importance_sum.flat<TFeat>().data() : nullptr, (int64_t*)inp_neighbors_row_splits.flat<int64>().data(), neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), extents.flat<TReal>().data(), offset.flat<TReal>().data(), this->interpolation, this->coordinate_mapping, this->align_corners, individual_extents, isotropic_extents, this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, realtype, indextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DContinuousConvTranspose") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<realtype>("TReal") \ .TypeConstraint<indextype>("TIndex"), \ ContinuousConvTransposeOpKernelCUDA<feattype, outtype, realtype, \ indextype>); REG_KB(float, float, float, int32) #undef REG_KB
04f4e1c21df38739f3544a05fa0e9df45a3c0ab0.hip
// !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>

// Calculate an estimated value of pi using n random Monte-Carlo draws
__global__ void estimate_pi(int seed, int per_thread, hiprandState_t *state, unsigned int *result) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    hiprand_init((seed << 20) + id, 0, 0, &state[id]);
    float r,x,y;
    int i;
    for(i=0;i<per_thread;i++) {
        x = hiprand_uniform(&state[id]);
        y = hiprand_uniform(&state[id]);
        r = sqrtf(x*x+y*y);
        result[id] += (r < 1) ? 1 : 0;
    }
}

// Sum all the results together.
void serial_reduce(int n, unsigned int *in, unsigned long *out) {
    int i;
    *out = 0;
    for(i=0;i<n;i++) {
        *out += in[i];
    }
}

int main(int argc, char * argv[]) {
    if (argc < 2) {
        printf("Please input number of sample points \n");
        exit(-1);
    }

    float n;
    int threads=512;
    int blocks=2048;
    sscanf(argv[1], "%e", &n);
    n=n-((int) n%threads*blocks); //get a nice number of points to run on our GPU
    if (n < threads*blocks)
        n=threads*blocks; // run at least enough points to use the GPU
    int per_thread = n/(threads*blocks);

    unsigned int *pi;
    printf("%d %d %d\n", blocks, threads, per_thread);
    hipMallocManaged(&pi, threads*blocks*sizeof(int));

    hiprandState_t* state;
    hipMalloc(&state, threads*blocks*sizeof(hiprandState_t));

    hipLaunchKernelGGL(( estimate_pi), dim3(blocks), dim3(threads), 0, 0, time(NULL), per_thread, state, pi);

    unsigned long *sum_pi;
    hipMallocManaged(&sum_pi, sizeof(long));
    hipDeviceSynchronize();
    serial_reduce(threads*blocks, pi, sum_pi);

    printf("Pi estimate: %7.6f\n", (float) 4.*(*sum_pi)/n);
    hipFree(state);
    return 0;
}
04f4e1c21df38739f3544a05fa0e9df45a3c0ab0.cu
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>

// Calculate an estimated value of pi using n random Monte-Carlo draws
__global__ void estimate_pi(int seed, int per_thread, curandState *state, unsigned int *result) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init((seed << 20) + id, 0, 0, &state[id]);
    float r,x,y;
    int i;
    for(i=0;i<per_thread;i++) {
        x = curand_uniform(&state[id]);
        y = curand_uniform(&state[id]);
        r = sqrtf(x*x+y*y);
        result[id] += (r < 1) ? 1 : 0;
    }
}

// Sum all the results together.
void serial_reduce(int n, unsigned int *in, unsigned long *out) {
    int i;
    *out = 0;
    for(i=0;i<n;i++) {
        *out += in[i];
    }
}

int main(int argc, char * argv[]) {
    if (argc < 2) {
        printf("Please input number of sample points \n");
        exit(-1);
    }

    float n;
    int threads=512;
    int blocks=2048;
    sscanf(argv[1], "%e", &n);
    n=n-((int) n%threads*blocks); //get a nice number of points to run on our GPU
    if (n < threads*blocks)
        n=threads*blocks; // run at least enough points to use the GPU
    int per_thread = n/(threads*blocks);

    unsigned int *pi;
    printf("%d %d %d\n", blocks, threads, per_thread);
    cudaMallocManaged(&pi, threads*blocks*sizeof(int));

    curandState_t* state;
    cudaMalloc(&state, threads*blocks*sizeof(curandState_t));

    estimate_pi<<<blocks, threads>>>(time(NULL), per_thread, state, pi);

    unsigned long *sum_pi;
    cudaMallocManaged(&sum_pi, sizeof(long));
    cudaDeviceSynchronize();
    serial_reduce(threads*blocks, pi, sum_pi);

    printf("Pi estimate: %7.6f\n", (float) 4.*(*sum_pi)/n);
    cudaFree(state);
    return 0;
}
47fbeebea4d5be2748d67be0f4c4c933897e617f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/hip/Resize.cuh> #include <c10/util/Exception.h> #include <THH/THHGeneral.h> #include <THH/THHThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <algorithm> #include <cstddef> #include <cmath> namespace at { namespace native { Tensor& eye_out_cuda(Tensor& result, int64_t n) { return at::native::eye_out_cuda(result, n, /*m=*/-1); } Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) { TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n); if(m < 0) { m = n; } result.resize_({n, m}); result.zero_(); int64_t sz = std::min<int64_t>(n, m); int64_t stride = result.stride(0) + result.stride(1); Tensor diag = result.as_strided({sz}, {stride}); diag.fill_(1); return result; } Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) { AT_ASSERT(options.device().type() == at::DeviceType::CUDA); TORCH_INTERNAL_ASSERT(impl::variable_excluded_from_dispatch()); TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned"); check_size_nonnegative(size); auto* allocator = at::cuda::getCUDADeviceAllocator(); int64_t nelements = prod_intlist(size); auto dtype = options.dtype(); auto storage_impl = c10::make_intrusive<StorageImpl>( dtype, nelements, allocator->allocate(nelements * dtype.itemsize()), allocator, /*resizeable=*/true); auto tensor = detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDATensorId); // Default TensorImpl has size [0] if (size.size() != 1 || size[0] != 0) { tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size); } TORCH_CHECK( !(options.has_memory_format() && optional_memory_format.has_value()), "Cannot set memory_format both in TensorOptions and explicit argument; please delete " "the redundant setter."); auto memory_format = options.memory_format_opt().value_or(optional_memory_format.value_or(MemoryFormat::Contiguous)); tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); return tensor; } Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) { auto t = at::native::empty_cuda({0}, options); at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride); return t; } Tensor& randperm_out_cuda(Tensor& result, int64_t n, c10::optional<Generator> generator) { TORCH_CHECK(n >= 0, "n must be non-negative, got", n); check_supported_max_int_with_precision(n, result); result.resize_({n}); if (n < 30000) { // For small inputs, we offload it to CPU instead. auto result_cpu = at::empty({n}, result.options().device(kCPU)); randperm_out(result_cpu, n, generator); return result.copy_(result_cpu); } #if 0 // This if condition should never be true because if n >= 30000 and the tensor has a Half type, // check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here // for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this. if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid. 
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA))); return result.copy_(randperm_out_cuda(result_float, n, generator)); } #endif // Generate random values for the keys array AT_DISPATCH_ALL_TYPES( result.scalar_type(), "randperm_out_cuda", [&] { auto keys = at::empty(result.sizes(), result.options()).random_(generator); auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>()); // shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it // points to a new tensor. Tensor shuffled; thrust::device_ptr<scalar_t> shuffled_data; if (result.is_contiguous()) { shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>()); } else { shuffled = at::empty(n, result.options()); shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>()); } auto state = globalContext().getTHCState(); THCThrustAllocator thrustAlloc(state); auto policy = thrust::hip::par(thrustAlloc).on(at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); thrust::sequence(policy, shuffled_data, shuffled_data + n); // Use the sorted order of keys to rearrange the result array thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data); if (!result.is_contiguous()) { result.copy_(shuffled); } } ); return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ namespace { // To find the max integer that does not exceed the root of an int64_t variable, // we could use a loop to test one bit at a time, which takes up to 31 // iterations. This would give the accurate result, but is relatively slow and // is an overkill for most cases where double's precision suffice. // // If we directly use sqrt to calculate the root, the conversion from int64_t // to double would lose 11 bits precision. // // The following solution uses sqrt directly for most cases, and would only // special handle it if there is indeed precision loss. __device__ inline int64_t resolve_root_int( int64_t b, int64_t cX4, int64_t x, int32_t sign) { int64_t bXb_cX4 = b*b - cX4; // potential precision loss could occur here when casting int64_t (63 bits // precision) to double (52 bits precision) double sr = ::sqrt((double)bXb_cX4); int64_t res = ::__double2ll_rd((-b + sign * sr)/2); // have to cast double to int64_t, otherwise it would only compare up to the // precision of a double variable, ignoring the precision loss if (bXb_cX4 != (int64_t) (sr * sr)) { // handle precision loss by using binary search int64_t llsr = ::__double2ll_rd(sr); // Use the following math to reduce search space. // Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss // let d = abs(bXb_cX4 - llsr * llsr), then we have: // z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d) // z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d) // Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)). // And the true value of row would also be with in range, // [res - sqrt(d), res + sqrt(d) + 1) // as the denominator would only reduce the precision penalty. int64_t diff = ::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr)))); // l never exceeds (could equal to) the target row index auto l = res > diff ? 
res - diff : 0; // r is always larger than the target row index auto r = res + diff + 1; // binary search for the correct answer x <<= 1; // the loop always compares with 2x, so do it once here while (l + 1 < r) { auto m = (l + r) >> 1; // for tril: // b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2 // for triu: // b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2 if (sign * (b + m) * m > x) { r = m; } else { l = m; } } res = l; } return res; } // f: the number of elements in the first row of the trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the tril as a top trapezoid stacked on a bottom rectangle. Assume x // corresponds to the coordinate (row, col) in the trapezoid, where the row and // the col both start from 0, then we have: // // (f + f + row - 1) * row / 2 <= x [1] // (f + f + row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (row + 2f - 1)row <= 2x // row^2 + (2f-1)row - 2x <= 0. [3] // // Based on inequality [3], we have the following coefficients for formula of // root: // a = 1 // b = 2f - 1 // c = -2x // There are two roots, and we should use the largest integer that does not // exceed the root on the right. Intuitively, it is because: // i) the valid solution range of row is between two roots, as it is <= 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 + (2f-1)row - 2x. // Therefore, the valid range of row lies in between the nadir point and // the larger root on the right. // Full proof can be derived from inequality [2]. So, we calculate the result // coordinate as: // // row = floor((-b + sqrt(b^2 - 4c)) / 2) // col = x - (f + f + row - 1) * row / 2 __device__ inline void get_coordinate_in_tril_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = f - 1; auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x; row = resolve_root_int(b, cX4, x, 1); col = x - ((f + row - 1) * row >> 1); } // f: the number of elements in the first row of the bottom trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the triu as a top rectangle stacked on a bottom trapezoid, where the // trapezoid is upside down. Assume x corresponds to the coordinate (row, col) // in the bottom trapezoid, where the row and the col start from 0, then we // have: // // (f + f - row + 1) * row / 2 <= x [1] // (f + f - row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (-row + 2f + 1)row <= 2x // row^2 - (2f+1)row + 2x >= 0. [3] // // Based on inequality [3], we have the following coefficients for formula of // root: // a = 1 // b = -1 - 2f // c = 2x // There are two roots, and we should use the largest integer that does not // exceed the root on the left. Intuitively, it is because: // i) the valid solution range of row is outside of the two roots, as it is < // > 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 - (2f+1)row + 2x. // Therefore, the valid range of row lies to the left of the smaller root // on the left. // Full proof can be derived from inequality [2]. 
So, we calculate the result // coordinate as: // // row = floor((-b - sqrt(b^2 - 4c)) / 2) // col = x - (f + f - row + 1) * row / 2 __device__ inline void get_coordinate_in_triu_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = -1 - f; auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x; row = resolve_root_int(b, cX4, x, -1); col = x - ((f - row + 1) * row >> 1) + row; } } // namespace template <typename scalar_t> __global__ #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif void tril_indices_kernel(scalar_t * tensor, int64_t row_offset, int64_t m_first_row, int64_t col, int64_t trapezoid_size, int64_t tril_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < tril_size) { int64_t r, c; if (linear_index < trapezoid_size) { // the coordinate is within the top trapezoid get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c); } else { // the coordinate falls in the bottom rectangle auto surplus = linear_index - trapezoid_size; // add the height of trapezoid: m_last_row (col) - m_first_row + 1 r = surplus / col + col - m_first_row + 1; c = surplus % col; } r += row_offset; tensor[linear_index] = r; tensor[linear_index + tril_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor tril_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto tril_size = get_tril_size(row, col, offset); auto tensor = empty_cuda({2, tril_size}, options); if (tril_size > 0) { auto m_first_row = offset > 0 ? std::min<int64_t>(col, 1 + offset) : // upper bounded by col row + offset > 0; // either 0 or 1 auto trapezoid_row_offset = std::max<int64_t>(0, -offset); auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1; int64_t rectangle_size = 0; if (rectangle_row_offset < row) { rectangle_size = (row - rectangle_row_offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using tril_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. 
TORCH_CHECK( cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] { hipLaunchKernelGGL(( tril_indices_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), tensor.data_ptr<scalar_t>(), trapezoid_row_offset, m_first_row, col, tril_size - rectangle_size, tril_size); }); } return tensor; } template <typename scalar_t> __global__ void triu_indices_kernel(scalar_t * tensor, int64_t col_offset, int64_t m_first_row, int64_t col, int64_t rectangle_size, int64_t triu_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < triu_size) { int64_t r, c; if (linear_index < rectangle_size) { // the coordinate is within the top rectangle r = linear_index / col; c = linear_index % col; } else { // the coordinate falls in the bottom trapezoid get_coordinate_in_triu_trapezoid( m_first_row, linear_index - rectangle_size, r, c); r += rectangle_size / col; } c += col_offset; tensor[linear_index] = r; tensor[linear_index + triu_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor triu_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto triu_size = row * col - get_tril_size(row, col, offset - 1); auto tensor = empty_cuda({2, triu_size}, options); if (triu_size > 0) { // # of triu elements in the first row auto m_first_row = offset > 0 ? std::max<int64_t>(col - offset, 0) : // upper bounded by col col; // size of the top rectangle int64_t rectangle_size = 0; if (offset < 0) { rectangle_size = std::min<int64_t>(row, -offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using triu_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. TORCH_CHECK( cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] { hipLaunchKernelGGL(( triu_indices_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), tensor.data_ptr<scalar_t>(), std::max<int64_t>(0, offset), m_first_row, col, rectangle_size, triu_size); }); } return tensor; } }} // namespace at::native
47fbeebea4d5be2748d67be0f4c4c933897e617f.cu
#include <ATen/ATen.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/cuda/Resize.cuh> #include <c10/util/Exception.h> #include <THC/THCGeneral.h> #include <THC/THCThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <algorithm> #include <cstddef> #include <cmath> namespace at { namespace native { Tensor& eye_out_cuda(Tensor& result, int64_t n) { return at::native::eye_out_cuda(result, n, /*m=*/-1); } Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) { TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n); if(m < 0) { m = n; } result.resize_({n, m}); result.zero_(); int64_t sz = std::min<int64_t>(n, m); int64_t stride = result.stride(0) + result.stride(1); Tensor diag = result.as_strided({sz}, {stride}); diag.fill_(1); return result; } Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) { AT_ASSERT(options.device().type() == at::DeviceType::CUDA); TORCH_INTERNAL_ASSERT(impl::variable_excluded_from_dispatch()); TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned"); check_size_nonnegative(size); auto* allocator = at::cuda::getCUDADeviceAllocator(); int64_t nelements = prod_intlist(size); auto dtype = options.dtype(); auto storage_impl = c10::make_intrusive<StorageImpl>( dtype, nelements, allocator->allocate(nelements * dtype.itemsize()), allocator, /*resizeable=*/true); auto tensor = detail::make_tensor<TensorImpl>(storage_impl, DispatchKey::CUDATensorId); // Default TensorImpl has size [0] if (size.size() != 1 || size[0] != 0) { tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size); } TORCH_CHECK( !(options.has_memory_format() && optional_memory_format.has_value()), "Cannot set memory_format both in TensorOptions and explicit argument; please delete " "the redundant setter."); auto memory_format = options.memory_format_opt().value_or(optional_memory_format.value_or(MemoryFormat::Contiguous)); tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); return tensor; } Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) { auto t = at::native::empty_cuda({0}, options); at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride); return t; } Tensor& randperm_out_cuda(Tensor& result, int64_t n, c10::optional<Generator> generator) { TORCH_CHECK(n >= 0, "n must be non-negative, got", n); check_supported_max_int_with_precision(n, result); result.resize_({n}); if (n < 30000) { // For small inputs, we offload it to CPU instead. auto result_cpu = at::empty({n}, result.options().device(kCPU)); randperm_out(result_cpu, n, generator); return result.copy_(result_cpu); } #if 0 // This if condition should never be true because if n >= 30000 and the tensor has a Half type, // check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here // for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this. if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid. 
auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA))); return result.copy_(randperm_out_cuda(result_float, n, generator)); } #endif // Generate random values for the keys array AT_DISPATCH_ALL_TYPES( result.scalar_type(), "randperm_out_cuda", [&] { auto keys = at::empty(result.sizes(), result.options()).random_(generator); auto keys_data = thrust::device_ptr<scalar_t>(keys.data_ptr<scalar_t>()); // shuffled_data points to the underlying data of the output tensor if the tensor is contiguous; otherwise it // points to a new tensor. Tensor shuffled; thrust::device_ptr<scalar_t> shuffled_data; if (result.is_contiguous()) { shuffled_data = thrust::device_ptr<scalar_t>(result.data_ptr<scalar_t>()); } else { shuffled = at::empty(n, result.options()); shuffled_data = thrust::device_ptr<scalar_t>(shuffled.data_ptr<scalar_t>()); } auto state = globalContext().getTHCState(); THCThrustAllocator thrustAlloc(state); auto policy = thrust::cuda::par(thrustAlloc).on(at::cuda::getCurrentCUDAStream()); thrust::sequence(policy, shuffled_data, shuffled_data + n); // Use the sorted order of keys to rearrange the result array thrust::sort_by_key(policy, keys_data, keys_data + n, shuffled_data); if (!result.is_contiguous()) { result.copy_(shuffled); } } ); return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ namespace { // To find the max integer that does not exceed the root of an int64_t variable, // we could use a loop to test one bit at a time, which takes up to 31 // iterations. This would give the accurate result, but is relatively slow and // is an overkill for most cases where double's precision suffice. // // If we directly use sqrt to calculate the root, the conversion from int64_t // to double would lose 11 bits precision. // // The following solution uses sqrt directly for most cases, and would only // special handle it if there is indeed precision loss. __device__ inline int64_t resolve_root_int( int64_t b, int64_t cX4, int64_t x, int32_t sign) { int64_t bXb_cX4 = b*b - cX4; // potential precision loss could occur here when casting int64_t (63 bits // precision) to double (52 bits precision) double sr = ::sqrt((double)bXb_cX4); int64_t res = ::__double2ll_rd((-b + sign * sr)/2); // have to cast double to int64_t, otherwise it would only compare up to the // precision of a double variable, ignoring the precision loss if (bXb_cX4 != (int64_t) (sr * sr)) { // handle precision loss by using binary search int64_t llsr = ::__double2ll_rd(sr); // Use the following math to reduce search space. // Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss // let d = abs(bXb_cX4 - llsr * llsr), then we have: // z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d) // z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d) // Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)). // And the true value of row would also be with in range, // [res - sqrt(d), res + sqrt(d) + 1) // as the denominator would only reduce the precision penalty. int64_t diff = ::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr)))); // l never exceeds (could equal to) the target row index auto l = res > diff ? 
res - diff : 0; // r is always larger than the target row index auto r = res + diff + 1; // binary search for the correct answer x <<= 1; // the loop always compares with 2x, so do it once here while (l + 1 < r) { auto m = (l + r) >> 1; // for tril: // b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2 // for triu: // b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2 if (sign * (b + m) * m > x) { r = m; } else { l = m; } } res = l; } return res; } // f: the number of elements in the first row of the trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the tril as a top trapezoid stacked on a bottom rectangle. Assume x // corresponds to the coordinate (row, col) in the trapezoid, where the row and // the col both start from 0, then we have: // // (f + f + row - 1) * row / 2 <= x [1] // (f + f + row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (row + 2f - 1)row <= 2x // row^2 + (2f-1)row - 2x <= 0. [3] // // Based on inequality [3], we have the following coefficients for formula of // root: // a = 1 // b = 2f - 1 // c = -2x // There are two roots, and we should use the largest integer that does not // exceed the root on the right. Intuitively, it is because: // i) the valid solution range of row is between two roots, as it is <= 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 + (2f-1)row - 2x. // Therefore, the valid range of row lies in between the nadir point and // the larger root on the right. // Full proof can be derived from inequality [2]. So, we calculate the result // coordinate as: // // row = floor((-b + sqrt(b^2 - 4c)) / 2) // col = x - (f + f + row - 1) * row / 2 __device__ inline void get_coordinate_in_tril_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = f - 1; auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x; row = resolve_root_int(b, cX4, x, 1); col = x - ((f + row - 1) * row >> 1); } // f: the number of elements in the first row of the bottom trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the triu as a top rectangle stacked on a bottom trapezoid, where the // trapezoid is upside down. Assume x corresponds to the coordinate (row, col) // in the bottom trapezoid, where the row and the col start from 0, then we // have: // // (f + f - row + 1) * row / 2 <= x [1] // (f + f - row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (-row + 2f + 1)row <= 2x // row^2 - (2f+1)row + 2x >= 0. [3] // // Based on inequality [3], we have the following coefficients for formula of // root: // a = 1 // b = -1 - 2f // c = 2x // There are two roots, and we should use the largest integer that does not // exceed the root on the left. Intuitively, it is because: // i) the valid solution range of row is outside of the two roots, as it is < // > 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 - (2f+1)row + 2x. // Therefore, the valid range of row lies to the left of the smaller root // on the left. // Full proof can be derived from inequality [2]. 
So, we calculate the result // coordinate as: // // row = floor((-b - sqrt(b^2 - 4c)) / 2) // col = x - (f + f - row + 1) * row / 2 __device__ inline void get_coordinate_in_triu_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = -1 - f; auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x; row = resolve_root_int(b, cX4, x, -1); col = x - ((f - row + 1) * row >> 1) + row; } } // namespace template <typename scalar_t> __global__ #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif void tril_indices_kernel(scalar_t * tensor, int64_t row_offset, int64_t m_first_row, int64_t col, int64_t trapezoid_size, int64_t tril_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < tril_size) { int64_t r, c; if (linear_index < trapezoid_size) { // the coordinate is within the top trapezoid get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c); } else { // the coordinate falls in the bottom rectangle auto surplus = linear_index - trapezoid_size; // add the height of trapezoid: m_last_row (col) - m_first_row + 1 r = surplus / col + col - m_first_row + 1; c = surplus % col; } r += row_offset; tensor[linear_index] = r; tensor[linear_index + tril_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor tril_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto tril_size = get_tril_size(row, col, offset); auto tensor = empty_cuda({2, tril_size}, options); if (tril_size > 0) { auto m_first_row = offset > 0 ? std::min<int64_t>(col, 1 + offset) : // upper bounded by col row + offset > 0; // either 0 or 1 auto trapezoid_row_offset = std::max<int64_t>(0, -offset); auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1; int64_t rectangle_size = 0; if (rectangle_row_offset < row) { rectangle_size = (row - rectangle_row_offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using tril_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. 
TORCH_CHECK( cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] { tril_indices_kernel<<< dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( tensor.data_ptr<scalar_t>(), trapezoid_row_offset, m_first_row, col, tril_size - rectangle_size, tril_size); }); } return tensor; } template <typename scalar_t> __global__ void triu_indices_kernel(scalar_t * tensor, int64_t col_offset, int64_t m_first_row, int64_t col, int64_t rectangle_size, int64_t triu_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < triu_size) { int64_t r, c; if (linear_index < rectangle_size) { // the coordinate is within the top rectangle r = linear_index / col; c = linear_index % col; } else { // the coordinate falls in the bottom trapezoid get_coordinate_in_triu_trapezoid( m_first_row, linear_index - rectangle_size, r, c); r += rectangle_size / col; } c += col_offset; tensor[linear_index] = r; tensor[linear_index + triu_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor triu_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto triu_size = row * col - get_tril_size(row, col, offset - 1); auto tensor = empty_cuda({2, triu_size}, options); if (triu_size > 0) { // # of triu elements in the first row auto m_first_row = offset > 0 ? std::max<int64_t>(col - offset, 0) : // upper bounded by col col; // size of the top rectangle int64_t rectangle_size = 0; if (offset < 0) { rectangle_size = std::min<int64_t>(row, -offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using triu_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. TORCH_CHECK( cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] { triu_indices_kernel<<< dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( tensor.data_ptr<scalar_t>(), std::max<int64_t>(0, offset), m_first_row, col, rectangle_size, triu_size); }); } return tensor; } }} // namespace at::native
1a7216b79e804be5c07965ee6b1d37826f611b7f.hip
// !!! This is a file automatically generated by hipify!!! #include "CUDAFCM.h" #include <iostream> #include <cmath> #include <omp.h> #include <stdio.h> #include <hiprand/hiprand.h> #include <hip/hip_runtime.h> #include <vector> /* Code: Fuzzy C-means Developer: Dennis Carnelossi Furlaneto License: MIT */ namespace CUDAFCM { #define N_THREADS 256 #define CUDA_CALL(x) do { if((x)!=hipSuccess) { \ printf("Error at %s:%d - ErroType:%s\n",__FILE__,__LINE__, hipGetErrorString(x));\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CheckCudaErrors(){ \ hipError_t errorCode = hipGetLastError();\ if(errorCode != hipSuccess)\ printf("Check Cuda Error at %s, line %d: %s\n", __FILE__, __LINE__, hipGetErrorString(errorCode));} void PrintMatrix(int nDim, const char* nameRow, std::vector<float> & matrix) { int nRows = matrix.size()/nDim; for(int i=0; i<nRows; ++i) { printf("%s %d: ",nameRow, i); for(int j=0; j<nDim; ++j) { printf("%.4f ", matrix[i*nDim + j]); } printf("\n"); } } __global__ void CudaCopyDataDeviceToDevice(volatile float * dst, const float * src, int nElems) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nElems) dst[idx] = src[idx]; } void CopyDataDeviceToDevice(float * dst, const float * src, int nPoints) { int blockSize = nPoints < N_THREADS ? nPoints : N_THREADS; int gridSize = ceil(((float)nPoints)/blockSize) < 1 ? 1 : ceil(((float)nPoints)/blockSize); hipLaunchKernelGGL(( CudaCopyDataDeviceToDevice), dim3(gridSize), dim3(blockSize), 0, 0, dst, src, nPoints); hipDeviceSynchronize(); } /*RunClustering: This function receives a data vector with all the points with nDims dimensions and returns a membership vector with nPoints*nClusters positions. The membership values are in the following order: points -> membership per cluster. 
Inputs: nDims, data, nCentroids, fuzziness, errorThreshold Outputs: centroids Returns: membership */ std::vector<float> RunClustering(int nDims, const std::vector<float> & data, float fuzziness, float errorThreshold, int nCentroids, std::vector<float> & centroids) { std::vector<float> currentMembership; std::vector<float> nextMembership; int nPoints = data.size() / nDims; int nMembershipSize = nPoints * nCentroids; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float miliseconds; hipEventRecord(start); float* deviceNextMembership; CudaInitializeMembership(nPoints, nCentroids, &deviceNextMembership, nextMembership); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&miliseconds, start, stop); printf("CudaInitializeMembership: %f ms\n", miliseconds); CheckCudaErrors(); float * deviceCentroids; float * deviceCurrentMembership; float * deviceDataPoints; float * deviceSquaredError; hipMalloc((void**)&deviceCentroids, nCentroids * nDims * sizeof(float)); hipMemset(deviceCentroids, 0, nCentroids * nDims * sizeof(float)); CheckCudaErrors(); hipMalloc((void**)&deviceCurrentMembership, nPoints * nCentroids * sizeof(float)); hipMalloc((void**)&deviceDataPoints, nPoints * nDims * sizeof(float)); hipMalloc((void**)&deviceSquaredError, sizeof(float)); hipMemcpy(deviceDataPoints, &data[0], nPoints * nDims * sizeof(float), hipMemcpyHostToDevice); CheckCudaErrors(); bool notConverged = !false; do { CopyDataDeviceToDevice( deviceCurrentMembership, deviceNextMembership, nPoints*nCentroids); //hipMemcpy(deviceCurrentMembership, deviceNextMembership, nCentroids * nPoints * sizeof(float), hipMemcpyDeviceToDevice); CheckCudaErrors(); CUDAFCM::ComputeCentroids(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceCurrentMembership, deviceCentroids); CheckCudaErrors(); CUDAFCM::ComputeMembership(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceNextMembership, deviceCentroids); CheckCudaErrors(); notConverged = IsMembershipDiffGreater(deviceCurrentMembership, deviceNextMembership, nMembershipSize, errorThreshold, deviceSquaredError); } while(notConverged); CUDAFCM::ComputeCentroids(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceNextMembership, deviceCentroids); CheckCudaErrors(); centroids.resize(nCentroids*nDims); nextMembership.resize(nPoints*nCentroids); hipMemcpy(&centroids[0], deviceCentroids, nCentroids * nDims * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&nextMembership[0], deviceCurrentMembership, nPoints * nCentroids * sizeof(float), hipMemcpyDeviceToHost); CheckCudaErrors(); hipFree(deviceNextMembership); hipFree(deviceCurrentMembership); hipFree(deviceDataPoints); hipFree(deviceCentroids); hipFree(deviceSquaredError); CheckCudaErrors(); return nextMembership; } __global__ void NormalizePointsMembership(float* values, int nPoints, int nClusters) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nPoints) { int c; float sum=0.0; for( c = 0; c<nClusters; ++c) sum = sum + values[c*nPoints + idx]; for( c = 0; c<nClusters; ++c) values[c*nPoints + idx] = values[c*nPoints + idx]/sum; } __syncthreads(); } int CudaInitializeMembership(int nPoints, int nClusters, float** deviceData, std::vector<float> & hostData) { hiprandGenerator_t gen; int n = nPoints * nClusters; hostData.resize(n); hipMalloc((void**)deviceData, n * sizeof(float)); CheckCudaErrors(); hiprandCreateGenerator(&gen,HIPRAND_RNG_PSEUDO_DEFAULT); CheckCudaErrors(); hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL); CheckCudaErrors(); 
hiprandGenerateUniform(gen, *deviceData, n); CheckCudaErrors(); int blockSize = n < N_THREADS ? n : N_THREADS; int gridSize = ceil(float(n)/(blockSize*nClusters)) < 1 ? 1 : ceil(float(n)/(blockSize*nClusters)); hipLaunchKernelGGL(( NormalizePointsMembership), dim3(gridSize), dim3(blockSize), 0, 0, *deviceData, nPoints, nClusters); CheckCudaErrors(); hipMemcpy(&hostData[0], *deviceData, n * sizeof(float), hipMemcpyDeviceToHost); hiprandDestroyGenerator(gen); CheckCudaErrors(); //CUDA_CALL(hipFree(deviceData)); return EXIT_SUCCESS; } __global__ void CudaIsMembershipDiffGreater(float * currentMembership, float * nextMembership, int size, float error, float * totalSquaredDiff) { int idx = threadIdx.x; int nThreads = blockDim.x; extern __shared__ float squaredDiffs[]; squaredDiffs[idx] = 0.0; int dataBlockSize; if(size < nThreads) dataBlockSize = 1; else dataBlockSize = ceil(((float)size) / nThreads); int startPos = idx*dataBlockSize; int endPos = startPos + dataBlockSize; if (endPos >= size) endPos = size; if(startPos < size) { int i; float diff; float calcError = 0.0; for(i=startPos; i<endPos; ++i) { diff = (nextMembership[i] - currentMembership[i]); calcError += diff*diff; } squaredDiffs[idx] = calcError; } __syncthreads(); float auxTotalSquareDiff = 0; if (idx == 0) { for(int i = 0; i<nThreads;++i) auxTotalSquareDiff += squaredDiffs[i]; totalSquaredDiff[0] = sqrt(auxTotalSquareDiff); printf("%f\n", totalSquaredDiff[0]); } } /*IsMembershipDiffGreater: Check if the absolute difference between nextMembership[u] and currentMembership[u] are larger than a given error Inputs: currentMembership, nextMembership, error Outputs: True if the absolute difference between nextMembership[u] and currentMembership[u] are larger than a given error */ bool IsMembershipDiffGreater(float * deviceCurrentMembership, float * deviceNextMembership, int size, float error, float * deviceSquaredError) { float miliseconds; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); int blockSize = size < N_THREADS ? 
size : N_THREADS; hipLaunchKernelGGL(( CudaIsMembershipDiffGreater), dim3(1), dim3(blockSize), blockSize*sizeof(float), 0, deviceCurrentMembership,deviceNextMembership, size, error, deviceSquaredError); CheckCudaErrors(); float squaredError[1]; hipMemcpy(&squaredError[0], deviceSquaredError, sizeof(float), hipMemcpyDeviceToHost); CheckCudaErrors(); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&miliseconds, start, stop); printf("MembershipDiffGreater: %f ms\n", miliseconds); if(error >= squaredError[0]) return false; return true; } void TestComputeSquareError() { std::vector<float> data1; data1.push_back(0.5); data1.push_back(0.0); data1.push_back(1.0); data1.push_back(0.0); data1.push_back(0.0); data1.push_back(1.0); std::vector<float> data2; data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); int size = data1.size(); float * dData1; float * dData2; float * deviceSquaredError; hipMalloc((void **)&deviceSquaredError, sizeof(float)); hipMalloc((void **)&dData1, size * sizeof(float)); hipMalloc((void **)&dData2, size * sizeof(float)); hipMemcpy(dData1, &data1[0], size * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dData2, &data2[0], size * sizeof(float), hipMemcpyHostToDevice); float error = 1; IsMembershipDiffGreater(dData1, dData2, size, error, deviceSquaredError); hipFree(dData1); hipFree(dData2); hipFree(deviceSquaredError); } __global__ void CudaComputeCentroidsSingleBlock(float fuzziness, int nPoints, int idxCentroid, int nDims, float* data, float* membership, float* centroids) { int idx = threadIdx.x; int nThreads = blockDim.x; extern __shared__ float numerator[]; __shared__ float denominator[N_THREADS]; int dataBlockSize; if(nPoints < nThreads) dataBlockSize = 1; else dataBlockSize = ceil(((float)nPoints) / nThreads); int startPos = idx*dataBlockSize; int endPos = startPos + dataBlockSize; if(startPos < nPoints && endPos > nPoints) endPos = nPoints; if(startPos < nPoints) { for(int i=0; i<nDims; ++i) numerator[idx*nDims + i] = 0.0; denominator[idx] = 0.0; register float weightedMembership; for (int p = startPos; p < endPos; ++p) { weightedMembership = powf(membership[idxCentroid*nPoints + p], 2); for (int d = 0; d < nDims; d++) numerator[idx*nDims + d] += weightedMembership*data[p*nDims + d]; denominator[idx] += weightedMembership; } } __syncthreads(); if (idx == 0) { int i, j; for(i=1;i<nThreads;++i) denominator[0] += denominator[i]; for(i=nDims; i<nThreads*nDims; i+=nDims) for(j=0; j<nDims;j++) numerator[j] += numerator[i+j]; for (int d = 0; d < nDims; d++) centroids[idxCentroid*nDims + d] = numerator[d]/denominator[0]; } } __global__ void CudaComputePartialCentroid(float fuzziness, int nPoints, int idxCentroid, int nDims, float* data, float* membership, float* centroids, int dataBlockSize, float *globalNumerator, float * globalDenominator) { int tGlobalIdx = blockDim.x*blockIdx.x + threadIdx.x; extern __shared__ float numerator[]; __shared__ float denominator[N_THREADS]; for(int i=0; i<nDims; ++i) numerator[threadIdx.x*nDims + i] = 0.0; denominator[threadIdx.x] = 0.0; if(tGlobalIdx == 0) { for(int i=0; i<nDims; ++i) globalNumerator[i] = 0.0; globalDenominator[0] = 0.0; } int startPos = tGlobalIdx*dataBlockSize; int endPos = startPos + dataBlockSize; if(startPos < nPoints && endPos > nPoints) endPos = nPoints; if(startPos < nPoints) { register float weightedMembership; for (int p = startPos; p < endPos; ++p) { weightedMembership = powf(membership[idxCentroid*nPoints + p], 2); 
for (int d = 0; d < nDims; d++) numerator[threadIdx.x*nDims + d] += weightedMembership*data[p*nDims + d]; denominator[threadIdx.x] += weightedMembership; } } __syncthreads(); if (threadIdx.x == 0) { for(int i=1;i<blockDim.x;++i) denominator[0] += denominator[i]; for(int i=nDims; i<blockDim.x*nDims; ++i) numerator[i%nDims] += numerator[i]; atomicAdd(&globalDenominator[0], (float)denominator[0]); for(int i=0; i<nDims;i++) atomicAdd(&globalNumerator[i], (float)numerator[i]); } } __global__ void CudaComputeOnlyCentroids(int idxCentroid, int nDims, float* centroids, float *globalNumerator, float * globalDenominator) { int tGlobalIdx = blockDim.x*blockIdx.x + threadIdx.x; if(tGlobalIdx == 0) { for (int d = 0; d < nDims; d++) centroids[idxCentroid*nDims + d] = globalNumerator[d]/globalDenominator[0]; } } void ComputeCentroids(float fuzziness, int nPoints, int nCentroids, int nDims, float* data, float * membership, float * centroids) { float miliseconds; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); int dataBlockSize, blockSize, gridSize; if(nPoints < N_THREADS) { gridSize = 1; dataBlockSize = 1; blockSize = nPoints; } else { gridSize = 10; blockSize = N_THREADS; dataBlockSize = ceil((float)nPoints / (blockSize*gridSize)); } int numeratorSize = blockSize*nDims; float * deviceNumerator; float * deviceDenominator; for(int i=0; i<nCentroids; ++i) { hipMalloc((void **)&deviceNumerator, nDims * sizeof(float)); hipMalloc((void **)&deviceDenominator, sizeof(float)); hipLaunchKernelGGL(( CudaComputePartialCentroid), dim3(gridSize), dim3(blockSize), numeratorSize*sizeof(float), 0, fuzziness, nPoints, i, nDims, data, membership, centroids, dataBlockSize, deviceNumerator, deviceDenominator); hipDeviceSynchronize(); hipLaunchKernelGGL(( CudaComputeOnlyCentroids), dim3(1), dim3(1), 0, 0, i, nDims, centroids, deviceNumerator, deviceDenominator); hipFree(deviceNumerator); hipFree(deviceDenominator); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&miliseconds, start, stop); printf("ComputeCentroids: %f ms\n", miliseconds); CheckCudaErrors(); } void TestComputeCentroid() { int fuzziness = 2; int nDims = 2; std::vector<float> data; data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(5.0); data.push_back(5.0); data.push_back(5.0); data.push_back(6.0); data.push_back(6.0); data.push_back(7.0); std::vector<float> centroids; centroids.push_back(0);centroids.push_back(0); centroids.push_back(0);centroids.push_back(0); std::vector<float> membership; membership.push_back(0.9966);membership.push_back(0.9897);membership.push_back(0.99); membership.push_back(0.0250);membership.push_back(0.0020);membership.push_back(0.0185); membership.push_back(0.0034);membership.push_back(0.0103);membership.push_back(0.0100); membership.push_back(0.9750);membership.push_back(0.9980);membership.push_back(0.9815); int nPoints = data.size()/nDims; int nCentroids = centroids.size()/nDims; float * deviceCentroids; float * deviceCurrentMembership; float * deviceDataPoints; hipMalloc((void **)&deviceCentroids, nCentroids * nDims * sizeof(float)); hipMalloc((void **)&deviceCurrentMembership, nPoints * nCentroids * sizeof(float)); hipMalloc((void **)&deviceDataPoints, nPoints * nDims * sizeof(float)); hipMemcpy(deviceCentroids, &centroids[0], nCentroids * nDims * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceDataPoints, &data[0], nPoints * nDims * sizeof(float), 
hipMemcpyHostToDevice); hipMemcpy(deviceCurrentMembership, &membership[0], nPoints * nCentroids * sizeof(float), hipMemcpyHostToDevice); ComputeCentroids(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceCurrentMembership, deviceCentroids); hipMemcpy(&centroids[0], deviceCentroids, nCentroids * nDims * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&data[0], deviceDataPoints, nPoints * nDims * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&membership[0], deviceCurrentMembership, nPoints * nCentroids * sizeof(float), hipMemcpyDeviceToHost); PrintMatrix(nDims, "Points", data); PrintMatrix(nDims, "Centroids", centroids); hipFree(deviceCurrentMembership); hipFree(deviceDataPoints); hipFree(deviceCentroids); printf("ComputeCentroids: Finished!\n"); } __device__ float CudaComputeDistance(int nDims, int p1Idx, float * p1s, int p2Idx, float * p2s) { float distance = 0; float pointDiff = 0; int p1IdxStart = p1Idx*nDims; int p2IdxStart = p2Idx*nDims; for (int d = 0; d < nDims; d++) { pointDiff = p1s[p1IdxStart + d] - p2s[p2IdxStart + d]; distance += pointDiff * pointDiff; } return sqrt(distance); } __global__ void CudaComputeMembership(float fuzziness, int nDims, float * data, int nPoints, float * centroids, int nCentroids, float * membership) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nPoints) { register float denominator, distPC, divDistPC1DistPC2; extern __shared__ float sharedData[]; int c1, c2; for(int i=0; i<nDims; ++i) sharedData[threadIdx.x*nDims + i] = data[idx*nDims+i]; for (c1 = 0; c1 < nCentroids; ++c1) { denominator = 0.0; distPC = CudaComputeDistance(nDims, threadIdx.x, sharedData, c1, centroids); for (c2 = 0; c2 < nCentroids; ++c2) { divDistPC1DistPC2 = distPC / CudaComputeDistance(nDims, threadIdx.x, sharedData, c2, centroids); denominator += divDistPC1DistPC2 * divDistPC1DistPC2; } membership[idx + c1*nPoints] = 1./denominator; } } __syncthreads(); } void ComputeMembership(float fuzziness, int nPoints, int nCentroids, int nDims, float* data, float * membership, float * centroids) { float miliseconds; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); int blockSize = nPoints < N_THREADS ? nPoints : N_THREADS; int gridSize = ceil(((float)nPoints)/blockSize) < 1 ? 
1 : ceil(((float)nPoints)/blockSize); hipLaunchKernelGGL(( CudaComputeMembership), dim3(gridSize), dim3(blockSize), blockSize*nDims*sizeof(float), 0, fuzziness, nDims, data, nPoints, centroids, nCentroids, membership); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&miliseconds, start, stop); printf("ComputeMembership: %f ms\n", miliseconds); CheckCudaErrors(); } void TestComputeMembership() { int fuzziness = 2; int nDims = 2; std::vector<float> data; data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(5.0); data.push_back(5.0); data.push_back(5.0); data.push_back(6.0); data.push_back(6.0); data.push_back(7.0); std::vector<float> centroids; centroids.push_back(0.3334);centroids.push_back(0.3337); centroids.push_back(5.3307);centroids.push_back(6.0040); std::vector<float> membership; membership.push_back(0);membership.push_back(0);membership.push_back(0); membership.push_back(0);membership.push_back(0);membership.push_back(0); membership.push_back(0);membership.push_back(0);membership.push_back(0); membership.push_back(0);membership.push_back(0);membership.push_back(0); int nPoints = data.size()/nDims; int nCentroids = centroids.size()/nDims; float * deviceCentroids; float * deviceCurrentMembership; float * deviceDataPoints; hipMalloc((void **)&deviceCentroids, nCentroids * nDims * sizeof(float)); hipMalloc((void **)&deviceCurrentMembership, nPoints * nCentroids * sizeof(float)); hipMalloc((void **)&deviceDataPoints, nPoints * nDims * sizeof(float)); hipMemcpy(deviceCentroids, &centroids[0], nCentroids * nDims * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceDataPoints, &data[0], nPoints * nDims * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceCurrentMembership, &membership[0], nPoints * nCentroids * sizeof(float), hipMemcpyHostToDevice); ComputeMembership(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceCurrentMembership, deviceCentroids); hipMemcpy(&centroids[0], deviceCentroids, nCentroids * nDims * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&data[0], deviceDataPoints, nPoints * nDims * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&membership[0], deviceCurrentMembership, nPoints * nCentroids * sizeof(float), hipMemcpyDeviceToHost); PrintMatrix(nDims, "Point", data); PrintMatrix(nDims, "Centroid", centroids); PrintMatrix(nPoints, "Membership", membership); hipFree(deviceCurrentMembership); hipFree(deviceDataPoints); hipFree(deviceCentroids); printf("ComputeMembership: Finished!\n"); } }
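/* Usage sketch (added annotation, not part of the original source): a minimal host-side
   driver for the RunClustering entry point defined above. The sample data and parameter
   values are illustrative only. Note that the kernels above use a fixed exponent of 2,
   i.e. they effectively assume fuzziness = 2, and that the returned membership matrix is
   stored cluster-major: membership[c*nPoints + p]. */
void ExampleRunClustering()
{
    const int nDims = 2;
    // Four 2-D points: two near the origin, two near (5, 6).
    std::vector<float> data = {0.0f, 0.0f, 1.0f, 0.0f, 5.0f, 6.0f, 6.0f, 7.0f};
    std::vector<float> centroids;        // filled by RunClustering
    const float fuzziness = 2.0f;
    const float errorThreshold = 0.01f;  // convergence threshold on the norm of the membership change
    const int nCentroids = 2;

    std::vector<float> membership =
        CUDAFCM::RunClustering(nDims, data, fuzziness, errorThreshold, nCentroids, centroids);
}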
1a7216b79e804be5c07965ee6b1d37826f611b7f.cu
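/* Annotation (not in the original file): this is the CUDA source paired with the
   hipify-generated version above. With fuzziness m = 2, which is what the kernels
   below hard-code, the fuzzy C-means updates implemented here are
       u_{p,c} = 1 / sum_j ( d(x_p, v_c) / d(x_p, v_j) )^2      (CudaComputeMembership)
       v_c     = sum_p u_{p,c}^2 * x_p / sum_p u_{p,c}^2        (ComputeCentroids)
   where d(.,.) is the Euclidean distance computed by CudaComputeDistance, and the loop
   in RunClustering alternates the two updates until the norm of the membership change
   falls below errorThreshold. */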
#include "CUDAFCM.h" #include <iostream> #include <cmath> #include <omp.h> #include <stdio.h> #include <curand.h> #include <cuda.h> #include <vector> /* Code: Fuzzy C-means Developer: Dennis Carnelossi Furlaneto License: MIT */ namespace CUDAFCM { #define N_THREADS 256 #define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d - ErroType:%s\n",__FILE__,__LINE__, cudaGetErrorString(x));\ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) #define CheckCudaErrors(){ \ cudaError_t errorCode = cudaGetLastError();\ if(errorCode != cudaSuccess)\ printf("Check Cuda Error at %s, line %d: %s\n", __FILE__, __LINE__, cudaGetErrorString(errorCode));} void PrintMatrix(int nDim, const char* nameRow, std::vector<float> & matrix) { int nRows = matrix.size()/nDim; for(int i=0; i<nRows; ++i) { printf("%s %d: ",nameRow, i); for(int j=0; j<nDim; ++j) { printf("%.4f ", matrix[i*nDim + j]); } printf("\n"); } } __global__ void CudaCopyDataDeviceToDevice(volatile float * dst, const float * src, int nElems) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nElems) dst[idx] = src[idx]; } void CopyDataDeviceToDevice(float * dst, const float * src, int nPoints) { int blockSize = nPoints < N_THREADS ? nPoints : N_THREADS; int gridSize = ceil(((float)nPoints)/blockSize) < 1 ? 1 : ceil(((float)nPoints)/blockSize); CudaCopyDataDeviceToDevice<<<gridSize, blockSize>>>(dst, src, nPoints); cudaDeviceSynchronize(); } /*RunClustering: This function receives a data vector with all the points with nDims dimensions and returns a membership vector with nPoints*nClusters positions. The membership values are in the following order: points -> membership per cluster. 
Inputs: nDims, data, nCentroids, fuzziness, errorThreshold Outputs: centroids Returns: membership */ std::vector<float> RunClustering(int nDims, const std::vector<float> & data, float fuzziness, float errorThreshold, int nCentroids, std::vector<float> & centroids) { std::vector<float> currentMembership; std::vector<float> nextMembership; int nPoints = data.size() / nDims; int nMembershipSize = nPoints * nCentroids; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float miliseconds; cudaEventRecord(start); float* deviceNextMembership; CudaInitializeMembership(nPoints, nCentroids, &deviceNextMembership, nextMembership); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&miliseconds, start, stop); printf("CudaInitializeMembership: %f ms\n", miliseconds); CheckCudaErrors(); float * deviceCentroids; float * deviceCurrentMembership; float * deviceDataPoints; float * deviceSquaredError; cudaMalloc((void**)&deviceCentroids, nCentroids * nDims * sizeof(float)); cudaMemset(deviceCentroids, 0, nCentroids * nDims * sizeof(float)); CheckCudaErrors(); cudaMalloc((void**)&deviceCurrentMembership, nPoints * nCentroids * sizeof(float)); cudaMalloc((void**)&deviceDataPoints, nPoints * nDims * sizeof(float)); cudaMalloc((void**)&deviceSquaredError, sizeof(float)); cudaMemcpy(deviceDataPoints, &data[0], nPoints * nDims * sizeof(float), cudaMemcpyHostToDevice); CheckCudaErrors(); bool notConverged = !false; do { CopyDataDeviceToDevice( deviceCurrentMembership, deviceNextMembership, nPoints*nCentroids); //cudaMemcpy(deviceCurrentMembership, deviceNextMembership, nCentroids * nPoints * sizeof(float), cudaMemcpyDeviceToDevice); CheckCudaErrors(); CUDAFCM::ComputeCentroids(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceCurrentMembership, deviceCentroids); CheckCudaErrors(); CUDAFCM::ComputeMembership(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceNextMembership, deviceCentroids); CheckCudaErrors(); notConverged = IsMembershipDiffGreater(deviceCurrentMembership, deviceNextMembership, nMembershipSize, errorThreshold, deviceSquaredError); } while(notConverged); CUDAFCM::ComputeCentroids(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceNextMembership, deviceCentroids); CheckCudaErrors(); centroids.resize(nCentroids*nDims); nextMembership.resize(nPoints*nCentroids); cudaMemcpy(&centroids[0], deviceCentroids, nCentroids * nDims * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&nextMembership[0], deviceCurrentMembership, nPoints * nCentroids * sizeof(float), cudaMemcpyDeviceToHost); CheckCudaErrors(); cudaFree(deviceNextMembership); cudaFree(deviceCurrentMembership); cudaFree(deviceDataPoints); cudaFree(deviceCentroids); cudaFree(deviceSquaredError); CheckCudaErrors(); return nextMembership; } __global__ void NormalizePointsMembership(float* values, int nPoints, int nClusters) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nPoints) { int c; float sum=0.0; for( c = 0; c<nClusters; ++c) sum = sum + values[c*nPoints + idx]; for( c = 0; c<nClusters; ++c) values[c*nPoints + idx] = values[c*nPoints + idx]/sum; } __syncthreads(); } int CudaInitializeMembership(int nPoints, int nClusters, float** deviceData, std::vector<float> & hostData) { curandGenerator_t gen; int n = nPoints * nClusters; hostData.resize(n); cudaMalloc((void**)deviceData, n * sizeof(float)); CheckCudaErrors(); curandCreateGenerator(&gen,CURAND_RNG_PSEUDO_DEFAULT); CheckCudaErrors(); curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); CheckCudaErrors(); 
curandGenerateUniform(gen, *deviceData, n); CheckCudaErrors(); int blockSize = n < N_THREADS ? n : N_THREADS; int gridSize = ceil(float(n)/(blockSize*nClusters)) < 1 ? 1 : ceil(float(n)/(blockSize*nClusters)); NormalizePointsMembership<<<gridSize, blockSize>>>(*deviceData, nPoints, nClusters); CheckCudaErrors(); cudaMemcpy(&hostData[0], *deviceData, n * sizeof(float), cudaMemcpyDeviceToHost); curandDestroyGenerator(gen); CheckCudaErrors(); //CUDA_CALL(cudaFree(deviceData)); return EXIT_SUCCESS; } __global__ void CudaIsMembershipDiffGreater(float * currentMembership, float * nextMembership, int size, float error, float * totalSquaredDiff) { int idx = threadIdx.x; int nThreads = blockDim.x; extern __shared__ float squaredDiffs[]; squaredDiffs[idx] = 0.0; int dataBlockSize; if(size < nThreads) dataBlockSize = 1; else dataBlockSize = ceil(((float)size) / nThreads); int startPos = idx*dataBlockSize; int endPos = startPos + dataBlockSize; if (endPos >= size) endPos = size; if(startPos < size) { int i; float diff; float calcError = 0.0; for(i=startPos; i<endPos; ++i) { diff = (nextMembership[i] - currentMembership[i]); calcError += diff*diff; } squaredDiffs[idx] = calcError; } __syncthreads(); float auxTotalSquareDiff = 0; if (idx == 0) { for(int i = 0; i<nThreads;++i) auxTotalSquareDiff += squaredDiffs[i]; totalSquaredDiff[0] = sqrt(auxTotalSquareDiff); printf("%f\n", totalSquaredDiff[0]); } } /*IsMembershipDiffGreater: Check if the absolute difference between nextMembership[u] and currentMembership[u] are larger than a given error Inputs: currentMembership, nextMembership, error Outputs: True if the absolute difference between nextMembership[u] and currentMembership[u] are larger than a given error */ bool IsMembershipDiffGreater(float * deviceCurrentMembership, float * deviceNextMembership, int size, float error, float * deviceSquaredError) { float miliseconds; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); int blockSize = size < N_THREADS ? 
size : N_THREADS; CudaIsMembershipDiffGreater<<<1, blockSize, blockSize*sizeof(float)>>>(deviceCurrentMembership,deviceNextMembership, size, error, deviceSquaredError); CheckCudaErrors(); float squaredError[1]; cudaMemcpy(&squaredError[0], deviceSquaredError, sizeof(float), cudaMemcpyDeviceToHost); CheckCudaErrors(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&miliseconds, start, stop); printf("MembershipDiffGreater: %f ms\n", miliseconds); if(error >= squaredError[0]) return false; return true; } void TestComputeSquareError() { std::vector<float> data1; data1.push_back(0.5); data1.push_back(0.0); data1.push_back(1.0); data1.push_back(0.0); data1.push_back(0.0); data1.push_back(1.0); std::vector<float> data2; data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); data2.push_back(0.0); int size = data1.size(); float * dData1; float * dData2; float * deviceSquaredError; cudaMalloc((void **)&deviceSquaredError, sizeof(float)); cudaMalloc((void **)&dData1, size * sizeof(float)); cudaMalloc((void **)&dData2, size * sizeof(float)); cudaMemcpy(dData1, &data1[0], size * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dData2, &data2[0], size * sizeof(float), cudaMemcpyHostToDevice); float error = 1; IsMembershipDiffGreater(dData1, dData2, size, error, deviceSquaredError); cudaFree(dData1); cudaFree(dData2); cudaFree(deviceSquaredError); } __global__ void CudaComputeCentroidsSingleBlock(float fuzziness, int nPoints, int idxCentroid, int nDims, float* data, float* membership, float* centroids) { int idx = threadIdx.x; int nThreads = blockDim.x; extern __shared__ float numerator[]; __shared__ float denominator[N_THREADS]; int dataBlockSize; if(nPoints < nThreads) dataBlockSize = 1; else dataBlockSize = ceil(((float)nPoints) / nThreads); int startPos = idx*dataBlockSize; int endPos = startPos + dataBlockSize; if(startPos < nPoints && endPos > nPoints) endPos = nPoints; if(startPos < nPoints) { for(int i=0; i<nDims; ++i) numerator[idx*nDims + i] = 0.0; denominator[idx] = 0.0; register float weightedMembership; for (int p = startPos; p < endPos; ++p) { weightedMembership = powf(membership[idxCentroid*nPoints + p], 2); for (int d = 0; d < nDims; d++) numerator[idx*nDims + d] += weightedMembership*data[p*nDims + d]; denominator[idx] += weightedMembership; } } __syncthreads(); if (idx == 0) { int i, j; for(i=1;i<nThreads;++i) denominator[0] += denominator[i]; for(i=nDims; i<nThreads*nDims; i+=nDims) for(j=0; j<nDims;j++) numerator[j] += numerator[i+j]; for (int d = 0; d < nDims; d++) centroids[idxCentroid*nDims + d] = numerator[d]/denominator[0]; } } __global__ void CudaComputePartialCentroid(float fuzziness, int nPoints, int idxCentroid, int nDims, float* data, float* membership, float* centroids, int dataBlockSize, float *globalNumerator, float * globalDenominator) { int tGlobalIdx = blockDim.x*blockIdx.x + threadIdx.x; extern __shared__ float numerator[]; __shared__ float denominator[N_THREADS]; for(int i=0; i<nDims; ++i) numerator[threadIdx.x*nDims + i] = 0.0; denominator[threadIdx.x] = 0.0; if(tGlobalIdx == 0) { for(int i=0; i<nDims; ++i) globalNumerator[i] = 0.0; globalDenominator[0] = 0.0; } int startPos = tGlobalIdx*dataBlockSize; int endPos = startPos + dataBlockSize; if(startPos < nPoints && endPos > nPoints) endPos = nPoints; if(startPos < nPoints) { register float weightedMembership; for (int p = startPos; p < endPos; ++p) { weightedMembership = powf(membership[idxCentroid*nPoints + p], 2); for (int d = 0; d < 
nDims; d++) numerator[threadIdx.x*nDims + d] += weightedMembership*data[p*nDims + d]; denominator[threadIdx.x] += weightedMembership; } } __syncthreads(); if (threadIdx.x == 0) { for(int i=1;i<blockDim.x;++i) denominator[0] += denominator[i]; for(int i=nDims; i<blockDim.x*nDims; ++i) numerator[i%nDims] += numerator[i]; atomicAdd(&globalDenominator[0], (float)denominator[0]); for(int i=0; i<nDims;i++) atomicAdd(&globalNumerator[i], (float)numerator[i]); } } __global__ void CudaComputeOnlyCentroids(int idxCentroid, int nDims, float* centroids, float *globalNumerator, float * globalDenominator) { int tGlobalIdx = blockDim.x*blockIdx.x + threadIdx.x; if(tGlobalIdx == 0) { for (int d = 0; d < nDims; d++) centroids[idxCentroid*nDims + d] = globalNumerator[d]/globalDenominator[0]; } } void ComputeCentroids(float fuzziness, int nPoints, int nCentroids, int nDims, float* data, float * membership, float * centroids) { float miliseconds; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); int dataBlockSize, blockSize, gridSize; if(nPoints < N_THREADS) { gridSize = 1; dataBlockSize = 1; blockSize = nPoints; } else { gridSize = 10; blockSize = N_THREADS; dataBlockSize = ceil((float)nPoints / (blockSize*gridSize)); } int numeratorSize = blockSize*nDims; float * deviceNumerator; float * deviceDenominator; for(int i=0; i<nCentroids; ++i) { cudaMalloc((void **)&deviceNumerator, nDims * sizeof(float)); cudaMalloc((void **)&deviceDenominator, sizeof(float)); CudaComputePartialCentroid<<<gridSize, blockSize, numeratorSize*sizeof(float)>>>(fuzziness, nPoints, i, nDims, data, membership, centroids, dataBlockSize, deviceNumerator, deviceDenominator); cudaDeviceSynchronize(); CudaComputeOnlyCentroids<<<1, 1>>>(i, nDims, centroids, deviceNumerator, deviceDenominator); cudaFree(deviceNumerator); cudaFree(deviceDenominator); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&miliseconds, start, stop); printf("ComputeCentroids: %f ms\n", miliseconds); CheckCudaErrors(); } void TestComputeCentroid() { int fuzziness = 2; int nDims = 2; std::vector<float> data; data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(5.0); data.push_back(5.0); data.push_back(5.0); data.push_back(6.0); data.push_back(6.0); data.push_back(7.0); std::vector<float> centroids; centroids.push_back(0);centroids.push_back(0); centroids.push_back(0);centroids.push_back(0); std::vector<float> membership; membership.push_back(0.9966);membership.push_back(0.9897);membership.push_back(0.99); membership.push_back(0.0250);membership.push_back(0.0020);membership.push_back(0.0185); membership.push_back(0.0034);membership.push_back(0.0103);membership.push_back(0.0100); membership.push_back(0.9750);membership.push_back(0.9980);membership.push_back(0.9815); int nPoints = data.size()/nDims; int nCentroids = centroids.size()/nDims; float * deviceCentroids; float * deviceCurrentMembership; float * deviceDataPoints; cudaMalloc((void **)&deviceCentroids, nCentroids * nDims * sizeof(float)); cudaMalloc((void **)&deviceCurrentMembership, nPoints * nCentroids * sizeof(float)); cudaMalloc((void **)&deviceDataPoints, nPoints * nDims * sizeof(float)); cudaMemcpy(deviceCentroids, &centroids[0], nCentroids * nDims * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceDataPoints, &data[0], nPoints * nDims * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceCurrentMembership, &membership[0], nPoints * 
nCentroids * sizeof(float), cudaMemcpyHostToDevice); ComputeCentroids(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceCurrentMembership, deviceCentroids); cudaMemcpy(&centroids[0], deviceCentroids, nCentroids * nDims * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&data[0], deviceDataPoints, nPoints * nDims * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&membership[0], deviceCurrentMembership, nPoints * nCentroids * sizeof(float), cudaMemcpyDeviceToHost); PrintMatrix(nDims, "Points", data); PrintMatrix(nDims, "Centroids", centroids); cudaFree(deviceCurrentMembership); cudaFree(deviceDataPoints); cudaFree(deviceCentroids); printf("ComputeCentroids: Finished!\n"); } __device__ float CudaComputeDistance(int nDims, int p1Idx, float * p1s, int p2Idx, float * p2s) { float distance = 0; float pointDiff = 0; int p1IdxStart = p1Idx*nDims; int p2IdxStart = p2Idx*nDims; for (int d = 0; d < nDims; d++) { pointDiff = p1s[p1IdxStart + d] - p2s[p2IdxStart + d]; distance += pointDiff * pointDiff; } return sqrt(distance); } __global__ void CudaComputeMembership(float fuzziness, int nDims, float * data, int nPoints, float * centroids, int nCentroids, float * membership) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < nPoints) { register float denominator, distPC, divDistPC1DistPC2; extern __shared__ float sharedData[]; int c1, c2; for(int i=0; i<nDims; ++i) sharedData[threadIdx.x*nDims + i] = data[idx*nDims+i]; for (c1 = 0; c1 < nCentroids; ++c1) { denominator = 0.0; distPC = CudaComputeDistance(nDims, threadIdx.x, sharedData, c1, centroids); for (c2 = 0; c2 < nCentroids; ++c2) { divDistPC1DistPC2 = distPC / CudaComputeDistance(nDims, threadIdx.x, sharedData, c2, centroids); denominator += divDistPC1DistPC2 * divDistPC1DistPC2; } membership[idx + c1*nPoints] = 1./denominator; } } __syncthreads(); } void ComputeMembership(float fuzziness, int nPoints, int nCentroids, int nDims, float* data, float * membership, float * centroids) { float miliseconds; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); int blockSize = nPoints < N_THREADS ? nPoints : N_THREADS; int gridSize = ceil(((float)nPoints)/blockSize) < 1 ? 
1 : ceil(((float)nPoints)/blockSize); CudaComputeMembership<<<gridSize, blockSize, blockSize*nDims*sizeof(float)>>>(fuzziness, nDims, data, nPoints, centroids, nCentroids, membership); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&miliseconds, start, stop); printf("ComputeMembership: %f ms\n", miliseconds); CheckCudaErrors(); } void TestComputeMembership() { int fuzziness = 2; int nDims = 2; std::vector<float> data; data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(0.0); data.push_back(0.0); data.push_back(1.0); data.push_back(5.0); data.push_back(5.0); data.push_back(5.0); data.push_back(6.0); data.push_back(6.0); data.push_back(7.0); std::vector<float> centroids; centroids.push_back(0.3334);centroids.push_back(0.3337); centroids.push_back(5.3307);centroids.push_back(6.0040); std::vector<float> membership; membership.push_back(0);membership.push_back(0);membership.push_back(0); membership.push_back(0);membership.push_back(0);membership.push_back(0); membership.push_back(0);membership.push_back(0);membership.push_back(0); membership.push_back(0);membership.push_back(0);membership.push_back(0); int nPoints = data.size()/nDims; int nCentroids = centroids.size()/nDims; float * deviceCentroids; float * deviceCurrentMembership; float * deviceDataPoints; cudaMalloc((void **)&deviceCentroids, nCentroids * nDims * sizeof(float)); cudaMalloc((void **)&deviceCurrentMembership, nPoints * nCentroids * sizeof(float)); cudaMalloc((void **)&deviceDataPoints, nPoints * nDims * sizeof(float)); cudaMemcpy(deviceCentroids, &centroids[0], nCentroids * nDims * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceDataPoints, &data[0], nPoints * nDims * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceCurrentMembership, &membership[0], nPoints * nCentroids * sizeof(float), cudaMemcpyHostToDevice); ComputeMembership(fuzziness, nPoints, nCentroids, nDims, deviceDataPoints, deviceCurrentMembership, deviceCentroids); cudaMemcpy(&centroids[0], deviceCentroids, nCentroids * nDims * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&data[0], deviceDataPoints, nPoints * nDims * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&membership[0], deviceCurrentMembership, nPoints * nCentroids * sizeof(float), cudaMemcpyDeviceToHost); PrintMatrix(nDims, "Point", data); PrintMatrix(nDims, "Centroid", centroids); PrintMatrix(nPoints, "Membership", membership); cudaFree(deviceCurrentMembership); cudaFree(deviceDataPoints); cudaFree(deviceCentroids); printf("ComputeMembership: Finished!\n"); } }
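/* Sketch of a host-side helper (added annotation, not part of the original source):
   the membership matrix returned by RunClustering is cluster-major
   (membership[c*nPoints + p]), so a hard assignment for each point is the argmax over
   clusters. The function name and signature are illustrative; it uses only std::vector,
   which is already included above. */
std::vector<int> HardAssignments(const std::vector<float> & membership, int nPoints, int nCentroids)
{
    std::vector<int> labels(nPoints, 0);
    for (int p = 0; p < nPoints; ++p)
    {
        float best = membership[p];  // cluster 0
        for (int c = 1; c < nCentroids; ++c)
        {
            float u = membership[c*nPoints + p];
            if (u > best)
            {
                best = u;
                labels[p] = c;
            }
        }
    }
    return labels;
}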
c239315edbbd9423b711fc17eef37db7bc7dab37.hip
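/* Annotation (not in the original file): this translation unit provides the cudarray
   element-wise kernels and host dispatchers: binary ops (add, div, max, min, mul, pow,
   sub) in array/array, array/scalar and broadcast forms, comparison ops producing
   bool_t, unary ops (abs, cos, exp, log, neg, relu and its derivative, sigmoid and its
   derivative, sin, sqrt, tanh and its derivative) and clip, with explicit
   instantiations for float and int. */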
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudarray/common.hpp" #include "cudarray/elementwise.hpp" namespace cudarray { #define BINARY_OP(name, operation) \ template <typename Ta, typename Tb, typename Tc> \ struct name { \ __device__ Tc operator()(const Ta a, const Tb b) { \ return operation; \ } \ }; BINARY_OP(AddOp, a + b) BINARY_OP(DivOp, a / b) BINARY_OP(MaxOp, fmaxf(a, b)) BINARY_OP(MinOp, fminf(a, b)) BINARY_OP(MulOp, a * b) BINARY_OP(PowOp, powf(a, b)) BINARY_OP(SubOp, a - b) template<typename Ta, typename Tb, typename Tc, typename Op> __global__ void kernel_binary(const Ta *a, const Tb *b, unsigned int n, Tc *c) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { c[idx] = op(a[idx], b[idx]); } } template<typename Ta, typename Tb, typename Tc, typename Op> __global__ void kernel_binary_inplace(Ta *a, const Tb *b, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(a[idx], b[idx]); } } template<typename Ta, typename Tb, typename Tc, typename Op> void binary(const Ta *a, const Tb *b, unsigned int n, Tc *c) { if (c == (Tc *) a) { hipLaunchKernelGGL(( kernel_binary_inplace<Tc, Tb, Tc, Op>) , dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, c, b, n); } else if (c == (Tc *) b) { hipLaunchKernelGGL(( kernel_binary_inplace<Tc, Ta, Tc, Op>) , dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, c, a, n); } else { hipLaunchKernelGGL(( kernel_binary<Ta, Tb, Tc, Op>) , dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, a, b, n, c); } } template<typename Ta, typename Tb, typename Tc> void binary(BinaryOp op, const Ta *a, const Tb *b, unsigned int n, Tc *c) { switch (op) { case ADD_OP: binary<Ta, Tb, Tc, AddOp<Ta, Tb, Tc> >(a, b, n, c); break; case DIV_OP: binary<Ta, Tb, Tc, DivOp<Ta, Tb, Tc> >(a, b, n, c); break; case MAX_B_OP: binary<Ta, Tb, Tc, MaxOp<Ta, Tb, Tc> >(a, b, n, c); break; case MIN_B_OP: binary<Ta, Tb, Tc, MinOp<Ta, Tb, Tc> >(a, b, n, c); break; case MUL_OP: binary<Ta, Tb, Tc, MulOp<Ta, Tb, Tc> >(a, b, n, c); break; case POW_OP: binary<Ta, Tb, Tc, PowOp<Ta, Tb, Tc> >(a, b, n, c); break; case SUB_OP: binary<Ta, Tb, Tc, SubOp<Ta, Tb, Tc> >(a, b, n, c); break; } } template void binary<float, float, float>( BinaryOp op, const float *a, const float *b, unsigned int n, float *c); template void binary<float, int, float>( BinaryOp op, const float *a, const int *b, unsigned int n, float *c); template void binary<int, float, float>( BinaryOp op, const int *a, const float *b, unsigned int n, float *c); template void binary<int, int, int>( BinaryOp op, const int *a, const int *b, unsigned int n, int *c); template<typename Ta, typename Talpha, typename Tb, typename Op> __global__ void kernel_binary_scalar(const Ta *a, Talpha alpha, unsigned int n, Tb *b) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = op(a[idx], alpha); } } template<typename Ta, typename Talpha, typename Op> __global__ void kernel_binary_scalar_inplace(Ta *a, Talpha alpha, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(a[idx], alpha); } } template<typename Ta, typename Talpha, typename Tb, typename Op> void binary_scalar(const Ta *a, Talpha alpha, unsigned int n, Tb *b) { if (b == (Tb *)a) { hipLaunchKernelGGL(( kernel_binary_scalar_inplace<Tb, Talpha, Op>) , dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, b, alpha, n); } else { hipLaunchKernelGGL(( kernel_binary_scalar<Ta, Talpha, Tb, Op>) , dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, a, alpha, n, b); } } template<typename Ta, typename Talpha, typename Tb> void 
binary_scalar(BinaryOp op, const Ta *a, Talpha alpha, unsigned int n, Tb *b) { switch (op) { case ADD_OP: binary_scalar<Ta, Talpha, Tb, AddOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case DIV_OP: binary_scalar<Ta, Talpha, Tb, DivOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case MAX_B_OP: binary_scalar<Ta, Talpha, Tb, MaxOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case MIN_B_OP: binary_scalar<Ta, Talpha, Tb, MinOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case MUL_OP: binary_scalar<Ta, Talpha, Tb, MulOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case POW_OP: if (alpha == static_cast<Talpha>(2)) { binary<Ta, Ta, Tb, MulOp<Ta, Talpha, Tb> >(a, a, n, b); } else if (alpha == static_cast<Talpha>(1)) { binary_scalar<Ta, Ta, Tb, MulOp<Ta, Talpha, Tb> >(a, 1, n, b); } else { binary_scalar<Ta, Talpha, Tb, PowOp<Ta, Talpha, Tb> >(a, alpha, n, b); } break; case SUB_OP: binary_scalar<Ta, Talpha, Tb, SubOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; } } template void binary_scalar<float, float, float>( BinaryOp op, const float *a, float alpha, unsigned int n, float *c); template void binary_scalar<int, float, float>( BinaryOp op, const int *a, float alpha, unsigned int n, float *c); template void binary_scalar<int, int, int>( BinaryOp op, const int *a, int alpha, unsigned int n, int *c); template<typename Talpha, typename Ta, typename Tb, typename Op> __global__ void kernel_binary_scalar(Talpha alpha, const Ta *a, unsigned int n, Tb *b) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = op(alpha, a[idx]); } } template<typename Talpha, typename Ta, typename Op> __global__ void kernel_binary_scalar_inplace(Talpha alpha, Ta *a, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(alpha, a[idx]); } } template<typename Talpha, typename Ta, typename Tb, typename Op> void binary_scalar(Talpha alpha, const Ta *a, unsigned int n, Tb *b) { if (b == (Tb *)a) { hipLaunchKernelGGL(( kernel_binary_scalar_inplace<Talpha, Tb, Op>) , dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, alpha, b, n); } else { hipLaunchKernelGGL(( kernel_binary_scalar<Talpha, Ta, Tb, Op>) , dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, alpha, a, n, b); } } template<typename Talpha, typename Ta, typename Tb> void binary_scalar_(BinaryOp op, Talpha alpha, const Ta *a, unsigned int n, Tb *b) { switch (op) { case ADD_OP: binary_scalar<Talpha, Ta, Tb, AddOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case DIV_OP: binary_scalar<Talpha, Ta, Tb, DivOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case MAX_B_OP: binary_scalar<Talpha, Ta, Tb, MaxOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case MIN_B_OP: binary_scalar<Talpha, Ta, Tb, MinOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case MUL_OP: binary_scalar<Talpha, Ta, Tb, MulOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case POW_OP: if (alpha == static_cast<Talpha>(2)) { binary<Ta, Ta, Tb, MulOp<Talpha, Ta, Tb> >(a, a, n, b); } else if (alpha == static_cast<Talpha>(1)) { binary_scalar<Ta, Ta, Tb, MulOp<Talpha, Ta, Tb> >(1, a, n, b); } else { binary_scalar<Talpha, Ta, Tb, PowOp<Talpha, Ta, Tb> >(alpha, a, n, b); } break; case SUB_OP: binary_scalar<Talpha, Ta, Tb, SubOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; } } template void binary_scalar_<float, float, float>( BinaryOp op, float alpha, const float *a, unsigned int n, float *c); template void binary_scalar_<float, int, float>( BinaryOp op, float alpha, const int *a, unsigned int n, float *c); template void binary_scalar_<int, int, int>( BinaryOp op, int alpha, const int *a, unsigned int n, int *c); template<typename Ta, typename Tb, 
typename Tc, typename Op, bool broadcast_leading> __global__ void kernel_binary_broadcast( const Ta *a, const Tb *b, unsigned int m, unsigned int n, Tc *c) { Op op; CUDA_GRID_STRIDE_LOOP(idx, m*n) { if (broadcast_leading) { c[idx] = op(a[idx], b[idx % n]); } else { c[idx] = op(a[idx], b[idx / m]); } } } template<typename Ta, typename Tb, typename Op, bool broadcast_leading> __global__ void kernel_binary_broadcast_inplace( Ta *a, const Tb *b, unsigned int m, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, m*n) { if (broadcast_leading) { a[idx] = op(a[idx], b[idx % n]); } else { a[idx] = op(a[idx], b[idx / m]); } } } template<typename Ta, typename Tb, typename Tc, typename Op, bool broadcast_leading> void binary_broadcast(const Ta *a, const Tb *b, unsigned int m, unsigned int n, Tc *c) { if (c == (Tc *) a) { hipLaunchKernelGGL(( kernel_binary_broadcast_inplace<Ta, Tb, Op, broadcast_leading>) , dim3(cuda_blocks(m*n)), dim3(kNumBlockThreads), 0, 0, (Ta *) c, b, m, n); } else if (c == (Tc *) b) { hipLaunchKernelGGL(( kernel_binary_broadcast_inplace<Tb, Ta, Op, broadcast_leading>) , dim3(cuda_blocks(m*n)), dim3(kNumBlockThreads), 0, 0, (Tb *) b, a, m, n); } else { hipLaunchKernelGGL(( kernel_binary_broadcast<Ta, Tb, Tc, Op, broadcast_leading>) , dim3(cuda_blocks(m*n)), dim3(kNumBlockThreads), 0, 0, a, b, m, n, c); } } template<typename Ta, typename Tb, typename Tc, typename Op, bool broadcast_inner> __global__ void kernel_binary_broadcast(const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) { Op op; CUDA_GRID_STRIDE_LOOP(idx, k*m*n) { if (broadcast_inner) { c[idx] = op(a[idx], b[(idx / m / n) * n + (idx % n)]); } else { c[idx] = op(a[idx], b[(idx / n) % m]); } } } template<typename Ta, typename Tb, typename Op, bool broadcast_inner> __global__ void kernel_binary_broadcast_inplace(Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, k*m*n) { if (broadcast_inner) { a[idx] = op(a[idx], b[(idx / m / n) * n + (idx % n)]); } else { a[idx] = op(a[idx], b[(idx / n) % m]); } } } template<typename Ta, typename Tb, typename Tc, typename Op, bool broadcast_inner> void binary_broadcast(const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) { if (c == (Tc *) a) { hipLaunchKernelGGL(( kernel_binary_broadcast_inplace<Ta, Tb, Op, broadcast_inner>) , dim3(cuda_blocks(k*m*n)), dim3(kNumBlockThreads), 0, 0, (Ta *) c, b, k, m, n); } else if (c == (Tc *) b) { hipLaunchKernelGGL(( kernel_binary_broadcast_inplace<Tb, Ta, Op, broadcast_inner>) , dim3(cuda_blocks(k*m*n)), dim3(kNumBlockThreads), 0, 0, (Tb *) b, a, k, m, n); } else { hipLaunchKernelGGL(( kernel_binary_broadcast<Ta, Tb, Tc, Op, broadcast_inner>) , dim3(cuda_blocks(k*m*n)), dim3(kNumBlockThreads), 0, 0, a, b, k, m, n, c); } } template<typename Ta, typename Tb, typename Tc, typename Op> void binary_broadcast(BroadcastType btype, const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) { switch (btype) { case BROADCAST_INNER: binary_broadcast<Ta, Tb, Tc, Op, true>(a, b, k, m, n, c); break; case BROADCAST_LEADING: binary_broadcast<Ta, Tb, Tc, Op, true>(a, b, m, n, c); break; case BROADCAST_OUTER: binary_broadcast<Ta, Tb, Tc, Op, false>(a, b, k, m, n, c); break; case BROADCAST_TRAILING: binary_broadcast<Ta, Tb, Tc, Op, false>(a, b, m, n, c); break; } } template<typename Ta, typename Tb, typename Tc> void binary_broadcast(BinaryOp op, BroadcastType btype, const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) {
switch (op) { case ADD_OP: binary_broadcast<Ta, Tb, Tc, AddOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case DIV_OP: binary_broadcast<Ta, Tb, Tc, DivOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case MAX_B_OP: binary_broadcast<Ta, Tb, Tc, MaxOp<Ta, Tb, Tc> >( btype, a, b, k, m, n, c); break; case MIN_B_OP: binary_broadcast<Ta, Tb, Tc, MinOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case MUL_OP: binary_broadcast<Ta, Tb, Tc, MulOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case POW_OP: binary_broadcast<Ta, Tb, Tc, PowOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case SUB_OP: binary_broadcast<Ta, Tb, Tc, SubOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; } } template void binary_broadcast<float, float, float>( BinaryOp op, BroadcastType btype, const float *a, const float *b, unsigned int k, unsigned int m, unsigned int n, float *c); template void binary_broadcast<float, int, float>( BinaryOp op, BroadcastType btype, const float *a, const int *b, unsigned int k, unsigned int m, unsigned int n, float *c); template void binary_broadcast<int, float, float>( BinaryOp op, BroadcastType btype, const int *a, const float *b, unsigned int k, unsigned int m, unsigned int n, float *c); template void binary_broadcast<int, int, int>( BinaryOp op, BroadcastType btype, const int *a, const int *b, unsigned int k, unsigned int m,unsigned int n, int *c); BINARY_OP(EqOp, a == b) BINARY_OP(GtOp, a > b) BINARY_OP(GtEqOp, a >= b) BINARY_OP(LtOp, a < b) BINARY_OP(LtEqOp, a <= b) BINARY_OP(NeqOp, a != b) template<typename Ta, typename Tb> void binary_cmp(BinaryCmpOp op, const Ta *a, const Tb *b, unsigned int n, bool_t *c) { switch (op) { case EQ_OP: binary<Ta, Tb, bool_t, EqOp<Ta, Tb, bool_t> >(a, b, n, c); break; case GT_OP: binary<Ta, Tb, bool_t, GtOp<Ta, Tb, bool_t> >(a, b, n, c); break; case GT_EQ_OP: binary<Ta, Tb, bool_t, GtEqOp<Ta, Tb, bool_t> >(a, b, n, c); break; case LT_OP: binary<Ta, Tb, bool_t, LtOp<Ta, Tb, bool_t> >(a, b, n, c); break; case LT_EQ_OP: binary<Ta, Tb, bool_t, LtEqOp<Ta, Tb, bool_t> >(a, b, n, c); break; case NEQ_OP: binary<Ta, Tb, bool_t, NeqOp<Ta, Tb, bool_t> >(a, b, n, c); break; } } template void binary_cmp<float, float>( BinaryCmpOp op, const float *a, const float *b, unsigned int n, bool_t *c); template void binary_cmp<float, int>( BinaryCmpOp op, const float *a, const int *b, unsigned int n, bool_t *c); template void binary_cmp<int, float>( BinaryCmpOp op, const int *a, const float *b, unsigned int n, bool_t *c); template void binary_cmp<int, int>( BinaryCmpOp op, const int *a, const int *b, unsigned int n, bool_t *c); template<typename T> void binary_cmp_scalar(BinaryCmpOp op, const T *a, T alpha, unsigned int n, bool_t *b) { switch (op) { case EQ_OP: binary_scalar<T, T, bool_t, EqOp<T, T, bool_t> > (a, alpha, n, b); break; case GT_OP: binary_scalar<T, T, bool_t, GtOp<T, T, bool_t> > (a, alpha, n, b); break; case GT_EQ_OP: binary_scalar<T, T, bool_t, GtEqOp<T, T, bool_t> > (a, alpha, n, b); break; case LT_OP: binary_scalar<T, T, bool_t, LtOp<T, T, bool_t> > (a, alpha, n, b); break; case LT_EQ_OP: binary_scalar<T, T, bool_t, LtEqOp<T, T, bool_t> > (a, alpha, n, b); break; case NEQ_OP: binary_scalar<T, T, bool_t, NeqOp<T, T, bool_t> > (a, alpha, n, b); break; } } template void binary_cmp_scalar<float>( BinaryCmpOp op, const float *a, float alpha, unsigned int n, bool_t *b); template void binary_cmp_scalar<int>( BinaryCmpOp op, const int *a, int alpha, unsigned int n, bool_t *b); template<typename T> void binary_cmp_scalar_(BinaryCmpOp op, T alpha, const T 
*a, unsigned int n, bool_t *b) { switch (op) { case EQ_OP: binary_scalar<T, T, bool_t, EqOp<T, T, bool_t> > (alpha, a, n, b); break; case GT_OP: binary_scalar<T, T, bool_t, GtOp<T, T, bool_t> > (alpha, a, n, b); break; case GT_EQ_OP: binary_scalar<T, T, bool_t, GtEqOp<T, T, bool_t> > (alpha, a, n, b); break; case LT_OP: binary_scalar<T, T, bool_t, LtOp<T, T, bool_t> > (alpha, a, n, b); break; case LT_EQ_OP: binary_scalar<T, T, bool_t, LtEqOp<T, T, bool_t> > (alpha, a, n, b); break; case NEQ_OP: binary_scalar<T, T, bool_t, NeqOp<T, T, bool_t> > (alpha, a, n, b); break; } } template void binary_cmp_scalar_<float>( BinaryCmpOp op, float alpha, const float *a, unsigned int n, bool_t *b);; template void binary_cmp_scalar_<int>( BinaryCmpOp op, int alpha, const int *a, unsigned int n, bool_t *b); template<typename Ta, typename Tb> void binary_cmp_broadcast(BinaryCmpOp op, BroadcastType btype, const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c) { switch (op) { case EQ_OP: binary_broadcast<Ta, Tb, bool_t, EqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case GT_OP: binary_broadcast<Ta, Tb, bool_t, GtOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case GT_EQ_OP: binary_broadcast<Ta, Tb, bool_t, GtEqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case LT_OP: binary_broadcast<Ta, Tb, bool_t, LtOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case LT_EQ_OP: binary_broadcast<Ta, Tb, bool_t, LtEqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case NEQ_OP: binary_broadcast<Ta, Tb, bool_t, NeqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; } } template void binary_cmp_broadcast<float, float>( BinaryCmpOp op, BroadcastType btype, const float *a, const float *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); template void binary_cmp_broadcast<float, int>( BinaryCmpOp op, BroadcastType btype, const float *a, const int *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); template void binary_cmp_broadcast<int, float>( BinaryCmpOp op, BroadcastType btype, const int *a, const float *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); template void binary_cmp_broadcast<int, int>( BinaryCmpOp op, BroadcastType btype, const int *a, const int *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); #define UNARY_OP(name, operation) \ template <typename Ta, typename Tb> \ struct name { \ __device__ Tb operator()(const Ta a) { \ operation; \ } \ }; UNARY_OP(AbsOp, return fabsf(a);) UNARY_OP(CosOp, return cosf(a);) UNARY_OP(ExpOp, return expf(a);) UNARY_OP(LogOp, return logf(a);) UNARY_OP(NegOp, return -a;) UNARY_OP(ReluOp, return fmaxf(0.0, a);) UNARY_OP(ReluDOp, return a >= 0.0 ? 
1.0 : 0.0;) UNARY_OP(SigmoidOp, return 1.0/(1.0 + expf(-a));) UNARY_OP(SigmoidDOp, Ta tmp = 1.0/(1.0 + expf(-a)); return tmp*(1-tmp);) UNARY_OP(SinOp, return sinf(a);) UNARY_OP(SqrtOp, return sqrtf(a);) UNARY_OP(TanhOp, return tanhf(a);) UNARY_OP(TanhDOp, Ta tmp = tanhf(a); return 1-tmp*tmp;) template<typename T, typename Op> __global__ void kernel_unary(const T *a, unsigned int n, T *b) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = op(a[idx]); } } template<typename T, typename Op> __global__ void kernel_unary_inplace(T *a, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(a[idx]); } } template<typename T, typename Op> void unary(const T *a, unsigned int n, T *b) { if (a == b) { hipLaunchKernelGGL(( kernel_unary_inplace<T, Op>), dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, b, n); } else { hipLaunchKernelGGL(( kernel_unary<T, Op>), dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, a, n, b); } } template<typename T> void unary(UnaryOp op, const T *a, unsigned int n, T *b) { switch (op) { case ABS_OP: unary<T, AbsOp<T, T> >(a, n, b); break; case COS_OP: unary<T, CosOp<T, T> >(a, n, b); break; case EXP_OP: unary<T, ExpOp<T, T> >(a, n, b); break; case LOG_OP: unary<T, LogOp<T, T> >(a, n, b); break; case NEG_OP: unary<T, NegOp<T, T> >(a, n, b); break; case RELU_OP: unary<T, ReluOp<T, T> >(a, n, b); break; case RELU_D_OP: unary<T, ReluDOp<T, T> >(a, n, b); break; case SIGMOID_OP: unary<T, SigmoidOp<T, T> >(a, n, b); break; case SIGMOID_D_OP: unary<T, SigmoidDOp<T, T> >(a, n, b); break; case SIN_OP: unary<T, SinOp<T, T> >(a, n, b); break; case SQRT_OP: unary<T, SqrtOp<T, T> >(a, n, b); break; case TANH_OP: unary<T, TanhOp<T, T> >(a, n, b); break; case TANH_D_OP: unary<T, TanhDOp<T, T> >(a, n, b); break; } } template void unary<float>(UnaryOp op, const float *a, unsigned int n, float *b); // TODO: unary should convert to float for certain operations template void unary<int>(UnaryOp op, const int *a, unsigned int n, int *b); template<typename T> __global__ void kernel_clip(const T *a, T a_min, T a_max, unsigned int n, T *b) { CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = fminf(fmaxf(a[idx], a_min), a_max); } } template<typename T> __global__ void kernel_clip_inplace(T *a, T a_min, T a_max, unsigned int n) { CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = fminf(fmaxf(a[idx], a_min), a_max); } } template<typename T> void clip(const T *a, T a_min, T a_max, unsigned int n, T *b) { if (a == b) { hipLaunchKernelGGL(( kernel_clip_inplace<T>), dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, b, a_min, a_max, n); } else { hipLaunchKernelGGL(( kernel_clip<T>), dim3(cuda_blocks(n)), dim3(kNumBlockThreads), 0, 0, a, a_min, a_max, n, b); } } template void clip<float>(const float *a, float a_min, float a_max, unsigned int n, float *b); template void clip<int>(const int *a, int a_min, int a_max, unsigned int n, int *b); }
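/* Usage sketch (added annotation, not part of the original source): a minimal host-side
   driver for the dispatchers defined above. It relies only on what this file already
   includes (the HIP runtime and cudarray/elementwise.hpp); sizes and values are
   illustrative and error checking is omitted. */
namespace cudarray {

void example_elementwise()
{
    const unsigned int n = 1024;
    float *a, *b, *c;
    hipMalloc((void **)&a, n * sizeof(float));
    hipMalloc((void **)&b, n * sizeof(float));
    hipMalloc((void **)&c, n * sizeof(float));
    // ... fill a and b on the device, e.g. with hipMemcpy from host buffers ...

    binary(ADD_OP, a, b, n, c);           // c[i] = a[i] + b[i]
    binary_scalar(MUL_OP, c, 0.5f, n, c); // in place: c[i] = c[i] * 0.5f
    unary(RELU_OP, c, n, c);              // in place: c[i] = max(0, c[i])
    clip(c, 0.0f, 1.0f, n, c);            // in place: clamp c[i] to [0, 1]

    hipFree(a);
    hipFree(b);
    hipFree(c);
}

} // namespace cudarray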
c239315edbbd9423b711fc17eef37db7bc7dab37.cu
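/* Annotation (not in the original file): CUDA source corresponding to the hipify output
   above. The main systematic differences are the added #include "hip/hip_runtime.h" in
   the .hip file and the launch syntax: kernel<<<grid, block, shmem>>>(args...) here
   becomes hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), shmem, 0, args...) there,
   with shmem written as 0 when the CUDA launch does not use shared memory. */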
#include "cudarray/common.hpp" #include "cudarray/elementwise.hpp" namespace cudarray { #define BINARY_OP(name, operation) \ template <typename Ta, typename Tb, typename Tc> \ struct name { \ __device__ Tc operator()(const Ta a, const Tb b) { \ return operation; \ } \ }; BINARY_OP(AddOp, a + b) BINARY_OP(DivOp, a / b) BINARY_OP(MaxOp, fmaxf(a, b)) BINARY_OP(MinOp, fminf(a, b)) BINARY_OP(MulOp, a * b) BINARY_OP(PowOp, powf(a, b)) BINARY_OP(SubOp, a - b) template<typename Ta, typename Tb, typename Tc, typename Op> __global__ void kernel_binary(const Ta *a, const Tb *b, unsigned int n, Tc *c) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { c[idx] = op(a[idx], b[idx]); } } template<typename Ta, typename Tb, typename Tc, typename Op> __global__ void kernel_binary_inplace(Ta *a, const Tb *b, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(a[idx], b[idx]); } } template<typename Ta, typename Tb, typename Tc, typename Op> void binary(const Ta *a, const Tb *b, unsigned int n, Tc *c) { if (c == (Tc *) a) { kernel_binary_inplace<Tc, Tb, Tc, Op> <<<cuda_blocks(n), kNumBlockThreads>>> (c, b, n); } else if (c == (Tc *) b) { kernel_binary_inplace<Tc, Ta, Tc, Op> <<<cuda_blocks(n), kNumBlockThreads>>> (c, a, n); } else { kernel_binary<Ta, Tb, Tc, Op> <<<cuda_blocks(n), kNumBlockThreads>>> (a, b, n, c); } } template<typename Ta, typename Tb, typename Tc> void binary(BinaryOp op, const Ta *a, const Tb *b, unsigned int n, Tc *c) { switch (op) { case ADD_OP: binary<Ta, Tb, Tc, AddOp<Ta, Tb, Tc> >(a, b, n, c); break; case DIV_OP: binary<Ta, Tb, Tc, DivOp<Ta, Tb, Tc> >(a, b, n, c); break; case MAX_B_OP: binary<Ta, Tb, Tc, MaxOp<Ta, Tb, Tc> >(a, b, n, c); break; case MIN_B_OP: binary<Ta, Tb, Tc, MinOp<Ta, Tb, Tc> >(a, b, n, c); break; case MUL_OP: binary<Ta, Tb, Tc, MulOp<Ta, Tb, Tc> >(a, b, n, c); break; case POW_OP: binary<Ta, Tb, Tc, PowOp<Ta, Tb, Tc> >(a, b, n, c); break; case SUB_OP: binary<Ta, Tb, Tc, SubOp<Ta, Tb, Tc> >(a, b, n, c); break; } } template void binary<float, float, float>( BinaryOp op, const float *a, const float *b, unsigned int n, float *c); template void binary<float, int, float>( BinaryOp op, const float *a, const int *b, unsigned int n, float *c); template void binary<int, float, float>( BinaryOp op, const int *a, const float *b, unsigned int n, float *c); template void binary<int, int, int>( BinaryOp op, const int *a, const int *b, unsigned int n, int *c); template<typename Ta, typename Talpha, typename Tb, typename Op> __global__ void kernel_binary_scalar(const Ta *a, Talpha alpha, unsigned int n, Tb *b) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = op(a[idx], alpha); } } template<typename Ta, typename Talpha, typename Op> __global__ void kernel_binary_scalar_inplace(Ta *a, Talpha alpha, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(a[idx], alpha); } } template<typename Ta, typename Talpha, typename Tb, typename Op> void binary_scalar(const Ta *a, Talpha alpha, unsigned int n, Tb *b) { if (b == (Tb *)a) { kernel_binary_scalar_inplace<Tb, Talpha, Op> <<<cuda_blocks(n), kNumBlockThreads>>> (b, alpha, n); } else { kernel_binary_scalar<Ta, Talpha, Tb, Op> <<<cuda_blocks(n), kNumBlockThreads>>> (a, alpha, n, b); } } template<typename Ta, typename Talpha, typename Tb> void binary_scalar(BinaryOp op, const Ta *a, Talpha alpha, unsigned int n, Tb *b) { switch (op) { case ADD_OP: binary_scalar<Ta, Talpha, Tb, AddOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case DIV_OP: binary_scalar<Ta, Talpha, Tb, DivOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case 
MAX_B_OP: binary_scalar<Ta, Talpha, Tb, MaxOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case MIN_B_OP: binary_scalar<Ta, Talpha, Tb, MinOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case MUL_OP: binary_scalar<Ta, Talpha, Tb, MulOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; case POW_OP: if (alpha == static_cast<Talpha>(2)) { binary<Ta, Ta, Tb, MulOp<Ta, Talpha, Tb> >(a, a, n, b); } else if (alpha == static_cast<Talpha>(1)) { binary_scalar<Ta, Ta, Tb, MulOp<Ta, Talpha, Tb> >(a, 1, n, b); } else { binary_scalar<Ta, Talpha, Tb, PowOp<Ta, Talpha, Tb> >(a, alpha, n, b); } break; case SUB_OP: binary_scalar<Ta, Talpha, Tb, SubOp<Ta, Talpha, Tb> >(a, alpha, n, b); break; } } template void binary_scalar<float, float, float>( BinaryOp op, const float *a, float alpha, unsigned int n, float *c); template void binary_scalar<int, float, float>( BinaryOp op, const int *a, float alpha, unsigned int n, float *c); template void binary_scalar<int, int, int>( BinaryOp op, const int *a, int alpha, unsigned int n, int *c); template<typename Talpha, typename Ta, typename Tb, typename Op> __global__ void kernel_binary_scalar(Talpha alpha, const Ta *a, unsigned int n, Tb *b) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = op(alpha, a[idx]); } } template<typename Talpha, typename Ta, typename Op> __global__ void kernel_binary_scalar_inplace(Talpha alpha, Ta *a, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(alpha, a[idx]); } } template<typename Talpha, typename Ta, typename Tb, typename Op> void binary_scalar(Talpha alpha, const Ta *a, unsigned int n, Tb *b) { if (b == (Tb *)a) { kernel_binary_scalar_inplace<Talpha, Tb, Op> <<<cuda_blocks(n), kNumBlockThreads>>> (alpha, b, n); } else { kernel_binary_scalar<Talpha, Ta, Tb, Op> <<<cuda_blocks(n), kNumBlockThreads>>> (alpha, a, n, b); } } template<typename Talpha, typename Ta, typename Tb> void binary_scalar_(BinaryOp op, Talpha alpha, const Ta *a, unsigned int n, Tb *b) { switch (op) { case ADD_OP: binary_scalar<Talpha, Ta, Tb, AddOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case DIV_OP: binary_scalar<Talpha, Ta, Tb, DivOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case MAX_B_OP: binary_scalar<Talpha, Ta, Tb, MaxOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case MIN_B_OP: binary_scalar<Talpha, Ta, Tb, MinOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case MUL_OP: binary_scalar<Talpha, Ta, Tb, MulOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; case POW_OP: if (alpha == static_cast<Talpha>(2)) { binary<Ta, Ta, Tb, MulOp<Talpha, Ta, Tb> >(a, a, n, b); } else if (alpha == static_cast<Talpha>(1)) { binary_scalar<Ta, Ta, Tb, MulOp<Talpha, Ta, Tb> >(1, a, n, b); } else { binary_scalar<Talpha, Ta, Tb, PowOp<Talpha, Ta, Tb> >(alpha, a, n, b); } break; case SUB_OP: binary_scalar<Talpha, Ta, Tb, SubOp<Talpha, Ta, Tb> >(alpha, a, n, b); break; } } template void binary_scalar_<float, float, float>( BinaryOp op, float alpha, const float *a, unsigned int n, float *c); template void binary_scalar_<float, int, float>( BinaryOp op, float alpha, const int *a, unsigned int n, float *c); template void binary_scalar_<int, int, int>( BinaryOp op, int alpha, const int *a, unsigned int n, int *c); template<typename Ta, typename Tb, typename Tc, typename Op, bool broadcast_leading> __global__ void kernel_binary_broadcast( const Ta *a, const Tb *b, unsigned int m, unsigned int n, Tc *c) { Op op; CUDA_GRID_STRIDE_LOOP(idx, m*n) { if (broadcast_leading) { c[idx] = op(a[idx], b[idx % n]); } else { c[idx] = op(a[idx], b[idx / m]); } } } template<typename Ta, typename Tb, typename Op, 
bool broadcast_leading> __global__ void kernel_binary_broadcast_inplace( Ta *a, const Tb *b, unsigned int m, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, m*n) { if (broadcast_leading) { a[idx] = op(a[idx], b[idx % n]); } else { a[idx] = op(a[idx], b[idx / m]); } } } template<typename Ta, typename Tb, typename Tc, typename Op, bool broadcast_leading> void binary_broadcast(const Ta *a, const Tb *b, unsigned int m, unsigned int n, Tc *c) { if (c == (Tc *) a) { kernel_binary_broadcast_inplace<Ta, Tb, Op, broadcast_leading> <<<cuda_blocks(m*n), kNumBlockThreads>>> ((Ta *) c, b, m, n); } else if (c == (Tc *) b) { kernel_binary_broadcast_inplace<Tb, Ta, Op, broadcast_leading> <<<cuda_blocks(m*n), kNumBlockThreads>>> ((Tb *) b, a, m, n); } else { kernel_binary_broadcast<Ta, Tb, Tc, Op, broadcast_leading> <<<cuda_blocks(m*n), kNumBlockThreads>>> (a, b, m, n, c); } } template<typename Ta, typename Tb, typename Tc, typename Op, bool broadcast_inner> __global__ void kernel_binary_broadcast(const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) { Op op; CUDA_GRID_STRIDE_LOOP(idx, k*m*n) { if (broadcast_inner) { c[idx] = op(a[idx], b[(idx / m / n) * n + (idx % n)]); } else { c[idx] = op(a[idx], b[(idx / n) % m]); } } } template<typename Ta, typename Tb, typename Op, bool broadcast_inner> __global__ void kernel_binary_broadcast_inplace(Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, k*m*n) { if (broadcast_inner) { a[idx] = op(a[idx], b[(idx / m / n) * n + (idx % n)]); } else { a[idx] = op(a[idx], b[(idx / n) % m]); } } } template<typename Ta, typename Tb, typename Tc, typename Op, bool broadcast_inner> void binary_broadcast(const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) { if (c == (Tc *) a) { kernel_binary_broadcast_inplace<Ta, Tb, Op, broadcast_inner> <<<cuda_blocks(k*m*n), kNumBlockThreads>>> ((Ta *) c, b, k, m, n); } else if (c == (Tc *) b) { kernel_binary_broadcast_inplace<Tb, Ta, Op, broadcast_inner> <<<cuda_blocks(k*m*n), kNumBlockThreads>>> ((Tb *) b, a, k, m, n); } else { kernel_binary_broadcast<Ta, Tb, Tc, Op, broadcast_inner> <<<cuda_blocks(k*m*n), kNumBlockThreads>>> (a, b, k, m, n, c); } } template<typename Ta, typename Tb, typename Tc, typename Op> void binary_broadcast(BroadcastType btype, const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) { switch (btype) { case BROADCAST_INNER: binary_broadcast<Ta, Tb, Tc, Op, true>(a, b, k, m, n, c); case BROADCAST_LEADING: binary_broadcast<Ta, Tb, Tc, Op, true>(a, b, m, n, c); break; case BROADCAST_OUTER: binary_broadcast<Ta, Tb, Tc, Op, false>(a, b, k, m, n, c); break; case BROADCAST_TRAILING: binary_broadcast<Ta, Tb, Tc, Op, false>(a, b, m, n, c); break; } } template<typename Ta, typename Tb, typename Tc> void binary_broadcast(BinaryOp op, BroadcastType btype, const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, Tc *c) { switch (op) { case ADD_OP: binary_broadcast<Ta, Tb, Tc, AddOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case DIV_OP: binary_broadcast<Ta, Tb, Tc, DivOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case MAX_B_OP: binary_broadcast<Ta, Tb, Tc, MaxOp<Ta, Tb, Tc> >( btype, a, b, k, m, n, c); break; case MIN_B_OP: binary_broadcast<Ta, Tb, Tc, MinOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case MUL_OP: binary_broadcast<Ta, Tb, Tc, MulOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; case POW_OP: binary_broadcast<Ta, Tb, Tc, PowOp<Ta, Tb, Tc> > (btype, a, 
b, k, m, n, c); break; case SUB_OP: binary_broadcast<Ta, Tb, Tc, SubOp<Ta, Tb, Tc> > (btype, a, b, k, m, n, c); break; } } template void binary_broadcast<float, float, float>( BinaryOp op, BroadcastType btype, const float *a, const float *b, unsigned int k, unsigned int m, unsigned int n, float *c); template void binary_broadcast<float, int, float>( BinaryOp op, BroadcastType btype, const float *a, const int *b, unsigned int k, unsigned int m, unsigned int n, float *c); template void binary_broadcast<int, float, float>( BinaryOp op, BroadcastType btype, const int *a, const float *b, unsigned int k, unsigned int m, unsigned int n, float *c); template void binary_broadcast<int, int, int>( BinaryOp op, BroadcastType btype, const int *a, const int *b, unsigned int k, unsigned int m,unsigned int n, int *c); BINARY_OP(EqOp, a == b) BINARY_OP(GtOp, a > b) BINARY_OP(GtEqOp, a >= b) BINARY_OP(LtOp, a < b) BINARY_OP(LtEqOp, a <= b) BINARY_OP(NeqOp, a != b) template<typename Ta, typename Tb> void binary_cmp(BinaryCmpOp op, const Ta *a, const Tb *b, unsigned int n, bool_t *c) { switch (op) { case EQ_OP: binary<Ta, Tb, bool_t, EqOp<Ta, Tb, bool_t> >(a, b, n, c); break; case GT_OP: binary<Ta, Tb, bool_t, GtOp<Ta, Tb, bool_t> >(a, b, n, c); break; case GT_EQ_OP: binary<Ta, Tb, bool_t, GtEqOp<Ta, Tb, bool_t> >(a, b, n, c); break; case LT_OP: binary<Ta, Tb, bool_t, LtOp<Ta, Tb, bool_t> >(a, b, n, c); break; case LT_EQ_OP: binary<Ta, Tb, bool_t, LtEqOp<Ta, Tb, bool_t> >(a, b, n, c); break; case NEQ_OP: binary<Ta, Tb, bool_t, NeqOp<Ta, Tb, bool_t> >(a, b, n, c); break; } } template void binary_cmp<float, float>( BinaryCmpOp op, const float *a, const float *b, unsigned int n, bool_t *c); template void binary_cmp<float, int>( BinaryCmpOp op, const float *a, const int *b, unsigned int n, bool_t *c); template void binary_cmp<int, float>( BinaryCmpOp op, const int *a, const float *b, unsigned int n, bool_t *c); template void binary_cmp<int, int>( BinaryCmpOp op, const int *a, const int *b, unsigned int n, bool_t *c); template<typename T> void binary_cmp_scalar(BinaryCmpOp op, const T *a, T alpha, unsigned int n, bool_t *b) { switch (op) { case EQ_OP: binary_scalar<T, T, bool_t, EqOp<T, T, bool_t> > (a, alpha, n, b); break; case GT_OP: binary_scalar<T, T, bool_t, GtOp<T, T, bool_t> > (a, alpha, n, b); break; case GT_EQ_OP: binary_scalar<T, T, bool_t, GtEqOp<T, T, bool_t> > (a, alpha, n, b); break; case LT_OP: binary_scalar<T, T, bool_t, LtOp<T, T, bool_t> > (a, alpha, n, b); break; case LT_EQ_OP: binary_scalar<T, T, bool_t, LtEqOp<T, T, bool_t> > (a, alpha, n, b); break; case NEQ_OP: binary_scalar<T, T, bool_t, NeqOp<T, T, bool_t> > (a, alpha, n, b); break; } } template void binary_cmp_scalar<float>( BinaryCmpOp op, const float *a, float alpha, unsigned int n, bool_t *b); template void binary_cmp_scalar<int>( BinaryCmpOp op, const int *a, int alpha, unsigned int n, bool_t *b); template<typename T> void binary_cmp_scalar_(BinaryCmpOp op, T alpha, const T *a, unsigned int n, bool_t *b) { switch (op) { case EQ_OP: binary_scalar<T, T, bool_t, EqOp<T, T, bool_t> > (alpha, a, n, b); break; case GT_OP: binary_scalar<T, T, bool_t, GtOp<T, T, bool_t> > (alpha, a, n, b); break; case GT_EQ_OP: binary_scalar<T, T, bool_t, GtEqOp<T, T, bool_t> > (alpha, a, n, b); break; case LT_OP: binary_scalar<T, T, bool_t, LtOp<T, T, bool_t> > (alpha, a, n, b); break; case LT_EQ_OP: binary_scalar<T, T, bool_t, LtEqOp<T, T, bool_t> > (alpha, a, n, b); break; case NEQ_OP: binary_scalar<T, T, bool_t, NeqOp<T, T, bool_t> > (alpha, a, n, b); 
break; } } template void binary_cmp_scalar_<float>( BinaryCmpOp op, float alpha, const float *a, unsigned int n, bool_t *b);; template void binary_cmp_scalar_<int>( BinaryCmpOp op, int alpha, const int *a, unsigned int n, bool_t *b); template<typename Ta, typename Tb> void binary_cmp_broadcast(BinaryCmpOp op, BroadcastType btype, const Ta *a, const Tb *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c) { switch (op) { case EQ_OP: binary_broadcast<Ta, Tb, bool_t, EqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case GT_OP: binary_broadcast<Ta, Tb, bool_t, GtOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case GT_EQ_OP: binary_broadcast<Ta, Tb, bool_t, GtEqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case LT_OP: binary_broadcast<Ta, Tb, bool_t, LtOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case LT_EQ_OP: binary_broadcast<Ta, Tb, bool_t, LtEqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; case NEQ_OP: binary_broadcast<Ta, Tb, bool_t, NeqOp<Ta, Tb, bool_t> > (btype, a, b, k, m, n, c); break; } } template void binary_cmp_broadcast<float, float>( BinaryCmpOp op, BroadcastType btype, const float *a, const float *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); template void binary_cmp_broadcast<float, int>( BinaryCmpOp op, BroadcastType btype, const float *a, const int *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); template void binary_cmp_broadcast<int, float>( BinaryCmpOp op, BroadcastType btype, const int *a, const float *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); template void binary_cmp_broadcast<int, int>( BinaryCmpOp op, BroadcastType btype, const int *a, const int *b, unsigned int k, unsigned int m, unsigned int n, bool_t *c); #define UNARY_OP(name, operation) \ template <typename Ta, typename Tb> \ struct name { \ __device__ Tb operator()(const Ta a) { \ operation; \ } \ }; UNARY_OP(AbsOp, return fabsf(a);) UNARY_OP(CosOp, return cosf(a);) UNARY_OP(ExpOp, return expf(a);) UNARY_OP(LogOp, return logf(a);) UNARY_OP(NegOp, return -a;) UNARY_OP(ReluOp, return fmaxf(0.0, a);) UNARY_OP(ReluDOp, return a >= 0.0 ? 
1.0 : 0.0;) UNARY_OP(SigmoidOp, return 1.0/(1.0 + expf(-a));) UNARY_OP(SigmoidDOp, Ta tmp = 1.0/(1.0 + expf(-a)); return tmp*(1-tmp);) UNARY_OP(SinOp, return sinf(a);) UNARY_OP(SqrtOp, return sqrtf(a);) UNARY_OP(TanhOp, return tanhf(a);) UNARY_OP(TanhDOp, Ta tmp = tanhf(a); return 1-tmp*tmp;) template<typename T, typename Op> __global__ void kernel_unary(const T *a, unsigned int n, T *b) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = op(a[idx]); } } template<typename T, typename Op> __global__ void kernel_unary_inplace(T *a, unsigned int n) { Op op; CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = op(a[idx]); } } template<typename T, typename Op> void unary(const T *a, unsigned int n, T *b) { if (a == b) { kernel_unary_inplace<T, Op><<<cuda_blocks(n), kNumBlockThreads>>>(b, n); } else { kernel_unary<T, Op><<<cuda_blocks(n), kNumBlockThreads>>>(a, n, b); } } template<typename T> void unary(UnaryOp op, const T *a, unsigned int n, T *b) { switch (op) { case ABS_OP: unary<T, AbsOp<T, T> >(a, n, b); break; case COS_OP: unary<T, CosOp<T, T> >(a, n, b); break; case EXP_OP: unary<T, ExpOp<T, T> >(a, n, b); break; case LOG_OP: unary<T, LogOp<T, T> >(a, n, b); break; case NEG_OP: unary<T, NegOp<T, T> >(a, n, b); break; case RELU_OP: unary<T, ReluOp<T, T> >(a, n, b); break; case RELU_D_OP: unary<T, ReluDOp<T, T> >(a, n, b); break; case SIGMOID_OP: unary<T, SigmoidOp<T, T> >(a, n, b); break; case SIGMOID_D_OP: unary<T, SigmoidDOp<T, T> >(a, n, b); break; case SIN_OP: unary<T, SinOp<T, T> >(a, n, b); break; case SQRT_OP: unary<T, SqrtOp<T, T> >(a, n, b); break; case TANH_OP: unary<T, TanhOp<T, T> >(a, n, b); break; case TANH_D_OP: unary<T, TanhDOp<T, T> >(a, n, b); break; } } template void unary<float>(UnaryOp op, const float *a, unsigned int n, float *b); // TODO: unary should convert to float for certain operations template void unary<int>(UnaryOp op, const int *a, unsigned int n, int *b); template<typename T> __global__ void kernel_clip(const T *a, T a_min, T a_max, unsigned int n, T *b) { CUDA_GRID_STRIDE_LOOP(idx, n) { b[idx] = fminf(fmaxf(a[idx], a_min), a_max); } } template<typename T> __global__ void kernel_clip_inplace(T *a, T a_min, T a_max, unsigned int n) { CUDA_GRID_STRIDE_LOOP(idx, n) { a[idx] = fminf(fmaxf(a[idx], a_min), a_max); } } template<typename T> void clip(const T *a, T a_min, T a_max, unsigned int n, T *b) { if (a == b) { kernel_clip_inplace<T><<<cuda_blocks(n), kNumBlockThreads>>> (b, a_min, a_max, n); } else { kernel_clip<T><<<cuda_blocks(n), kNumBlockThreads>>> (a, a_min, a_max, n, b); } } template void clip<float>(const float *a, float a_min, float a_max, unsigned int n, float *b); template void clip<int>(const int *a, int a_min, int a_max, unsigned int n, int *b); }
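
// Illustrative sketch (not one of the paired files above): the element-wise
// kernels above all iterate via a CUDA_GRID_STRIDE_LOOP(idx, n) macro and are
// launched with cuda_blocks(n) blocks of kNumBlockThreads threads, none of
// which are defined in this excerpt. The self-contained CUDA program below
// re-creates that grid-stride pattern; kNumBlockThreads, cuda_blocks and the
// loop expansion are assumptions and may differ from the library's real code.

#include <cstdio>
#include <cuda_runtime.h>

const unsigned int kNumBlockThreads = 256;               // assumed block size

inline unsigned int cuda_blocks(unsigned int n) {        // assumed helper
    return (n + kNumBlockThreads - 1) / kNumBlockThreads;
}

// The standard grid-stride idiom: each thread starts at its global index and
// strides by the total thread count, so any n is covered by any launch size.
__global__ void kernel_add_scalar(float alpha, const float *a,
                                  unsigned int n, float *b) {
    for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n;
         idx += gridDim.x * blockDim.x) {
        b[idx] = a[idx] + alpha;
    }
}

int main() {
    const unsigned int n = 1 << 20;
    float *a, *b;
    cudaMalloc(&a, n * sizeof(float));
    cudaMalloc(&b, n * sizeof(float));
    cudaMemset(a, 0, n * sizeof(float));                 // a[i] = 0.0f
    kernel_add_scalar<<<cuda_blocks(n), kNumBlockThreads>>>(2.0f, a, n, b);
    cudaDeviceSynchronize();
    float first;
    cudaMemcpy(&first, b, sizeof(float), cudaMemcpyDeviceToHost);
    printf("b[0] = %f\n", first);                        // expect 2.000000
    cudaFree(a);
    cudaFree(b);
    return 0;
}
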
9220f1569768bf0d244b82830640beea594432e3.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _MYNN #define _MYNN #include <hip/hip_runtime.h> #include <math.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <vector> using namespace std; #include "types.h" #include "libnn.h" #define LAYERS (2) #define NINPUTS (2) #define NOUTPUTS (2) #define MIDDLE (10) #define L1M (MIDDLE) #define L1N (NINPUTS) #define L2M (NOUTPUTS) #define L2N (MIDDLE) const int mDim[LAYERS]={L1M,L2M};//,L3M}; const int nDim[LAYERS]={L1N,L2N};//,L3N}; //const int mDim[LAYERS]={L1M,L2M,L3M}; //const int nDim[LAYERS]={L1N,L2N,L3N}; void PRINTINFO(const Array<double> &pIn,const Array<double> &answer,const Array<double> &pOut,const Array<double> &pErr){ printf("in:[%.0f,%.0f] out:[%f,%f] targ:[%.0f,%.0f] err:[%f,%f]\n", pIn(0),pIn(1), answer(0),answer(1), pOut(0),pOut(1), pErr(0),pErr(1) ); } double ex1[NINPUTS]={-1,-1}; double ex2[NINPUTS]={-1,+1}; double ex3[NINPUTS]={+1,-1}; double ex4[NINPUTS]={+1,+1}; double ans1[NOUTPUTS]={-1,+1}; double ans2[NOUTPUTS]={+1,-1}; double ans3[NOUTPUTS]={+1,-1}; double ans4[NOUTPUTS]={-1,+1}; //const double ans1[NOUTPUTS]={-1,+1}; //const double ans2[NOUTPUTS]={+1,-1}; //const double ans3[NOUTPUTS]={+1,-1}; //const double ans4[NOUTPUTS]={+1,-1}; int main(){ int i,j; srand(time(0)); Net *net=0; net=new Net(LAYERS); for(i=0;i<LAYERS;i++){ net->insertLayer(i,mDim[i],nDim[i]); } net->rand(); Matrix<double> mat1=Matrix<double>(10,4); Array<double> arr1=Array<double>(10); Array<double> arr2=Array<double>(); arr2.resize(10); for(i=0;i<10;i++){ arr1(i)=i; arr2(i)=i+2; for(j=0;j<4;j++){ mat1(i,j)=(i+1)*(j+2); } } for(i=0;i<10;i++){ for(j=0;j<4;j++){ assert(mat1(i,j)==(i+1)*(j+2)); } } arr1.print(); arr2.print(); arr1=arr1+arr2; arr1.print(); arr1[0]+=5; arr1[5]+=15; arr1(2)=arr1[3]*4; arr1.print(); // return 0; vector<Array<double> > pIn,pOut; pIn.resize(4); pIn[0]=Array<double>(ex1,NINPUTS); pIn[1]=Array<double>(ex2,NINPUTS); pIn[2]=Array<double>(ex3,NINPUTS); pIn[3]=Array<double>(ex4,NINPUTS); for(Array<double> x:pIn) x.print(); pOut.resize(4); pOut[0]=Array<double>(ans1,NOUTPUTS); pOut[1]=Array<double>(ans2,NOUTPUTS); pOut[2]=Array<double>(ans3,NOUTPUTS); pOut[3]=Array<double>(ans4,NOUTPUTS); for(Array<double> x:pOut) x.print(); int tmpvar; for(i=0;i<EPOCHS;i++){ tmpvar=i%4; net->train(pIn[tmpvar],pOut[tmpvar]); PRINTINFO(pIn[tmpvar],net->answer,pOut[tmpvar],net->error); } } #endif
9220f1569768bf0d244b82830640beea594432e3.cu
#ifndef _MYNN #define _MYNN #include <cuda.h> #include <math.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <vector> using namespace std; #include "types.h" #include "libnn.h" #define LAYERS (2) #define NINPUTS (2) #define NOUTPUTS (2) #define MIDDLE (10) #define L1M (MIDDLE) #define L1N (NINPUTS) #define L2M (NOUTPUTS) #define L2N (MIDDLE) const int mDim[LAYERS]={L1M,L2M};//,L3M}; const int nDim[LAYERS]={L1N,L2N};//,L3N}; //const int mDim[LAYERS]={L1M,L2M,L3M}; //const int nDim[LAYERS]={L1N,L2N,L3N}; void PRINTINFO(const Array<double> &pIn,const Array<double> &answer,const Array<double> &pOut,const Array<double> &pErr){ printf("in:[%.0f,%.0f] out:[%f,%f] targ:[%.0f,%.0f] err:[%f,%f]\n", pIn(0),pIn(1), answer(0),answer(1), pOut(0),pOut(1), pErr(0),pErr(1) ); } double ex1[NINPUTS]={-1,-1}; double ex2[NINPUTS]={-1,+1}; double ex3[NINPUTS]={+1,-1}; double ex4[NINPUTS]={+1,+1}; double ans1[NOUTPUTS]={-1,+1}; double ans2[NOUTPUTS]={+1,-1}; double ans3[NOUTPUTS]={+1,-1}; double ans4[NOUTPUTS]={-1,+1}; //const double ans1[NOUTPUTS]={-1,+1}; //const double ans2[NOUTPUTS]={+1,-1}; //const double ans3[NOUTPUTS]={+1,-1}; //const double ans4[NOUTPUTS]={+1,-1}; int main(){ int i,j; srand(time(0)); Net *net=0; net=new Net(LAYERS); for(i=0;i<LAYERS;i++){ net->insertLayer(i,mDim[i],nDim[i]); } net->rand(); Matrix<double> mat1=Matrix<double>(10,4); Array<double> arr1=Array<double>(10); Array<double> arr2=Array<double>(); arr2.resize(10); for(i=0;i<10;i++){ arr1(i)=i; arr2(i)=i+2; for(j=0;j<4;j++){ mat1(i,j)=(i+1)*(j+2); } } for(i=0;i<10;i++){ for(j=0;j<4;j++){ assert(mat1(i,j)==(i+1)*(j+2)); } } arr1.print(); arr2.print(); arr1=arr1+arr2; arr1.print(); arr1[0]+=5; arr1[5]+=15; arr1(2)=arr1[3]*4; arr1.print(); // return 0; vector<Array<double> > pIn,pOut; pIn.resize(4); pIn[0]=Array<double>(ex1,NINPUTS); pIn[1]=Array<double>(ex2,NINPUTS); pIn[2]=Array<double>(ex3,NINPUTS); pIn[3]=Array<double>(ex4,NINPUTS); for(Array<double> x:pIn) x.print(); pOut.resize(4); pOut[0]=Array<double>(ans1,NOUTPUTS); pOut[1]=Array<double>(ans2,NOUTPUTS); pOut[2]=Array<double>(ans3,NOUTPUTS); pOut[3]=Array<double>(ans4,NOUTPUTS); for(Array<double> x:pOut) x.print(); int tmpvar; for(i=0;i<EPOCHS;i++){ tmpvar=i%4; net->train(pIn[tmpvar],pOut[tmpvar]); PRINTINFO(pIn[tmpvar],net->answer,pOut[tmpvar],net->error); } } #endif
36e94d6e1515d07ec47d8695a75ccefcbe624894.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ristretto/base_ristretto_layer.hpp" #include "ristretto/base_ristretto_layer.cuh" namespace caffe { template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeWeights_gpu( vector<shared_ptr<Blob<Dtype> > > weights_quantized, const int rounding, const bool bias_term) { Dtype* weight = weights_quantized[0]->mutable_gpu_data(); const int cnt_weight = weights_quantized[0]->count(); switch (precision_) { case QuantizationParameter_Precision_MINIFLOAT: Trim2MiniFloat_gpu(weight, cnt_weight, fp_mant_, fp_exp_, rounding); if (bias_term) { Trim2MiniFloat_gpu(weights_quantized[1]->mutable_gpu_data(), weights_quantized[1]->count(), fp_mant_, fp_exp_, rounding); } break; case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(weight, cnt_weight, bw_params_, rounding, fl_params_,fl_params_bias_); if (bias_term) { Trim2FixedPoint_gpu(weights_quantized[1]->mutable_gpu_data(), weights_quantized[1]->count(), bw_params_, rounding, fl_params_,fl_params_bias_); } break; case QuantizationParameter_Precision_INTEGER_POWER_OF_2_WEIGHTS: Trim2IntegerPowerOf2_gpu(weight, cnt_weight, pow_2_min_exp_, pow_2_max_exp_, rounding); // Don't trim bias break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerInputs_gpu( Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_INTEGER_POWER_OF_2_WEIGHTS: case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_in_, rounding_, fl_layer_in_); break; case QuantizationParameter_Precision_MINIFLOAT: Trim2MiniFloat_gpu(data, count, fp_mant_, fp_exp_, rounding_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerOutputs_gpu(Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_INTEGER_POWER_OF_2_WEIGHTS: case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_out_, rounding_, fl_layer_out_); break; case QuantizationParameter_Precision_MINIFLOAT: Trim2MiniFloat_gpu(data, count, fp_mant_, fp_exp_, rounding_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> __global__ void Trim2FixedPoint_kernel(Dtype* data, const int cnt, const int bit_width, const int rounding, const int fl) { CUDA_KERNEL_LOOP(index, cnt) { // Saturate data Dtype max_data = (powf(2, bit_width - 1) - 1) * powf(2, -fl); Dtype min_data = -powf(2, bit_width - 1) * powf(2, -fl); data[index] = fmax(fmin(data[index], max_data), min_data); // Round data data[index] /= powf(2, -fl); switch (rounding) { case QuantizationParameter_Rounding_NEAREST: data[index] = rint(data[index]); break; case QuantizationParameter_Rounding_STOCHASTIC: data[index] = __float2int_rd(data[index] + RandUniform_device(index)); break; default: break; } data[index] *= powf(2, -fl); } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2FixedPoint_gpu(Dtype* data, const int cnt, const int bit_width, const int rounding, int fl) { hipLaunchKernelGGL(( Trim2FixedPoint_kernel), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, data, cnt, bit_width, rounding, fl); } template <typename Dtype> __global__ void Trim2MiniFloat_kernel(Dtype* data, const int cnt, const int bw_mant, const int bw_exp, 
const int rounding){ CUDA_KERNEL_LOOP(index, cnt) { Trim2MiniFloat_device(&data[index], bw_mant, bw_exp, rounding, index); } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2MiniFloat_gpu(Dtype* data, const int cnt, const int bw_mant, const int bw_exp, const int rounding) { hipLaunchKernelGGL(( Trim2MiniFloat_kernel), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, data, cnt, bw_mant, bw_exp, rounding); } template <typename Dtype> __global__ void Trim2IntegerPowerOf2_kernel(Dtype* data, const int cnt, const int min_exp, const int max_exp, const int rounding) { CUDA_KERNEL_LOOP(index, cnt) { float exponent = log2f(fabs((float)data[index])); int sign = data[index] >= 0 ? 1 : -1; switch (rounding) { case QuantizationParameter_Rounding_NEAREST: exponent = rint(exponent); break; case QuantizationParameter_Rounding_STOCHASTIC: exponent = __float2int_rd(exponent + RandUniform_device(index)); break; default: break; } exponent = fmaxf(fminf(exponent, max_exp), min_exp); data[index] = sign * powf(2, exponent); } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2IntegerPowerOf2_gpu(Dtype* data, const int cnt, const int min_exp, const int max_exp, const int rounding) { hipLaunchKernelGGL(( Trim2IntegerPowerOf2_kernel), dim3(CAFFE_GET_BLOCKS(cnt)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, data, cnt, min_exp, max_exp, rounding); } // Explicit instantiations template void BaseRistrettoLayer<double>::QuantizeWeights_gpu( vector<shared_ptr<Blob<double> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<float>::QuantizeWeights_gpu( vector<shared_ptr<Blob<float> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<double>::QuantizeLayerInputs_gpu(double* data, const int count); template void BaseRistrettoLayer<float>::QuantizeLayerInputs_gpu(float* data, const int count); template void BaseRistrettoLayer<double>::QuantizeLayerOutputs_gpu( double* top_data, const int top_count); template void BaseRistrettoLayer<float>::QuantizeLayerOutputs_gpu( float* top_data, const int top_count); template void BaseRistrettoLayer<double>::Trim2FixedPoint_gpu(double* data, const int cnt, const int bit_width, const int rounding, int fl); template void BaseRistrettoLayer<float>::Trim2FixedPoint_gpu(float* data, const int cnt, const int bit_width, const int rounding, int fl); template void BaseRistrettoLayer<double>::Trim2MiniFloat_gpu(double* data, const int cnt, const int bw_mant, const int bw_exp, const int rounding); template void BaseRistrettoLayer<float>::Trim2MiniFloat_gpu(float* data, const int cnt, const int bw_mant, const int bw_exp, const int rounding); template void BaseRistrettoLayer<double>::Trim2IntegerPowerOf2_gpu(double* data, const int cnt, const int min_exp, const int max_exp, const int rounding); template void BaseRistrettoLayer<float>::Trim2IntegerPowerOf2_gpu(float* data, const int cnt, const int min_exp, const int max_exp, const int rounding); } // namespace caffe
36e94d6e1515d07ec47d8695a75ccefcbe624894.cu
#include "ristretto/base_ristretto_layer.hpp" #include "ristretto/base_ristretto_layer.cuh" namespace caffe { template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeWeights_gpu( vector<shared_ptr<Blob<Dtype> > > weights_quantized, const int rounding, const bool bias_term) { Dtype* weight = weights_quantized[0]->mutable_gpu_data(); const int cnt_weight = weights_quantized[0]->count(); switch (precision_) { case QuantizationParameter_Precision_MINIFLOAT: Trim2MiniFloat_gpu(weight, cnt_weight, fp_mant_, fp_exp_, rounding); if (bias_term) { Trim2MiniFloat_gpu(weights_quantized[1]->mutable_gpu_data(), weights_quantized[1]->count(), fp_mant_, fp_exp_, rounding); } break; case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(weight, cnt_weight, bw_params_, rounding, fl_params_,fl_params_bias_); if (bias_term) { Trim2FixedPoint_gpu(weights_quantized[1]->mutable_gpu_data(), weights_quantized[1]->count(), bw_params_, rounding, fl_params_,fl_params_bias_); } break; case QuantizationParameter_Precision_INTEGER_POWER_OF_2_WEIGHTS: Trim2IntegerPowerOf2_gpu(weight, cnt_weight, pow_2_min_exp_, pow_2_max_exp_, rounding); // Don't trim bias break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerInputs_gpu( Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_INTEGER_POWER_OF_2_WEIGHTS: case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_in_, rounding_, fl_layer_in_); break; case QuantizationParameter_Precision_MINIFLOAT: Trim2MiniFloat_gpu(data, count, fp_mant_, fp_exp_, rounding_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::QuantizeLayerOutputs_gpu(Dtype* data, const int count) { switch (precision_) { case QuantizationParameter_Precision_INTEGER_POWER_OF_2_WEIGHTS: case QuantizationParameter_Precision_DYNAMIC_FIXED_POINT: Trim2FixedPoint_gpu(data, count, bw_layer_out_, rounding_, fl_layer_out_); break; case QuantizationParameter_Precision_MINIFLOAT: Trim2MiniFloat_gpu(data, count, fp_mant_, fp_exp_, rounding_); break; default: LOG(FATAL) << "Unknown trimming mode: " << precision_; break; } } template <typename Dtype> __global__ void Trim2FixedPoint_kernel(Dtype* data, const int cnt, const int bit_width, const int rounding, const int fl) { CUDA_KERNEL_LOOP(index, cnt) { // Saturate data Dtype max_data = (powf(2, bit_width - 1) - 1) * powf(2, -fl); Dtype min_data = -powf(2, bit_width - 1) * powf(2, -fl); data[index] = fmax(fmin(data[index], max_data), min_data); // Round data data[index] /= powf(2, -fl); switch (rounding) { case QuantizationParameter_Rounding_NEAREST: data[index] = rint(data[index]); break; case QuantizationParameter_Rounding_STOCHASTIC: data[index] = __float2int_rd(data[index] + RandUniform_device(index)); break; default: break; } data[index] *= powf(2, -fl); } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2FixedPoint_gpu(Dtype* data, const int cnt, const int bit_width, const int rounding, int fl) { Trim2FixedPoint_kernel<<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS>>>( data, cnt, bit_width, rounding, fl); } template <typename Dtype> __global__ void Trim2MiniFloat_kernel(Dtype* data, const int cnt, const int bw_mant, const int bw_exp, const int rounding){ CUDA_KERNEL_LOOP(index, cnt) { Trim2MiniFloat_device(&data[index], bw_mant, bw_exp, rounding, index); } } 
template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2MiniFloat_gpu(Dtype* data, const int cnt, const int bw_mant, const int bw_exp, const int rounding) { Trim2MiniFloat_kernel<<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS>>>( data, cnt, bw_mant, bw_exp, rounding); } template <typename Dtype> __global__ void Trim2IntegerPowerOf2_kernel(Dtype* data, const int cnt, const int min_exp, const int max_exp, const int rounding) { CUDA_KERNEL_LOOP(index, cnt) { float exponent = log2f(fabs((float)data[index])); int sign = data[index] >= 0 ? 1 : -1; switch (rounding) { case QuantizationParameter_Rounding_NEAREST: exponent = rint(exponent); break; case QuantizationParameter_Rounding_STOCHASTIC: exponent = __float2int_rd(exponent + RandUniform_device(index)); break; default: break; } exponent = fmaxf(fminf(exponent, max_exp), min_exp); data[index] = sign * powf(2, exponent); } } template <typename Dtype> void BaseRistrettoLayer<Dtype>::Trim2IntegerPowerOf2_gpu(Dtype* data, const int cnt, const int min_exp, const int max_exp, const int rounding) { Trim2IntegerPowerOf2_kernel<<<CAFFE_GET_BLOCKS(cnt), CAFFE_CUDA_NUM_THREADS>>>( data, cnt, min_exp, max_exp, rounding); } // Explicit instantiations template void BaseRistrettoLayer<double>::QuantizeWeights_gpu( vector<shared_ptr<Blob<double> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<float>::QuantizeWeights_gpu( vector<shared_ptr<Blob<float> > > weights_quantized, const int rounding, const bool bias_term); template void BaseRistrettoLayer<double>::QuantizeLayerInputs_gpu(double* data, const int count); template void BaseRistrettoLayer<float>::QuantizeLayerInputs_gpu(float* data, const int count); template void BaseRistrettoLayer<double>::QuantizeLayerOutputs_gpu( double* top_data, const int top_count); template void BaseRistrettoLayer<float>::QuantizeLayerOutputs_gpu( float* top_data, const int top_count); template void BaseRistrettoLayer<double>::Trim2FixedPoint_gpu(double* data, const int cnt, const int bit_width, const int rounding, int fl); template void BaseRistrettoLayer<float>::Trim2FixedPoint_gpu(float* data, const int cnt, const int bit_width, const int rounding, int fl); template void BaseRistrettoLayer<double>::Trim2MiniFloat_gpu(double* data, const int cnt, const int bw_mant, const int bw_exp, const int rounding); template void BaseRistrettoLayer<float>::Trim2MiniFloat_gpu(float* data, const int cnt, const int bw_mant, const int bw_exp, const int rounding); template void BaseRistrettoLayer<double>::Trim2IntegerPowerOf2_gpu(double* data, const int cnt, const int min_exp, const int max_exp, const int rounding); template void BaseRistrettoLayer<float>::Trim2IntegerPowerOf2_gpu(float* data, const int cnt, const int min_exp, const int max_exp, const int rounding); } // namespace caffe
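
// Worked example (not part of Ristretto): the per-element arithmetic of
// Trim2FixedPoint_kernel, reproduced on the host for nearest rounding so the
// saturation range and quantization step are easy to check by hand. The
// parameter values below are arbitrary.

#include <cmath>
#include <cstdio>

float trim_to_fixed_point(float x, int bit_width, int fl) {
    // Signed fixed point with `bit_width` bits total and `fl` fractional bits:
    // step 2^-fl, range [-2^(bit_width-1), 2^(bit_width-1) - 1] * step.
    float step = std::pow(2.0f, -fl);
    float max_data = (std::pow(2.0f, bit_width - 1) - 1) * step;
    float min_data = -std::pow(2.0f, bit_width - 1) * step;
    x = std::fmax(std::fmin(x, max_data), min_data);   // saturate
    return std::rint(x / step) * step;                 // round to nearest step
}

int main() {
    // bit_width = 8, fl = 4  ->  step 0.0625, range [-8.0, 7.9375]
    printf("%f\n", trim_to_fixed_point(1.23f, 8, 4));   // 1.250000
    printf("%f\n", trim_to_fixed_point(25.0f, 8, 4));   // 7.937500 (saturated)
    printf("%f\n", trim_to_fixed_point(-25.0f, 8, 4));  // -8.000000 (saturated)
    return 0;
}
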
kernel_functions_for_tex_2d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>  // for the device-side printf below

texture<int, 2, hipReadModeElementType> tex_2d;

__global__ void read_texture_2d(int nx, int ny){
  int x = threadIdx.x + blockDim.x * blockIdx.x;
  int y = threadIdx.y + blockDim.y * blockIdx.y;
  if (x < nx && y < ny){
    int value = tex2D(tex_2d, x, y);
    printf("x: %d, y: %d, my value is %d\n", x, y, value);
  }
}
kernel_functions_for_tex_2d.cu
#include <cstdio>  // for the device-side printf below

texture<int, 2, cudaReadModeElementType> tex_2d;

__global__ void read_texture_2d(int nx, int ny){
  int x = threadIdx.x + blockDim.x * blockIdx.x;
  int y = threadIdx.y + blockDim.y * blockIdx.y;
  if (x < nx && y < ny){
    int value = tex2D(tex_2d, x, y);
    printf("x: %d, y: %d, my value is %d\n", x, y, value);
  }
}
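
// Illustrative host-side driver (not one of the paired files): the kernel
// above only reads from the texture reference, so something must allocate a
// cudaArray, copy data into it, and bind it before the launch. The sketch
// below repeats the declaration and kernel so it stands alone; the data and
// launch shape are arbitrary. Note that texture references such as tex_2d are
// deprecated in recent CUDA toolkits and removed in CUDA 12, where texture
// objects must be used instead.

#include <cstdio>
#include <cuda_runtime.h>

texture<int, 2, cudaReadModeElementType> tex_2d;

__global__ void read_texture_2d(int nx, int ny) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    if (x < nx && y < ny) {
        int value = tex2D(tex_2d, x, y);
        printf("x: %d, y: %d, my value is %d\n", x, y, value);
    }
}

int main() {
    const int nx = 4, ny = 4;
    int h_data[nx * ny];
    for (int i = 0; i < nx * ny; ++i) h_data[i] = i;

    // Back the texture with a 2D CUDA array and copy the host data into it.
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<int>();
    cudaArray_t arr;
    cudaMallocArray(&arr, &desc, nx, ny);
    cudaMemcpy2DToArray(arr, 0, 0, h_data, nx * sizeof(int),
                        nx * sizeof(int), ny, cudaMemcpyHostToDevice);
    cudaBindTextureToArray(tex_2d, arr, desc);

    dim3 block(8, 8);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    read_texture_2d<<<grid, block>>>(nx, ny);
    cudaDeviceSynchronize();

    cudaUnbindTexture(tex_2d);
    cudaFreeArray(arr);
    return 0;
}
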
1fddf82d690852ad20c715ef6904c8bf20b36757.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> #include "array2d.h" #include "cuda_helper.h" #include "ns2d.h" #define value_t double #define index_t int // constants __constant__ value_t c_zero, c_two, c_half; __global__ void predictor(index_t Nx, index_t Ny, value_t *u, value_t *v, value_t *p, value_t *u_star, value_t *v_star, value_t *p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int gid = i * Ny + j; if (i < Nx && j < Ny) { if (i == 0) // i = 0 y = all { u_star[gid] = c_zero; v_star[gid] = c_zero; gid = gid + Ny; p_star[gid - Ny] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else if (i == Nx - 1) // i = end y = all { u_star[gid] = c_zero; v_star[gid] = c_zero; gid = gid - Ny; p_star[gid + Ny] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else { if (j == 0) // i = all except for two ends y = 0 { u_star[gid] = c_zero; v_star[gid] = c_zero; gid = gid + 1; p_star[gid - 1] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else if (j == Ny - 1) // i = all except for two ends y = end { u_star[gid] = u0; v_star[gid] = c_zero; gid = gid - 1; p_star[gid + 1] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else { u_star[gid] = u[gid] // - dtdx * (u[gid] * (u[gid + Ny] - u[gid]) + p[gid + Ny] - p[gid]) // - dtdy * v[gid] * (u[gid + 1] - u[gid]) // + nu_dtdxx * (u[gid + Ny] - c_two * u[gid] + u[gid - Ny]) // + nu_dtdyy * (u[gid + 1] - c_two * u[gid] + u[gid - 1]); v_star[gid] = v[gid] // - dtdx * u[gid] * (v[gid + Ny] - v[gid]) // - dtdy * (v[gid] * (v[gid + 1] - v[gid]) + p[gid + 1] - p[gid]) // + nu_dtdxx * (v[gid + Ny] - c_two * v[gid] + v[gid - Ny]) // + nu_dtdyy * (v[gid + 1] - c_two * v[gid] + v[gid - 1]); p_star[gid] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } } } } __global__ void corrector(index_t Nx, index_t Ny, value_t *u, value_t *v, value_t *p, value_t *u_star, value_t *v_star, value_t *p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int gid = i * Ny + j; if (i < Nx && j < Ny) { if (i == 0) // i = 0 y = all { u[gid] = c_zero; v[gid] = c_zero; gid = gid + Ny; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid - Ny] = c_half * (p[gid] + p_star2); } else if (i == Nx - 1) // i = end y = all { u[gid] = c_zero; v[gid] = c_zero; gid = gid - Ny; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid + Ny] = c_half * (p[gid] + p_star2); } else { if (j == 0) // i = all except for two ends y = 0 { u[gid] = c_zero; v[gid] = c_zero; gid = gid + 1; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid - 1] = c_half * (p[gid] + p_star2); } else if (j == Ny - 1) // i = all except for two ends y = end { u[gid] = u0; v[gid] = c_zero; gid = gid - 1; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid + 1] = 
c_half * (p[gid] + p_star2); } else { value_t u_star2, v_star2, p_star2; u_star2 = u_star[gid] // - dtdx * (u_star[gid] * (u_star[gid] - u_star[gid - Ny]) + p_star[gid] - p_star[gid - Ny]) // - dtdy * v_star[gid] * (u_star[gid] - u_star[gid - 1]) // + nu_dtdxx * (u_star[gid + Ny] - c_two * u_star[gid] + u_star[gid - Ny]) // + nu_dtdyy * (u_star[gid + 1] - c_two * u_star[gid] + u_star[gid - 1]); v_star2 = v_star[gid] // - dtdx * u_star[gid] * (v_star[gid] - v_star[gid - Ny]) // - dtdy * (v_star[gid] * (v_star[gid] - v_star[gid - 1]) + p_star[gid] - p_star[gid - 1]) // + nu_dtdxx * (v_star[gid + Ny] - c_two * v_star[gid] + v_star[gid - Ny]) // + nu_dtdyy * (v_star[gid + 1] - c_two * v_star[gid] + v_star[gid - 1]); p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); u[gid] = c_half * (u[gid] + u_star2); v[gid] = c_half * (v[gid] + v_star2); p[gid] = c_half * (p[gid] + p_star2); } } } } inline void one_step(dim3 grid, dim3 block, index_t Nx, index_t Ny, value_t *d_u, value_t *d_v, value_t *d_p, value_t *d_u_star, value_t *d_v_star, value_t *d_p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { hipLaunchKernelGGL(( predictor), dim3(grid), dim3(block), 0, 0, Nx, Ny, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); checkCudaErrorsAfterKernels; hipLaunchKernelGGL(( corrector), dim3(grid), dim3(block), 0, 0, Nx, Ny, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); checkCudaErrorsAfterKernels; } struct ns2d_gpu : public ns2d<value_t, index_t> { void benchmark() { print_bench(); value_t **u = create_array2d<value_t, index_t>(side_size, side_size); value_t **v = create_array2d<value_t, index_t>(side_size, side_size); value_t **p = create_array2d<value_t, index_t>(side_size, side_size); value_t **u_star = create_array2d<value_t, index_t>(side_size, side_size); value_t **v_star = create_array2d<value_t, index_t>(side_size, side_size); value_t **p_star = create_array2d<value_t, index_t>(side_size, side_size); initial_condition(u); value_t *d_u, *d_v, *d_p; value_t *d_u_star, *d_v_star, *d_p_star; value_t *h_u = &u[0][0], *h_v = &v[0][0], *h_p = &p[0][0]; checkCudaErrors(hipMalloc(&d_u, total_size * sizeof(value_t))); checkCudaErrors(hipMalloc(&d_v, total_size * sizeof(value_t))); checkCudaErrors(hipMalloc(&d_p, total_size * sizeof(value_t))); checkCudaErrors(hipMalloc(&d_u_star, total_size * sizeof(value_t))); checkCudaErrors(hipMalloc(&d_v_star, total_size * sizeof(value_t))); checkCudaErrors(hipMalloc(&d_p_star, total_size * sizeof(value_t))); checkCudaErrors(hipMemcpy(d_u, h_u, total_size * sizeof(value_t), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_v, h_v, total_size * sizeof(value_t), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_p, h_p, total_size * sizeof(value_t), hipMemcpyHostToDevice)); value_t zero = 0.0; value_t two = 2.0; value_t half = 0.5; checkCudaErrors(hipMemcpyToSymbol(c_zero, &zero, sizeof(value_t), 0, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(c_two, &two, sizeof(value_t), 0, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpyToSymbol(c_half, &half, sizeof(value_t), 0, hipMemcpyHostToDevice)); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); dim3 blockd3 = dim3(block0, block1, 1); dim3 grid = calc_grid2d(blockd3, side_size, side_size); std::cout << " Block: " << 
blockd3.x << "(x) X " << blockd3.y << "(y)\n" << " Grid size: " << grid.x << "(x) X " << grid.y << "(y)\n\n"; value_t e0 = get_u_increment(grid, blockd3, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); loops = 0; auto startcpu = std::chrono::high_resolution_clock::now(); checkCudaErrors(hipEventRecord(start)); while ((std::chrono::duration_cast<std::chrono::milliseconds>( std::chrono::high_resolution_clock::now() - startcpu) .count()) < 1000.0 * benchtime) // while (loops < 200000) { one_step(grid, blockd3, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); loops++; } checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); float du = 0; checkCudaErrors(hipEventElapsedTime(&du, start, stop)); duration = 1.0e-3 * du; checkCudaErrors(hipMemcpy(h_u, d_u, total_size * sizeof(value_t), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_v, d_v, total_size * sizeof(value_t), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_p, d_p, total_size * sizeof(value_t), hipMemcpyDeviceToHost)); value_t ef = get_u_increment(grid, blockd3, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); std::cout << " \nCheck result\n" << " u incremtal initially: " << e0 << '\n' << " u incremtal initially: " << ef << '\n' << " ratio: " << ef / e0 << '\n'; print_performance(); // std::string fname = "test"; // write_txt_array2d<value_t, index_t>(u, side_size, side_size, fname + "_u"); // write_txt_array2d<value_t, index_t>(v, side_size, side_size, fname + "_v"); // write_txt_array2d<value_t, index_t>(p, side_size, side_size, fname + "_p"); destroy_array2d<value_t, index_t>(u); destroy_array2d<value_t, index_t>(v); destroy_array2d<value_t, index_t>(p); destroy_array2d<value_t, index_t>(u_star); destroy_array2d<value_t, index_t>(v_star); destroy_array2d<value_t, index_t>(p_star); checkCudaErrors(hipFree(d_u)); checkCudaErrors(hipFree(d_v)); checkCudaErrors(hipFree(d_p)); checkCudaErrors(hipFree(d_u_star)); checkCudaErrors(hipFree(d_v_star)); checkCudaErrors(hipFree(d_p_star)); } value_t get_u_increment(dim3 grid, dim3 block, index_t Nx, index_t Ny, value_t *d_u, value_t *d_v, value_t *d_p, value_t *d_u_star, value_t *d_v_star, value_t *d_p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { value_t **h_u = create_array2d<value_t, index_t>(side_size, side_size); value_t **u_inc = create_array2d<value_t, index_t>(side_size, side_size); checkCudaErrors(hipMemcpy(h_u[0], d_u, total_size * sizeof(value_t), hipMemcpyDeviceToHost)); #pragma omp parallel for for (index_t i = 0; i < Nx; i++) { for (index_t j = 0; j < Ny; j++) { u_inc[i][j] = -h_u[i][j]; } } one_step(grid, block, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); checkCudaErrors(hipMemcpy(h_u[0], d_u, total_size * sizeof(value_t), hipMemcpyDeviceToHost)); value_t sum = 0.0; for (index_t i = 0; i < Nx; i++) { for (index_t j = 0; j < Ny; j++) { u_inc[i][j] += h_u[i][j]; sum += u_inc[i][j] * u_inc[i][j]; } } destroy_array2d<value_t, index_t>(u_inc); return sum; } ns2d_gpu(int narg, char **arg) : ns2d(narg, arg) { } }; int main(int narg, char **arg) { ns2d_gpu test(narg, arg); test.benchmark(); }
1fddf82d690852ad20c715ef6904c8bf20b36757.cu
#include <chrono> #include "array2d.h" #include "cuda_helper.h" #include "ns2d.h" #define value_t double #define index_t int // constants __constant__ value_t c_zero, c_two, c_half; __global__ void predictor(index_t Nx, index_t Ny, value_t *u, value_t *v, value_t *p, value_t *u_star, value_t *v_star, value_t *p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int gid = i * Ny + j; if (i < Nx && j < Ny) { if (i == 0) // i = 0 y = all { u_star[gid] = c_zero; v_star[gid] = c_zero; gid = gid + Ny; p_star[gid - Ny] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else if (i == Nx - 1) // i = end y = all { u_star[gid] = c_zero; v_star[gid] = c_zero; gid = gid - Ny; p_star[gid + Ny] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else { if (j == 0) // i = all except for two ends y = 0 { u_star[gid] = c_zero; v_star[gid] = c_zero; gid = gid + 1; p_star[gid - 1] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else if (j == Ny - 1) // i = all except for two ends y = end { u_star[gid] = u0; v_star[gid] = c_zero; gid = gid - 1; p_star[gid + 1] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } else { u_star[gid] = u[gid] // - dtdx * (u[gid] * (u[gid + Ny] - u[gid]) + p[gid + Ny] - p[gid]) // - dtdy * v[gid] * (u[gid + 1] - u[gid]) // + nu_dtdxx * (u[gid + Ny] - c_two * u[gid] + u[gid - Ny]) // + nu_dtdyy * (u[gid + 1] - c_two * u[gid] + u[gid - 1]); v_star[gid] = v[gid] // - dtdx * u[gid] * (v[gid + Ny] - v[gid]) // - dtdy * (v[gid] * (v[gid + 1] - v[gid]) + p[gid + 1] - p[gid]) // + nu_dtdxx * (v[gid + Ny] - c_two * v[gid] + v[gid - Ny]) // + nu_dtdyy * (v[gid + 1] - c_two * v[gid] + v[gid - 1]); p_star[gid] = p[gid] // - c2_dtdx * (u[gid + Ny] - u[gid]) // - c2_dtdy * (v[gid + 1] - v[gid]); } } } } __global__ void corrector(index_t Nx, index_t Ny, value_t *u, value_t *v, value_t *p, value_t *u_star, value_t *v_star, value_t *p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int gid = i * Ny + j; if (i < Nx && j < Ny) { if (i == 0) // i = 0 y = all { u[gid] = c_zero; v[gid] = c_zero; gid = gid + Ny; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid - Ny] = c_half * (p[gid] + p_star2); } else if (i == Nx - 1) // i = end y = all { u[gid] = c_zero; v[gid] = c_zero; gid = gid - Ny; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid + Ny] = c_half * (p[gid] + p_star2); } else { if (j == 0) // i = all except for two ends y = 0 { u[gid] = c_zero; v[gid] = c_zero; gid = gid + 1; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid - 1] = c_half * (p[gid] + p_star2); } else if (j == Ny - 1) // i = all except for two ends y = end { u[gid] = u0; v[gid] = c_zero; gid = gid - 1; value_t p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); p[gid + 1] = c_half * (p[gid] + p_star2); } else { value_t u_star2, v_star2, p_star2; u_star2 = 
u_star[gid] // - dtdx * (u_star[gid] * (u_star[gid] - u_star[gid - Ny]) + p_star[gid] - p_star[gid - Ny]) // - dtdy * v_star[gid] * (u_star[gid] - u_star[gid - 1]) // + nu_dtdxx * (u_star[gid + Ny] - c_two * u_star[gid] + u_star[gid - Ny]) // + nu_dtdyy * (u_star[gid + 1] - c_two * u_star[gid] + u_star[gid - 1]); v_star2 = v_star[gid] // - dtdx * u_star[gid] * (v_star[gid] - v_star[gid - Ny]) // - dtdy * (v_star[gid] * (v_star[gid] - v_star[gid - 1]) + p_star[gid] - p_star[gid - 1]) // + nu_dtdxx * (v_star[gid + Ny] - c_two * v_star[gid] + v_star[gid - Ny]) // + nu_dtdyy * (v_star[gid + 1] - c_two * v_star[gid] + v_star[gid - 1]); p_star2 = p_star[gid] // - c2_dtdx * (u_star[gid] - u_star[gid - Ny]) // - c2_dtdy * (v_star[gid] - v_star[gid - 1]); u[gid] = c_half * (u[gid] + u_star2); v[gid] = c_half * (v[gid] + v_star2); p[gid] = c_half * (p[gid] + p_star2); } } } } inline void one_step(dim3 grid, dim3 block, index_t Nx, index_t Ny, value_t *d_u, value_t *d_v, value_t *d_p, value_t *d_u_star, value_t *d_v_star, value_t *d_p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { predictor<<<grid, block>>>(Nx, Ny, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); checkCudaErrorsAfterKernels; corrector<<<grid, block>>>(Nx, Ny, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); checkCudaErrorsAfterKernels; } struct ns2d_gpu : public ns2d<value_t, index_t> { void benchmark() { print_bench(); value_t **u = create_array2d<value_t, index_t>(side_size, side_size); value_t **v = create_array2d<value_t, index_t>(side_size, side_size); value_t **p = create_array2d<value_t, index_t>(side_size, side_size); value_t **u_star = create_array2d<value_t, index_t>(side_size, side_size); value_t **v_star = create_array2d<value_t, index_t>(side_size, side_size); value_t **p_star = create_array2d<value_t, index_t>(side_size, side_size); initial_condition(u); value_t *d_u, *d_v, *d_p; value_t *d_u_star, *d_v_star, *d_p_star; value_t *h_u = &u[0][0], *h_v = &v[0][0], *h_p = &p[0][0]; checkCudaErrors(cudaMalloc(&d_u, total_size * sizeof(value_t))); checkCudaErrors(cudaMalloc(&d_v, total_size * sizeof(value_t))); checkCudaErrors(cudaMalloc(&d_p, total_size * sizeof(value_t))); checkCudaErrors(cudaMalloc(&d_u_star, total_size * sizeof(value_t))); checkCudaErrors(cudaMalloc(&d_v_star, total_size * sizeof(value_t))); checkCudaErrors(cudaMalloc(&d_p_star, total_size * sizeof(value_t))); checkCudaErrors(cudaMemcpy(d_u, h_u, total_size * sizeof(value_t), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_v, h_v, total_size * sizeof(value_t), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_p, h_p, total_size * sizeof(value_t), cudaMemcpyHostToDevice)); value_t zero = 0.0; value_t two = 2.0; value_t half = 0.5; checkCudaErrors(cudaMemcpyToSymbol(c_zero, &zero, sizeof(value_t), 0, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(c_two, &two, sizeof(value_t), 0, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpyToSymbol(c_half, &half, sizeof(value_t), 0, cudaMemcpyHostToDevice)); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); dim3 blockd3 = dim3(block0, block1, 1); dim3 grid = calc_grid2d(blockd3, side_size, side_size); std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n" << " Grid size: " << grid.x << "(x) X " << grid.y << "(y)\n\n"; value_t e0 = 
get_u_increment(grid, blockd3, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); loops = 0; auto startcpu = std::chrono::high_resolution_clock::now(); checkCudaErrors(cudaEventRecord(start)); while ((std::chrono::duration_cast<std::chrono::milliseconds>( std::chrono::high_resolution_clock::now() - startcpu) .count()) < 1000.0 * benchtime) // while (loops < 200000) { one_step(grid, blockd3, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); loops++; } checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); float du = 0; checkCudaErrors(cudaEventElapsedTime(&du, start, stop)); duration = 1.0e-3 * du; checkCudaErrors(cudaMemcpy(h_u, d_u, total_size * sizeof(value_t), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_v, d_v, total_size * sizeof(value_t), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_p, d_p, total_size * sizeof(value_t), cudaMemcpyDeviceToHost)); value_t ef = get_u_increment(grid, blockd3, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); std::cout << " \nCheck result\n" << " u incremtal initially: " << e0 << '\n' << " u incremtal initially: " << ef << '\n' << " ratio: " << ef / e0 << '\n'; print_performance(); // std::string fname = "test"; // write_txt_array2d<value_t, index_t>(u, side_size, side_size, fname + "_u"); // write_txt_array2d<value_t, index_t>(v, side_size, side_size, fname + "_v"); // write_txt_array2d<value_t, index_t>(p, side_size, side_size, fname + "_p"); destroy_array2d<value_t, index_t>(u); destroy_array2d<value_t, index_t>(v); destroy_array2d<value_t, index_t>(p); destroy_array2d<value_t, index_t>(u_star); destroy_array2d<value_t, index_t>(v_star); destroy_array2d<value_t, index_t>(p_star); checkCudaErrors(cudaFree(d_u)); checkCudaErrors(cudaFree(d_v)); checkCudaErrors(cudaFree(d_p)); checkCudaErrors(cudaFree(d_u_star)); checkCudaErrors(cudaFree(d_v_star)); checkCudaErrors(cudaFree(d_p_star)); } value_t get_u_increment(dim3 grid, dim3 block, index_t Nx, index_t Ny, value_t *d_u, value_t *d_v, value_t *d_p, value_t *d_u_star, value_t *d_v_star, value_t *d_p_star, value_t dtdx, value_t dtdy, value_t nu_dtdxx, value_t nu_dtdyy, value_t c2_dtdx, value_t c2_dtdy, value_t u0) { value_t **h_u = create_array2d<value_t, index_t>(side_size, side_size); value_t **u_inc = create_array2d<value_t, index_t>(side_size, side_size); checkCudaErrors(cudaMemcpy(h_u[0], d_u, total_size * sizeof(value_t), cudaMemcpyDeviceToHost)); #pragma omp parallel for for (index_t i = 0; i < Nx; i++) { for (index_t j = 0; j < Ny; j++) { u_inc[i][j] = -h_u[i][j]; } } one_step(grid, block, side_size, side_size, d_u, d_v, d_p, d_u_star, d_v_star, d_p_star, dtdx, dtdy, nu_dtdxx, nu_dtdyy, c2_dtdx, c2_dtdy, u0); checkCudaErrors(cudaMemcpy(h_u[0], d_u, total_size * sizeof(value_t), cudaMemcpyDeviceToHost)); value_t sum = 0.0; for (index_t i = 0; i < Nx; i++) { for (index_t j = 0; j < Ny; j++) { u_inc[i][j] += h_u[i][j]; sum += u_inc[i][j] * u_inc[i][j]; } } destroy_array2d<value_t, index_t>(u_inc); return sum; } ns2d_gpu(int narg, char **arg) : ns2d(narg, arg) { } }; int main(int narg, char **arg) { ns2d_gpu test(narg, arg); test.benchmark(); }
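
// Sketch of assumed helpers (not one of the paired files): the benchmark above
// relies on checkCudaErrors, checkCudaErrorsAfterKernels and calc_grid2d from
// cuda_helper.h, which is not included in this excerpt. The definitions below
// are plausible stand-ins for illustration only; the project's real header may
// differ.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define checkCudaErrors(call)                                         \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// After a launch: catch launch-configuration errors immediately, then
// synchronize so asynchronous execution errors surface here as well.
#define checkCudaErrorsAfterKernels                                   \
    do {                                                              \
        checkCudaErrors(cudaGetLastError());                          \
        checkCudaErrors(cudaDeviceSynchronize());                     \
    } while (0)

// 2D grid that covers an nx-by-ny index space for the given block shape.
inline dim3 calc_grid2d(dim3 block, int nx, int ny) {
    return dim3((nx + block.x - 1) / block.x,
                (ny + block.y - 1) / block.y, 1);
}
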
2688a0872f112b8e826f471dded80da854940803.hip
// !!! This is a file automatically generated by hipify!!! //Program to multiply square matrix with activation vector, both filled with random numbers, and then to multiply the resulting vector by the matrix again- repeat for the //specified number of iterations. #include <hip/hip_runtime.h> #include <rocblas.h> #include <hiprand/hiprand.h> #include <cstdlib> #include <iostream> using std::cout; using std::endl; using std::copy; float *h_A = nullptr; float *h_B = nullptr; // fills matrix with random float // Param: pointer to matrix, number of rows, number of columns void GPU_fill_rand(float *matrix, int rows, int cols) { // Create a pseudo-random number generator hiprandGenerator_t prng; hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT); // Set the seed for the random number generator using the system clock hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); // Fill the array with uniformly distributed random numbers on the device between 0 and 1, where 0 is included and 1 is excluded hiprandGenerateUniform(prng, matrix, rows * cols); } // converts float values to integer between 0 and 10, where 0 is included and 10 is excluded // Param: pointer to matrix, number of elements in matrix __global__ void changeValues(float *matrix, int size) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < size) { float a = matrix[index] * 10; int b = (int) a; matrix[index] = (float) b; } } //Print matrix storage in column-major format //Param: pointer to matrix, number of rows, number of columns void print_matrix(const float *matrix, int rows, int cols) { for(int i = 0; i < rows; ++i){ for(int j = 0; j < cols; ++j){ std::cout << matrix[j * rows + i] << " "; } std::cout << std::endl; } std::cout << std::endl; } // perform the sqeeze function on each element of the vector resulting from the later iteration of matrix multiplication // Param: B = pointer to activation vector, dim = starting point of the vector results of the last iteration of matrix multiplication, // L and M are parameters of the squeeze function __global__ void squeeze(float *B, int dim, int length, float L, float M) { int index = (blockIdx.x * blockDim.x) + threadIdx.x + length; if (index < length + dim) { B[index] = 1 / (1 + expf(-1 * L * (B[index] - M))); } } // perform the matrix multiplication operation // Param: handle = handle to the cuBLAS library context. iterations = number of times we multiply activation vector by matrix // A = matrix. B = array of activation vectors calculated so far. dim = length & width of square matrix. 
L, M = parameter for squeeze function void gpu_blas_mmul(hipblasHandle_t &handle, int iterations, const float *A, float *B, const int dim, float L, float M) { const float alf = 1; // scalar used for multiplication const float bet = 0; // scalar used for multiplication const float *alpha = &alf; const float *beta = &bet; int length = 0; for (int i = 0; i < iterations; i++) { hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, dim, 1, dim, alpha, A, dim, B, dim, beta, (B + length), dim); hipLaunchKernelGGL(( squeeze), dim3((31 + dim) / 32), dim3(32), 0, 0, B, dim, length, L, M); length += dim; } } // external function defined in RandomMatrixMultiplication.h extern "C++" float *matrixMultiplication(int dim, int iterations, float L, float M) { int size_A = dim * dim; // allocate square matrix on host h_A = new float[size_A]; //allocate array to hold activation vectors on host h_B = new float[dim * iterations]; // allocate arrays on device float *dev_A, *dev_B; hipMalloc(&dev_A, size_A * sizeof(float)); hipMalloc(&dev_B, dim * iterations * sizeof(float)); // fill matrix and first activation vector with random values GPU_fill_rand(dev_A, dim, dim); GPU_fill_rand(dev_B, dim, 1); // change decimal values in matrix to integers between 0 and 10 hipLaunchKernelGGL(( changeValues), dim3((31 + size_A) / 32), dim3(32), 0, 0, dev_A, size_A); // create handle to the cuBLAS library context hipblasHandle_t handle; hipblasCreate(&handle); gpu_blas_mmul(handle, iterations, dev_A, dev_B, dim, L, M); // destroy handle hipblasDestroy(handle); // copy results to host hipMemcpy(h_A, dev_A, size_A * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(h_B, dev_B, dim * iterations * sizeof(float),hipMemcpyDeviceToHost); //Free GPU memory hipFree(dev_A); hipFree(dev_B); return h_B; } // return matrix extern "C++" float *getMatrix() { return h_A; }
2688a0872f112b8e826f471dded80da854940803.cu
// Program to multiply a square matrix with an activation vector, both filled with random numbers, and then to multiply the resulting vector by the matrix again;
// repeat for the specified number of iterations.
#include <cuda.h>
#include <cublas_v2.h>
#include <curand.h>
#include <cstdlib>
#include <iostream>

using std::cout;
using std::endl;
using std::copy;

float *h_A = nullptr;
float *h_B = nullptr;

// fills matrix with random floats
// Param: pointer to matrix, number of rows, number of columns
void GPU_fill_rand(float *matrix, int rows, int cols) {
	// Create a pseudo-random number generator
	curandGenerator_t prng;
	curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);

	// Set the seed for the random number generator using the system clock
	curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock());

	// Fill the array with uniformly distributed random numbers on the device between 0 and 1, where 0 is included and 1 is excluded
	curandGenerateUniform(prng, matrix, rows * cols);
}

// converts float values to integers between 0 and 10, where 0 is included and 10 is excluded
// Param: pointer to matrix, number of elements in matrix
__global__ void changeValues(float *matrix, int size) {
	int index = (blockIdx.x * blockDim.x) + threadIdx.x;
	if (index < size) {
		float a = matrix[index] * 10;
		int b = (int) a;
		matrix[index] = (float) b;
	}
}

// Print a matrix stored in column-major format
// Param: pointer to matrix, number of rows, number of columns
void print_matrix(const float *matrix, int rows, int cols) {
	for (int i = 0; i < rows; ++i) {
		for (int j = 0; j < cols; ++j) {
			std::cout << matrix[j * rows + i] << " ";
		}
		std::cout << std::endl;
	}
	std::cout << std::endl;
}

// perform the squeeze (logistic) function on each element of the vector produced by the latest iteration of matrix multiplication
// Param: B = pointer to the array of activation vectors, dim = length of one activation vector, length = starting offset of the latest result within B,
// L and M are parameters of the squeeze function
__global__ void squeeze(float *B, int dim, int length, float L, float M) {
	int index = (blockIdx.x * blockDim.x) + threadIdx.x + length;
	if (index < length + dim) {
		B[index] = 1 / (1 + expf(-1 * L * (B[index] - M)));
	}
}

// perform the matrix multiplication operation
// Param: handle = handle to the cuBLAS library context. iterations = number of times we multiply the activation vector by the matrix.
// A = matrix. B = array of activation vectors calculated so far. dim = length & width of the square matrix. L, M = parameters of the squeeze function
void gpu_blas_mmul(cublasHandle_t &handle, int iterations, const float *A, float *B, const int dim, float L, float M) {
	const float alf = 1; // scalar used for multiplication
	const float bet = 0; // scalar used for multiplication
	const float *alpha = &alf;
	const float *beta = &bet;
	int length = 0;

	for (int i = 0; i < iterations; i++) {
		cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, dim, 1, dim, alpha, A, dim, B, dim, beta, (B + length), dim);
		squeeze<<<(31 + dim) / 32, 32>>>(B, dim, length, L, M);
		length += dim;
	}
}

// external function defined in RandomMatrixMultiplication.h
extern "C++" float *matrixMultiplication(int dim, int iterations, float L, float M) {
	int size_A = dim * dim;

	// allocate square matrix on host
	h_A = new float[size_A];

	// allocate array to hold activation vectors on host
	h_B = new float[dim * iterations];

	// allocate arrays on device
	float *dev_A, *dev_B;
	cudaMalloc(&dev_A, size_A * sizeof(float));
	cudaMalloc(&dev_B, dim * iterations * sizeof(float));

	// fill matrix and first activation vector with random values
	GPU_fill_rand(dev_A, dim, dim);
	GPU_fill_rand(dev_B, dim, 1);

	// change decimal values in matrix to integers between 0 and 10
	changeValues<<<(31 + size_A) / 32, 32>>>(dev_A, size_A);

	// create handle to the cuBLAS library context
	cublasHandle_t handle;
	cublasCreate(&handle);

	gpu_blas_mmul(handle, iterations, dev_A, dev_B, dim, L, M);

	// destroy handle
	cublasDestroy(handle);

	// copy results to host
	cudaMemcpy(h_A, dev_A, size_A * sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(h_B, dev_B, dim * iterations * sizeof(float), cudaMemcpyDeviceToHost);

	// free GPU memory
	cudaFree(dev_A);
	cudaFree(dev_B);

	return h_B;
}

// return matrix
extern "C++" float *getMatrix() {
	return h_A;
}
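A short, hypothetical driver may clarify how matrixMultiplication and getMatrix are meant to be consumed through RandomMatrixMultiplication.h. The dim/iterations/L/M values below are illustrative only, and the delete[] calls assume the caller takes ownership of the new[] buffers the functions return, as the code above suggests; this is a sketch, not part of the original project.

#include <iostream>

// Declarations matching the definitions above (normally provided by RandomMatrixMultiplication.h).
float *matrixMultiplication(int dim, int iterations, float L, float M);
float *getMatrix();

int main() {
    const int dim = 4;              // size of the square matrix (hypothetical value)
    const int iterations = 3;       // number of activation vectors to compute
    const float L = 1.0f, M = 5.0f; // squeeze-function parameters (hypothetical values)

    // h_B: column-major dim x iterations array, one squeezed activation vector per iteration
    float *activations = matrixMultiplication(dim, iterations, L, M);
    float *A = getMatrix();         // column-major dim x dim random integer matrix

    for (int i = 0; i < iterations; ++i) {
        std::cout << "iteration " << i << ":";
        for (int r = 0; r < dim; ++r)
            std::cout << " " << activations[i * dim + r];
        std::cout << "\n";
    }

    // Ownership of the host buffers is handed back to the caller; they were allocated with new[].
    delete[] activations;
    delete[] A;
    return 0;
}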
77d50ad86abeb0e57d2131adb6213475c6fbcc2c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mat_ones_kernel.h"

#define BLOCK_SIZE 32

// Writes 1.0f into every element of the m x n matrix dst (src is unused by the kernel but kept for a uniform interface).
__global__ void mat_ones_kernel(const float *__restrict__ src, float *__restrict__ dst,
                                int m, int n) {
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row < m && col < n) {
    dst[row * n + col] = 1.0f;
  }
}

// Host-side launcher: covers the matrix with BLOCK_SIZE x BLOCK_SIZE thread blocks and synchronizes.
void mat_ones_kernel_exec(const float *src, float *dst, int m, int n) {
  dim3 block(BLOCK_SIZE, BLOCK_SIZE);
  dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y);

  hipLaunchKernelGGL(( mat_ones_kernel), dim3(grid), dim3(block), 0, 0, src, dst, m, n);
  hipDeviceSynchronize();
}
77d50ad86abeb0e57d2131adb6213475c6fbcc2c.cu
#include "mat_ones_kernel.h" #define BLOCK_SIZE 32 __global__ void mat_ones_kernel(const float *__restrict__ src, float *__restrict__ dst, int m, int n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < m && col < n) { dst[row * n + col] = 1.0; } } void mat_ones_kernel_exec(const float *src, float *dst, int m, int n) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y); mat_ones_kernel<<<grid, block>>>(src, dst, m, n); cudaThreadSynchronize(); }
a379dbe091ae1d93e56dc84ea98b17cb2c99fc13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //#include <cutil.h> // cutil32.lib #include <string.h> #include "system_kern.cu" extern Particles_struct specie; extern "C" { int iDivUp (int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } void computeNumBlocks (int numPnts, int maxThreads, int &numBlocks, int &numThreads) { //numThreads = min( maxThreads, numPnts ); numThreads = 256; numBlocks = iDivUp ( numPnts, numThreads ); } bool InitCUDA(void) { int count = 0; int i = 0; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } hipSetDevice(i); printf("CUDA initialized.\n"); return true; } void SetupCUDA (int grid_num1, int grid_num3, int number) { //computeNumBlocks ( params.NumParticles, 256, params.NumBlocks, params.NumThreads); // particles //computeNumBlocks ( params.NumBoundaries, 256, params.NumBoundaryBlocks, params.NumBoundaryThreads); // boundaries // CUDA_SAFE_CALL ( hipMalloc ( (void**) &&cuda_specie, sizeof(Particles_struct)) ); //hipMemcpyToSymbol ( (char *)&cuda_specie, &specie, sizeof(Particles_struct), 0, hipMemcpyHostToDevice ) ; hipMalloc ( (void**) &x1, sizeof(flcuda)* number) ; hipMalloc ( (void**) &x3, sizeof(flcuda)* number) ; hipMalloc ( (void**) &v1, sizeof(flcuda)* number) ; hipMalloc ( (void**) &v2, sizeof(flcuda)* number) ; hipMalloc ( (void**) &v3, sizeof(flcuda)* number) ; hipMalloc ( (void**) &is_alive, sizeof(bool)* number) ; hipMalloc ( (void**) &e1, sizeof(flcuda)* (grid_num1 - 1) * grid_num3) ; hipMalloc ( (void**) &e2, sizeof(flcuda)* grid_num1 * grid_num3 ) ; hipMalloc ( (void**) &e3, sizeof(flcuda)* grid_num1 * (grid_num3 - 1) ) ; hipMalloc ( (void**) &h1, sizeof(flcuda)* grid_num1 * (grid_num3 - 1) ) ; hipMalloc ( (void**) &h2, sizeof(flcuda)* (grid_num1 - 1) * (grid_num3 - 1) ) ; hipMalloc ( (void**) &h3, sizeof(flcuda)* (grid_num1 - 1) * grid_num3) ; hipDeviceSynchronize (); } void CopySpecie2Cuda (Particles_struct specie) { hipMemcpyToSymbol ( (char *)&cuda_specie, &specie, sizeof(Particles_struct), 0, hipMemcpyHostToDevice ) ; hipDeviceSynchronize (); } //void TransferXVToCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3); //void TransferEHToCUDA (flcuda* CPU_e1, flcuda* CPU_e2, flcuda* CPU_e3, flcuda* CPU_h1, flcuda* CPU_h2, flcuda* CPU_h3); //void TransferXVFromCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3); void TransferXVToCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3, bool* CPU_is_alive, int number) { hipMemcpy (x1, CPU_x1, number * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (x3, CPU_x3, number * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (v1, CPU_v1, number * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (v2, CPU_v2, number * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (v3, CPU_v3, number * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (is_alive, CPU_is_alive, number * sizeof(bool), hipMemcpyHostToDevice ) ; hipDeviceSynchronize (); } void TransferEHToCUDA (flcuda* CPU_e1, flcuda* CPU_e2, flcuda* CPU_e3, flcuda* CPU_h1, flcuda* CPU_h2, flcuda* CPU_h3, int grid_num1, int grid_num3) { hipMemcpy (e1, CPU_e1, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), 
hipMemcpyHostToDevice ) ; hipMemcpy (e2, CPU_e2, grid_num1 * grid_num3 * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (e3, CPU_e3, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (h1, CPU_h1, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (h2, CPU_h2, (grid_num1 - 1) * (grid_num3 - 1) * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipMemcpy (h3, CPU_h3, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), hipMemcpyHostToDevice ) ; hipDeviceSynchronize (); } void TransferEHFromCUDA (flcuda* CPU_e1, flcuda* CPU_e2, flcuda* CPU_e3, flcuda* CPU_h1, flcuda* CPU_h2, flcuda* CPU_h3, int grid_num1, int grid_num3) { hipMemcpy (CPU_e1, e1, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_e2, e2, grid_num1 * grid_num3 * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_e3, e3, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_h1, h1, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_h2, h2, (grid_num1 - 1) * (grid_num3 - 1) * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_h3, h3, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipDeviceSynchronize (); } void TransferXVFromCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3, int number) { // hipMemcpy (CPU_x1, x1, number * sizeof(flcuda), hipMemcpyDeviceToHost ) ; //hipMemcpy (CPU_x3, x3, number * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_v1, v1, number * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_v2, v2, number * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipMemcpy (CPU_v3, v3, number * sizeof(flcuda), hipMemcpyDeviceToHost ) ; hipDeviceSynchronize (); } //void TransferToCUDA (Particle* CPU_ParticlesArray, int numPoints ) //{ // CUDA_SAFE_CALL( hipMemcpy (ParticlesArray, CPU_ParticlesArray, numPoints * sizeof(Particle), hipMemcpyHostToDevice ) ); // hipDeviceSynchronize (); //} // //void TransferFromCUDA ( Particle* CPU_ParticlesArray, int numPoints ) //{ // CUDA_SAFE_CALL( hipMemcpy ( CPU_ParticlesArray, ParticlesArray, numPoints * sizeof(Particle), hipMemcpyDeviceToHost ) ); // hipDeviceSynchronize (); //} void CUDA_StepV(int number, flcuda dt) { int numBlocks = 0, numThreads = 0; //computeNumBlocks (cuda_specie.number, 256, numBlocks, numThreads); numThreads = 240; numBlocks = iDivUp(number, numThreads); hipLaunchKernelGGL(( StepV), dim3(numBlocks), dim3(numThreads), 0, 0, x1, x3, v1, v2, v3, e1, e2, e3, h1, h2, h3, is_alive, number, dt); //CUT_CHECK_ERROR( "Kernel execution failed"); hipDeviceSynchronize (); } //void TransferFromCUDA ( Particle* CPU_ParticlesArray, int numPoints ) //{ // CUDA_SAFE_CALL( hipMemcpy ( CPU_ParticlesArray, ParticlesArray, numPoints * sizeof(Particle), hipMemcpyDeviceToHost ) ); // hipDeviceSynchronize (); //} //void CUDA_Advance (flcuda dt) //{ // Advance<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, params.TimeStep); // CUT_CHECK_ERROR( "Kernel execution failed"); // hipDeviceSynchronize (); //} // //void CUDA_AdvanceCoordinates () //{ // AdvanceCoordinates<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, params.TimeStep); // CUT_CHECK_ERROR( "Kernel execution failed"); // hipDeviceSynchronize (); //} //void CUDA_AdvanceVelocities () //{ // AdvanceVelocities<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, params.TimeStep); // CUT_CHECK_ERROR( "Kernel execution failed"); // hipDeviceSynchronize (); //} // // 
//void CUDA_CopyParticlesToGLBuffer(float3* positions) //{ // CopyParticlesToGLBuffer<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, positions ); //} }
a379dbe091ae1d93e56dc84ea98b17cb2c99fc13.cu
//#include <cutil.h> // cutil32.lib #include <string.h> #include "system_kern.cu" extern Particles_struct specie; extern "C" { int iDivUp (int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } void computeNumBlocks (int numPnts, int maxThreads, int &numBlocks, int &numThreads) { //numThreads = min( maxThreads, numPnts ); numThreads = 256; numBlocks = iDivUp ( numPnts, numThreads ); } bool InitCUDA(void) { int count = 0; int i = 0; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } cudaSetDevice(i); printf("CUDA initialized.\n"); return true; } void SetupCUDA (int grid_num1, int grid_num3, int number) { //computeNumBlocks ( params.NumParticles, 256, params.NumBlocks, params.NumThreads); // particles //computeNumBlocks ( params.NumBoundaries, 256, params.NumBoundaryBlocks, params.NumBoundaryThreads); // boundaries // CUDA_SAFE_CALL ( cudaMalloc ( (void**) &&cuda_specie, sizeof(Particles_struct)) ); //cudaMemcpyToSymbol ( (char *)&cuda_specie, &specie, sizeof(Particles_struct), 0, cudaMemcpyHostToDevice ) ; cudaMalloc ( (void**) &x1, sizeof(flcuda)* number) ; cudaMalloc ( (void**) &x3, sizeof(flcuda)* number) ; cudaMalloc ( (void**) &v1, sizeof(flcuda)* number) ; cudaMalloc ( (void**) &v2, sizeof(flcuda)* number) ; cudaMalloc ( (void**) &v3, sizeof(flcuda)* number) ; cudaMalloc ( (void**) &is_alive, sizeof(bool)* number) ; cudaMalloc ( (void**) &e1, sizeof(flcuda)* (grid_num1 - 1) * grid_num3) ; cudaMalloc ( (void**) &e2, sizeof(flcuda)* grid_num1 * grid_num3 ) ; cudaMalloc ( (void**) &e3, sizeof(flcuda)* grid_num1 * (grid_num3 - 1) ) ; cudaMalloc ( (void**) &h1, sizeof(flcuda)* grid_num1 * (grid_num3 - 1) ) ; cudaMalloc ( (void**) &h2, sizeof(flcuda)* (grid_num1 - 1) * (grid_num3 - 1) ) ; cudaMalloc ( (void**) &h3, sizeof(flcuda)* (grid_num1 - 1) * grid_num3) ; cudaThreadSynchronize (); } void CopySpecie2Cuda (Particles_struct specie) { cudaMemcpyToSymbol ( (char *)&cuda_specie, &specie, sizeof(Particles_struct), 0, cudaMemcpyHostToDevice ) ; cudaThreadSynchronize (); } //void TransferXVToCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3); //void TransferEHToCUDA (flcuda* CPU_e1, flcuda* CPU_e2, flcuda* CPU_e3, flcuda* CPU_h1, flcuda* CPU_h2, flcuda* CPU_h3); //void TransferXVFromCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3); void TransferXVToCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3, bool* CPU_is_alive, int number) { cudaMemcpy (x1, CPU_x1, number * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (x3, CPU_x3, number * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (v1, CPU_v1, number * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (v2, CPU_v2, number * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (v3, CPU_v3, number * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (is_alive, CPU_is_alive, number * sizeof(bool), cudaMemcpyHostToDevice ) ; cudaThreadSynchronize (); } void TransferEHToCUDA (flcuda* CPU_e1, flcuda* CPU_e2, flcuda* CPU_e3, flcuda* CPU_h1, flcuda* CPU_h2, flcuda* CPU_h3, int grid_num1, int grid_num3) { cudaMemcpy (e1, CPU_e1, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (e2, CPU_e2, grid_num1 * 
grid_num3 * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (e3, CPU_e3, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (h1, CPU_h1, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (h2, CPU_h2, (grid_num1 - 1) * (grid_num3 - 1) * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaMemcpy (h3, CPU_h3, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), cudaMemcpyHostToDevice ) ; cudaThreadSynchronize (); } void TransferEHFromCUDA (flcuda* CPU_e1, flcuda* CPU_e2, flcuda* CPU_e3, flcuda* CPU_h1, flcuda* CPU_h2, flcuda* CPU_h3, int grid_num1, int grid_num3) { cudaMemcpy (CPU_e1, e1, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_e2, e2, grid_num1 * grid_num3 * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_e3, e3, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_h1, h1, grid_num1 * (grid_num3 - 1) * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_h2, h2, (grid_num1 - 1) * (grid_num3 - 1) * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_h3, h3, (grid_num1 - 1) * grid_num3 * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaThreadSynchronize (); } void TransferXVFromCUDA (flcuda* CPU_x1, flcuda* CPU_x3, flcuda* CPU_v1, flcuda* CPU_v2, flcuda* CPU_v3, int number) { // cudaMemcpy (CPU_x1, x1, number * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; //cudaMemcpy (CPU_x3, x3, number * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_v1, v1, number * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_v2, v2, number * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaMemcpy (CPU_v3, v3, number * sizeof(flcuda), cudaMemcpyDeviceToHost ) ; cudaThreadSynchronize (); } //void TransferToCUDA (Particle* CPU_ParticlesArray, int numPoints ) //{ // CUDA_SAFE_CALL( cudaMemcpy (ParticlesArray, CPU_ParticlesArray, numPoints * sizeof(Particle), cudaMemcpyHostToDevice ) ); // cudaThreadSynchronize (); //} // //void TransferFromCUDA ( Particle* CPU_ParticlesArray, int numPoints ) //{ // CUDA_SAFE_CALL( cudaMemcpy ( CPU_ParticlesArray, ParticlesArray, numPoints * sizeof(Particle), cudaMemcpyDeviceToHost ) ); // cudaThreadSynchronize (); //} void CUDA_StepV(int number, flcuda dt) { int numBlocks = 0, numThreads = 0; //computeNumBlocks (cuda_specie.number, 256, numBlocks, numThreads); numThreads = 240; numBlocks = iDivUp(number, numThreads); StepV<<<numBlocks, numThreads>>> (x1, x3, v1, v2, v3, e1, e2, e3, h1, h2, h3, is_alive, number, dt); //CUT_CHECK_ERROR( "Kernel execution failed"); cudaThreadSynchronize (); } //void TransferFromCUDA ( Particle* CPU_ParticlesArray, int numPoints ) //{ // CUDA_SAFE_CALL( cudaMemcpy ( CPU_ParticlesArray, ParticlesArray, numPoints * sizeof(Particle), cudaMemcpyDeviceToHost ) ); // cudaThreadSynchronize (); //} //void CUDA_Advance (flcuda dt) //{ // Advance<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, params.TimeStep); // CUT_CHECK_ERROR( "Kernel execution failed"); // cudaThreadSynchronize (); //} // //void CUDA_AdvanceCoordinates () //{ // AdvanceCoordinates<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, params.TimeStep); // CUT_CHECK_ERROR( "Kernel execution failed"); // cudaThreadSynchronize (); //} //void CUDA_AdvanceVelocities () //{ // AdvanceVelocities<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, params.TimeStep); // CUT_CHECK_ERROR( "Kernel execution failed"); // cudaThreadSynchronize (); //} // // //void CUDA_CopyParticlesToGLBuffer(float3* 
positions) //{ // CopyParticlesToGLBuffer<<< params.NumBlocks, params.NumThreads>>> (ParticlesArray, positions ); //} }
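The extern "C" host API above (SetupCUDA, the Transfer* copies, CUDA_StepV) is only shown from the device side, so a call-order sketch may help. It is hypothetical, not part of the original project: it assumes flcuda is a double typedef (the real typedef lives in the project headers) and skips the Particles_struct/CopySpecie2Cuda setup whose layout is not shown here.

// Hypothetical call-order sketch for the host API defined above.
// ASSUMPTION: flcuda is typedef'd to double in the real project headers.
typedef double flcuda;

extern "C" {
void SetupCUDA(int grid_num1, int grid_num3, int number);
void TransferXVToCUDA(flcuda *x1, flcuda *x3, flcuda *v1, flcuda *v2, flcuda *v3,
                      bool *is_alive, int number);
void TransferEHToCUDA(flcuda *e1, flcuda *e2, flcuda *e3,
                      flcuda *h1, flcuda *h2, flcuda *h3,
                      int grid_num1, int grid_num3);
void CUDA_StepV(int number, flcuda dt);
void TransferXVFromCUDA(flcuda *x1, flcuda *x3, flcuda *v1, flcuda *v2, flcuda *v3,
                        int number);
}

// One velocity push: upload particle state and field grids, run StepV, download velocities.
void advance_velocities_once(flcuda *x1, flcuda *x3, flcuda *v1, flcuda *v2, flcuda *v3,
                             bool *alive,
                             flcuda *e1, flcuda *e2, flcuda *e3,
                             flcuda *h1, flcuda *h2, flcuda *h3,
                             int grid_num1, int grid_num3, int number, flcuda dt) {
    SetupCUDA(grid_num1, grid_num3, number);                        // allocate the device buffers
    TransferXVToCUDA(x1, x3, v1, v2, v3, alive, number);            // particle state -> device
    TransferEHToCUDA(e1, e2, e3, h1, h2, h3, grid_num1, grid_num3); // field grids -> device
    CUDA_StepV(number, dt);                                         // launch the StepV kernel
    TransferXVFromCUDA(x1, x3, v1, v2, v3, number);                 // updated velocities <- device
}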
ae7ce8f1e13e5a896d8d79b158ac6147a095eebb.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <sparse.hpp> #include <arith.hpp> #include <cast.hpp> #include <common/err_common.hpp> #include <complex.hpp> #include <copy.hpp> #include <cusparse.hpp> #include <kernel/sparse.hpp> #include <lookup.hpp> #include <math.hpp> #include <platform.hpp> #include <where.hpp> #include <stdexcept> #include <string> namespace cuda { using namespace common; // hipsparseStatus_t hipsparseZdense2csr(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *A, int lda, // const int *nnzPerRow, // hipDoubleComplex *csrValA, // int *csrRowPtrA, int *csrColIndA) template<typename T> struct dense2csr_func_def_t { typedef hipsparseStatus_t (*dense2csr_func_def)(hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; // hipsparseStatus_t hipsparseZdense2csc(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *A, int lda, // const int *nnzPerCol, // hipDoubleComplex *cscValA, // int *cscRowIndA, int *cscColPtrA) template<typename T> struct dense2csc_func_def_t { typedef hipsparseStatus_t (*dense2csc_func_def)(hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; // hipsparseStatus_t hipsparseZcsr2dense(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *csrValA, // const int *csrRowPtrA, // const int *csrColIndA, // hipDoubleComplex *A, int lda) template<typename T> struct csr2dense_func_def_t { typedef hipsparseStatus_t (*csr2dense_func_def)(hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, const int *, const int *, T *, int); }; // hipsparseStatus_t hipsparseZcsc2dense(hipsparseHandle_t handle, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *cscValA, // const int *cscRowIndA, // const int *cscColPtrA, // hipDoubleComplex *A, int lda) template<typename T> struct csc2dense_func_def_t { typedef hipsparseStatus_t (*csc2dense_func_def)(hipsparseHandle_t, int, int, const hipsparseMatDescr_t, const T *, const int *, const int *, T *, int); }; // hipsparseStatus_t hipsparseZnnz(hipsparseHandle_t handle, // hipsparseDirection_t dirA, // int m, int n, // const hipsparseMatDescr_t descrA, // const hipDoubleComplex *A, int lda, // int *nnzPerRowColumn, // int *nnzTotalDevHostPtr) template<typename T> struct nnz_func_def_t { typedef hipsparseStatus_t (*nnz_func_def)(hipsparseHandle_t, hipsparseDirection_t, int, int, const hipsparseMatDescr_t, const T *, int, int *, int *); }; // hipsparseStatus_t hipsparseZgthr(hipsparseHandle_t handle, // int nnz, // const hipDoubleComplex *y, // hipDoubleComplex *xVal, const int *xInd, // hipsparseIndexBase_t idxBase) template<typename T> struct gthr_func_def_t { typedef hipsparseStatus_t (*gthr_func_def)(hipsparseHandle_t, int, const T *, T *, const int *, hipsparseIndexBase_t); }; #define SPARSE_FUNC_DEF(FUNC) \ template<typename T> \ typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func(); #define SPARSE_FUNC(FUNC, TYPE, PREFIX) \ template<> \ typename 
FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def) & \ cusparse##PREFIX##FUNC; \ } SPARSE_FUNC_DEF(dense2csr) SPARSE_FUNC(dense2csr, float, S) SPARSE_FUNC(dense2csr, double, D) SPARSE_FUNC(dense2csr, cfloat, C) SPARSE_FUNC(dense2csr, cdouble, Z) SPARSE_FUNC_DEF(dense2csc) SPARSE_FUNC(dense2csc, float, S) SPARSE_FUNC(dense2csc, double, D) SPARSE_FUNC(dense2csc, cfloat, C) SPARSE_FUNC(dense2csc, cdouble, Z) SPARSE_FUNC_DEF(csr2dense) SPARSE_FUNC(csr2dense, float, S) SPARSE_FUNC(csr2dense, double, D) SPARSE_FUNC(csr2dense, cfloat, C) SPARSE_FUNC(csr2dense, cdouble, Z) SPARSE_FUNC_DEF(csc2dense) SPARSE_FUNC(csc2dense, float, S) SPARSE_FUNC(csc2dense, double, D) SPARSE_FUNC(csc2dense, cfloat, C) SPARSE_FUNC(csc2dense, cdouble, Z) SPARSE_FUNC_DEF(nnz) SPARSE_FUNC(nnz, float, S) SPARSE_FUNC(nnz, double, D) SPARSE_FUNC(nnz, cfloat, C) SPARSE_FUNC(nnz, cdouble, Z) SPARSE_FUNC_DEF(gthr) SPARSE_FUNC(gthr, float, S) SPARSE_FUNC(gthr, double, D) SPARSE_FUNC(gthr, cfloat, C) SPARSE_FUNC(gthr, cdouble, Z) #undef SPARSE_FUNC #undef SPARSE_FUNC_DEF // Partial template specialization of sparseConvertDenseToStorage for COO // However, template specialization is not allowed template<typename T> SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) { Array<uint> nonZeroIdx_ = where<T>(in); Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_); dim_t nNZ = nonZeroIdx.elements(); Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]); Array<int> rowIdx = arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<int> colIdx = arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<T> values = copyArray<T>(in); values.modDims(dim4(values.elements())); values = lookup<T, int>(values, nonZeroIdx, 0); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, AF_STORAGE_COO); } template<typename T, af_storage stype> SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) { const int M = in.dims()[0]; const int N = in.dims()[1]; // Create Sparse Matrix Descriptor hipsparseMatDescr_t descr = 0; CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); int d = -1; hipsparseDirection_t dir = HIPSPARSE_DIRECTION_ROW; if (stype == AF_STORAGE_CSR) { d = M; dir = HIPSPARSE_DIRECTION_ROW; } else { d = N; dir = HIPSPARSE_DIRECTION_COLUMN; } Array<int> nnzPerDir = createEmptyArray<int>(dim4(d)); int nNZ = -1; CUSPARSE_CHECK(nnz_func<T>()(sparseHandle(), dir, M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), &nNZ)); Array<int> rowIdx = createEmptyArray<int>(dim4()); Array<int> colIdx = createEmptyArray<int>(dim4()); if (stype == AF_STORAGE_CSR) { rowIdx = createEmptyArray<int>(dim4(M + 1)); colIdx = createEmptyArray<int>(dim4(nNZ)); } else { rowIdx = createEmptyArray<int>(dim4(nNZ)); colIdx = createEmptyArray<int>(dim4(N + 1)); } Array<T> values = createEmptyArray<T>(dim4(nNZ)); if (stype == AF_STORAGE_CSR) CUSPARSE_CHECK(dense2csr_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); else CUSPARSE_CHECK(dense2csc_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr)); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, stype); } // Partial 
template specialization of sparseConvertStorageToDense for COO // However, template specialization is not allowed template<typename T> Array<T> sparseConvertCOOToDense(const SparseArray<T> &in) { Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); const Array<T> values = in.getValues(); const Array<int> rowIdx = in.getRowIdx(); const Array<int> colIdx = in.getColIdx(); kernel::coo2dense<T>(dense, values, rowIdx, colIdx); return dense; } template<typename T, af_storage stype> Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) { // Create Sparse Matrix Descriptor hipsparseMatDescr_t descr = 0; CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); int M = in.dims()[0]; int N = in.dims()[1]; Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); int d_strides1 = dense.strides()[1]; if (stype == AF_STORAGE_CSR) CUSPARSE_CHECK( csr2dense_func<T>()(sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); else CUSPARSE_CHECK( csc2dense_func<T>()(sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr)); return dense; } template<typename T, af_storage dest, af_storage src> SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) { using std::shared_ptr; in.eval(); int nNZ = in.getNNZ(); SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest); if (src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) { // Copy colIdx as is CUDA_CHECK( hipMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(), in.getColIdx().elements() * sizeof(int), hipMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to expand compressed row into coordinate CUSPARSE_CHECK(hipsparseXcsr2coo( sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0], converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO)); // Call sort size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes).release(), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>); CUSPARSE_CHECK( hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(hipsparseXcoosortByColumn( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), P.get(), (void *)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()(sparseHandle(), nNZ, in.getValues().get(), converted.getValues().get(), P.get(), HIPSPARSE_INDEX_BASE_ZERO)); } else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) { // The cusparse csr sort function is not behaving correctly. 
// So the work around is to convert the COO into row major and then // convert it to CSR // Deep copy input into temporary COO Row Major SparseArray<T> cooT = createArrayDataSparseArray<T>( in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(), in.getStorage(), true); // Call sort to convert column major to row major { size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(hipsparseXcoosort_bufferSizeExt( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer( memAlloc<char>(pBufferSizeInBytes).release(), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>); CUSPARSE_CHECK(hipsparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(hipsparseXcoosortByRow( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), P.get(), (void *)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()( sparseHandle(), nNZ, in.getValues().get(), cooT.getValues().get(), P.get(), HIPSPARSE_INDEX_BASE_ZERO)); } // Copy values and colIdx as is CUDA_CHECK( hipMemcpyAsync(converted.getValues().get(), cooT.getValues().get(), cooT.getValues().elements() * sizeof(T), hipMemcpyDeviceToDevice, cuda::getActiveStream())); CUDA_CHECK( hipMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(), cooT.getColIdx().elements() * sizeof(int), hipMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to compress row from coordinate CUSPARSE_CHECK(hipsparseXcoo2csr( sparseHandle(), cooT.getRowIdx().get(), nNZ, cooT.dims()[0], converted.getRowIdx().get(), HIPSPARSE_INDEX_BASE_ZERO)); // No need to call CSRSORT } else { // Should never come here AF_ERROR("CUDA Backend invalid conversion combination", AF_ERR_NOT_SUPPORTED); } return converted; } #define INSTANTIATE_TO_STORAGE(T, S) \ template SparseArray<T> \ sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>( \ const SparseArray<T> &in); \ template SparseArray<T> \ sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>( \ const SparseArray<T> &in); \ template SparseArray<T> \ sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>( \ const SparseArray<T> &in); #define INSTANTIATE_COO_SPECIAL(T) \ template<> \ SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>( \ const Array<T> &in) { \ return sparseConvertDenseToCOO<T>(in); \ } \ template<> \ Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>( \ const SparseArray<T> &in) { \ return sparseConvertCOOToDense<T>(in); \ } #define INSTANTIATE_SPARSE(T) \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>( \ const Array<T> &in); \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>( \ const Array<T> &in); \ \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>( \ const SparseArray<T> &in); \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>( \ const SparseArray<T> &in); \ \ INSTANTIATE_COO_SPECIAL(T) \ \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO) INSTANTIATE_SPARSE(float) INSTANTIATE_SPARSE(double) INSTANTIATE_SPARSE(cfloat) INSTANTIATE_SPARSE(cdouble) #undef INSTANTIATE_TO_STORAGE #undef INSTANTIATE_COO_SPECIAL #undef INSTANTIATE_SPARSE } // namespace cuda
ae7ce8f1e13e5a896d8d79b158ac6147a095eebb.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ #include <sparse.hpp> #include <arith.hpp> #include <cast.hpp> #include <common/err_common.hpp> #include <complex.hpp> #include <copy.hpp> #include <cusparse.hpp> #include <kernel/sparse.hpp> #include <lookup.hpp> #include <math.hpp> #include <platform.hpp> #include <where.hpp> #include <stdexcept> #include <string> namespace cuda { using namespace common; // cusparseStatus_t cusparseZdense2csr(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *A, int lda, // const int *nnzPerRow, // cuDoubleComplex *csrValA, // int *csrRowPtrA, int *csrColIndA) template<typename T> struct dense2csr_func_def_t { typedef cusparseStatus_t (*dense2csr_func_def)(cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; // cusparseStatus_t cusparseZdense2csc(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *A, int lda, // const int *nnzPerCol, // cuDoubleComplex *cscValA, // int *cscRowIndA, int *cscColPtrA) template<typename T> struct dense2csc_func_def_t { typedef cusparseStatus_t (*dense2csc_func_def)(cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, int, const int *, T *, int *, int *); }; // cusparseStatus_t cusparseZcsr2dense(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *csrValA, // const int *csrRowPtrA, // const int *csrColIndA, // cuDoubleComplex *A, int lda) template<typename T> struct csr2dense_func_def_t { typedef cusparseStatus_t (*csr2dense_func_def)(cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, const int *, const int *, T *, int); }; // cusparseStatus_t cusparseZcsc2dense(cusparseHandle_t handle, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *cscValA, // const int *cscRowIndA, // const int *cscColPtrA, // cuDoubleComplex *A, int lda) template<typename T> struct csc2dense_func_def_t { typedef cusparseStatus_t (*csc2dense_func_def)(cusparseHandle_t, int, int, const cusparseMatDescr_t, const T *, const int *, const int *, T *, int); }; // cusparseStatus_t cusparseZnnz(cusparseHandle_t handle, // cusparseDirection_t dirA, // int m, int n, // const cusparseMatDescr_t descrA, // const cuDoubleComplex *A, int lda, // int *nnzPerRowColumn, // int *nnzTotalDevHostPtr) template<typename T> struct nnz_func_def_t { typedef cusparseStatus_t (*nnz_func_def)(cusparseHandle_t, cusparseDirection_t, int, int, const cusparseMatDescr_t, const T *, int, int *, int *); }; // cusparseStatus_t cusparseZgthr(cusparseHandle_t handle, // int nnz, // const cuDoubleComplex *y, // cuDoubleComplex *xVal, const int *xInd, // cusparseIndexBase_t idxBase) template<typename T> struct gthr_func_def_t { typedef cusparseStatus_t (*gthr_func_def)(cusparseHandle_t, int, const T *, T *, const int *, cusparseIndexBase_t); }; #define SPARSE_FUNC_DEF(FUNC) \ template<typename T> \ typename FUNC##_func_def_t<T>::FUNC##_func_def FUNC##_func(); #define SPARSE_FUNC(FUNC, TYPE, PREFIX) \ template<> \ typename FUNC##_func_def_t<TYPE>::FUNC##_func_def FUNC##_func<TYPE>() { \ return (FUNC##_func_def_t<TYPE>::FUNC##_func_def) & \ cusparse##PREFIX##FUNC; \ 
} SPARSE_FUNC_DEF(dense2csr) SPARSE_FUNC(dense2csr, float, S) SPARSE_FUNC(dense2csr, double, D) SPARSE_FUNC(dense2csr, cfloat, C) SPARSE_FUNC(dense2csr, cdouble, Z) SPARSE_FUNC_DEF(dense2csc) SPARSE_FUNC(dense2csc, float, S) SPARSE_FUNC(dense2csc, double, D) SPARSE_FUNC(dense2csc, cfloat, C) SPARSE_FUNC(dense2csc, cdouble, Z) SPARSE_FUNC_DEF(csr2dense) SPARSE_FUNC(csr2dense, float, S) SPARSE_FUNC(csr2dense, double, D) SPARSE_FUNC(csr2dense, cfloat, C) SPARSE_FUNC(csr2dense, cdouble, Z) SPARSE_FUNC_DEF(csc2dense) SPARSE_FUNC(csc2dense, float, S) SPARSE_FUNC(csc2dense, double, D) SPARSE_FUNC(csc2dense, cfloat, C) SPARSE_FUNC(csc2dense, cdouble, Z) SPARSE_FUNC_DEF(nnz) SPARSE_FUNC(nnz, float, S) SPARSE_FUNC(nnz, double, D) SPARSE_FUNC(nnz, cfloat, C) SPARSE_FUNC(nnz, cdouble, Z) SPARSE_FUNC_DEF(gthr) SPARSE_FUNC(gthr, float, S) SPARSE_FUNC(gthr, double, D) SPARSE_FUNC(gthr, cfloat, C) SPARSE_FUNC(gthr, cdouble, Z) #undef SPARSE_FUNC #undef SPARSE_FUNC_DEF // Partial template specialization of sparseConvertDenseToStorage for COO // However, template specialization is not allowed template<typename T> SparseArray<T> sparseConvertDenseToCOO(const Array<T> &in) { Array<uint> nonZeroIdx_ = where<T>(in); Array<int> nonZeroIdx = cast<int, uint>(nonZeroIdx_); dim_t nNZ = nonZeroIdx.elements(); Array<int> constDim = createValueArray<int>(dim4(nNZ), in.dims()[0]); Array<int> rowIdx = arithOp<int, af_mod_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<int> colIdx = arithOp<int, af_div_t>(nonZeroIdx, constDim, nonZeroIdx.dims()); Array<T> values = copyArray<T>(in); values.modDims(dim4(values.elements())); values = lookup<T, int>(values, nonZeroIdx, 0); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, AF_STORAGE_COO); } template<typename T, af_storage stype> SparseArray<T> sparseConvertDenseToStorage(const Array<T> &in) { const int M = in.dims()[0]; const int N = in.dims()[1]; // Create Sparse Matrix Descriptor cusparseMatDescr_t descr = 0; CUSPARSE_CHECK(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); int d = -1; cusparseDirection_t dir = CUSPARSE_DIRECTION_ROW; if (stype == AF_STORAGE_CSR) { d = M; dir = CUSPARSE_DIRECTION_ROW; } else { d = N; dir = CUSPARSE_DIRECTION_COLUMN; } Array<int> nnzPerDir = createEmptyArray<int>(dim4(d)); int nNZ = -1; CUSPARSE_CHECK(nnz_func<T>()(sparseHandle(), dir, M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), &nNZ)); Array<int> rowIdx = createEmptyArray<int>(dim4()); Array<int> colIdx = createEmptyArray<int>(dim4()); if (stype == AF_STORAGE_CSR) { rowIdx = createEmptyArray<int>(dim4(M + 1)); colIdx = createEmptyArray<int>(dim4(nNZ)); } else { rowIdx = createEmptyArray<int>(dim4(nNZ)); colIdx = createEmptyArray<int>(dim4(N + 1)); } Array<T> values = createEmptyArray<T>(dim4(nNZ)); if (stype == AF_STORAGE_CSR) CUSPARSE_CHECK(dense2csr_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); else CUSPARSE_CHECK(dense2csc_func<T>()( sparseHandle(), M, N, descr, in.get(), in.strides()[1], nnzPerDir.get(), values.get(), rowIdx.get(), colIdx.get())); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(cusparseDestroyMatDescr(descr)); return createArrayDataSparseArray<T>(in.dims(), values, rowIdx, colIdx, stype); } // Partial template specialization of sparseConvertStorageToDense for COO // However, template specialization is not allowed template<typename T> Array<T> 
sparseConvertCOOToDense(const SparseArray<T> &in) { Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); const Array<T> values = in.getValues(); const Array<int> rowIdx = in.getRowIdx(); const Array<int> colIdx = in.getColIdx(); kernel::coo2dense<T>(dense, values, rowIdx, colIdx); return dense; } template<typename T, af_storage stype> Array<T> sparseConvertStorageToDense(const SparseArray<T> &in) { // Create Sparse Matrix Descriptor cusparseMatDescr_t descr = 0; CUSPARSE_CHECK(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); int M = in.dims()[0]; int N = in.dims()[1]; Array<T> dense = createValueArray<T>(in.dims(), scalar<T>(0)); int d_strides1 = dense.strides()[1]; if (stype == AF_STORAGE_CSR) CUSPARSE_CHECK( csr2dense_func<T>()(sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); else CUSPARSE_CHECK( csc2dense_func<T>()(sparseHandle(), M, N, descr, in.getValues().get(), in.getRowIdx().get(), in.getColIdx().get(), dense.get(), d_strides1)); // Destory Sparse Matrix Descriptor CUSPARSE_CHECK(cusparseDestroyMatDescr(descr)); return dense; } template<typename T, af_storage dest, af_storage src> SparseArray<T> sparseConvertStorageToStorage(const SparseArray<T> &in) { using std::shared_ptr; in.eval(); int nNZ = in.getNNZ(); SparseArray<T> converted = createEmptySparseArray<T>(in.dims(), nNZ, dest); if (src == AF_STORAGE_CSR && dest == AF_STORAGE_COO) { // Copy colIdx as is CUDA_CHECK( cudaMemcpyAsync(converted.getColIdx().get(), in.getColIdx().get(), in.getColIdx().elements() * sizeof(int), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to expand compressed row into coordinate CUSPARSE_CHECK(cusparseXcsr2coo( sparseHandle(), in.getRowIdx().get(), nNZ, in.dims()[0], converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO)); // Call sort size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer(memAlloc<char>(pBufferSizeInBytes).release(), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>); CUSPARSE_CHECK( cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(cusparseXcoosortByColumn( sparseHandle(), in.dims()[0], in.dims()[1], nNZ, converted.getRowIdx().get(), converted.getColIdx().get(), P.get(), (void *)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()(sparseHandle(), nNZ, in.getValues().get(), converted.getValues().get(), P.get(), CUSPARSE_INDEX_BASE_ZERO)); } else if (src == AF_STORAGE_COO && dest == AF_STORAGE_CSR) { // The cusparse csr sort function is not behaving correctly. 
// So the work around is to convert the COO into row major and then // convert it to CSR // Deep copy input into temporary COO Row Major SparseArray<T> cooT = createArrayDataSparseArray<T>( in.dims(), in.getValues(), in.getRowIdx(), in.getColIdx(), in.getStorage(), true); // Call sort to convert column major to row major { size_t pBufferSizeInBytes = 0; CUSPARSE_CHECK(cusparseXcoosort_bufferSizeExt( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), &pBufferSizeInBytes)); shared_ptr<char> pBuffer( memAlloc<char>(pBufferSizeInBytes).release(), memFree<char>); shared_ptr<int> P(memAlloc<int>(nNZ).release(), memFree<int>); CUSPARSE_CHECK(cusparseCreateIdentityPermutation(sparseHandle(), nNZ, P.get())); CUSPARSE_CHECK(cusparseXcoosortByRow( sparseHandle(), cooT.dims()[0], cooT.dims()[1], nNZ, cooT.getRowIdx().get(), cooT.getColIdx().get(), P.get(), (void *)pBuffer.get())); CUSPARSE_CHECK(gthr_func<T>()( sparseHandle(), nNZ, in.getValues().get(), cooT.getValues().get(), P.get(), CUSPARSE_INDEX_BASE_ZERO)); } // Copy values and colIdx as is CUDA_CHECK( cudaMemcpyAsync(converted.getValues().get(), cooT.getValues().get(), cooT.getValues().elements() * sizeof(T), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); CUDA_CHECK( cudaMemcpyAsync(converted.getColIdx().get(), cooT.getColIdx().get(), cooT.getColIdx().elements() * sizeof(int), cudaMemcpyDeviceToDevice, cuda::getActiveStream())); // cusparse function to compress row from coordinate CUSPARSE_CHECK(cusparseXcoo2csr( sparseHandle(), cooT.getRowIdx().get(), nNZ, cooT.dims()[0], converted.getRowIdx().get(), CUSPARSE_INDEX_BASE_ZERO)); // No need to call CSRSORT } else { // Should never come here AF_ERROR("CUDA Backend invalid conversion combination", AF_ERR_NOT_SUPPORTED); } return converted; } #define INSTANTIATE_TO_STORAGE(T, S) \ template SparseArray<T> \ sparseConvertStorageToStorage<T, S, AF_STORAGE_CSR>( \ const SparseArray<T> &in); \ template SparseArray<T> \ sparseConvertStorageToStorage<T, S, AF_STORAGE_CSC>( \ const SparseArray<T> &in); \ template SparseArray<T> \ sparseConvertStorageToStorage<T, S, AF_STORAGE_COO>( \ const SparseArray<T> &in); #define INSTANTIATE_COO_SPECIAL(T) \ template<> \ SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_COO>( \ const Array<T> &in) { \ return sparseConvertDenseToCOO<T>(in); \ } \ template<> \ Array<T> sparseConvertStorageToDense<T, AF_STORAGE_COO>( \ const SparseArray<T> &in) { \ return sparseConvertCOOToDense<T>(in); \ } #define INSTANTIATE_SPARSE(T) \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSR>( \ const Array<T> &in); \ template SparseArray<T> sparseConvertDenseToStorage<T, AF_STORAGE_CSC>( \ const Array<T> &in); \ \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSR>( \ const SparseArray<T> &in); \ template Array<T> sparseConvertStorageToDense<T, AF_STORAGE_CSC>( \ const SparseArray<T> &in); \ \ INSTANTIATE_COO_SPECIAL(T) \ \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSR) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_CSC) \ INSTANTIATE_TO_STORAGE(T, AF_STORAGE_COO) INSTANTIATE_SPARSE(float) INSTANTIATE_SPARSE(double) INSTANTIATE_SPARSE(cfloat) INSTANTIATE_SPARSE(cdouble) #undef INSTANTIATE_TO_STORAGE #undef INSTANTIATE_COO_SPECIAL #undef INSTANTIATE_SPARSE } // namespace cuda
6c5cc2fba7c78a399edc6c0b43b0ee75968b2aa6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Computes the third interior angle of a triangle (in degrees) from the other two.
__global__ void ThirdAngle(int *a1, int *a2, int *a3)
{
	*a3 = (180 - *a1 - *a2);
}
6c5cc2fba7c78a399edc6c0b43b0ee75968b2aa6.cu
#include "includes.h" __global__ void ThirdAngle(int *a1, int *a2, int *a3) { *a3 = (180-*a1-*a2); }
c4bb06aefc4b50b5272b981ac6c84ec9a0088c10.hip
// !!! This is a file automatically generated by hipify!!! #include <Chain.h> #include <Config.h> #include <constants.h> #include <functions.h> #include <stdlib.h> #include <stdio.h> #include <sys/stat.h> #include <unistd.h> __host__ void oneChain(Chain *host_a, Chain *dev_a, Config *cfg){ ++cfg->chainNum; if(cfg->verbose) printf("\n Chain %d of %d.\n", cfg->chainNum, cfg->chains); runChain(host_a, dev_a, cfg); summarizeChain(host_a, dev_a, cfg); resetChain(host_a, dev_a, cfg); } __host__ void mcmc(int *pargc, char **argv){ int i, argc = *pargc; Config *cfg = config(argc, argv); Chain *host_a = NULL, *dev_a = NULL; if(cfg->debug) printConfig(cfg); newChain(&host_a, &dev_a, cfg); if(host_a == NULL){ free(cfg); exit(EXIT_FAILURE); } if(cfg->verbose) printf("Running %d chain(s).\n", cfg->chains); for(i = 0; i < cfg->chains; ++i) oneChain(host_a, dev_a, cfg); freeChain(host_a, cfg, 0); hipFree(dev_a); if(cfg->verbose) printf("Done. MCMC output written to directory: %s.\n", cfg->outDir); chdir(cfg->cwd); free(cfg); }
c4bb06aefc4b50b5272b981ac6c84ec9a0088c10.cu
#include <Chain.h> #include <Config.h> #include <constants.h> #include <functions.h> #include <stdlib.h> #include <stdio.h> #include <sys/stat.h> #include <unistd.h> __host__ void oneChain(Chain *host_a, Chain *dev_a, Config *cfg){ ++cfg->chainNum; if(cfg->verbose) printf("\n Chain %d of %d.\n", cfg->chainNum, cfg->chains); runChain(host_a, dev_a, cfg); summarizeChain(host_a, dev_a, cfg); resetChain(host_a, dev_a, cfg); } __host__ void mcmc(int *pargc, char **argv){ int i, argc = *pargc; Config *cfg = config(argc, argv); Chain *host_a = NULL, *dev_a = NULL; if(cfg->debug) printConfig(cfg); newChain(&host_a, &dev_a, cfg); if(host_a == NULL){ free(cfg); exit(EXIT_FAILURE); } if(cfg->verbose) printf("Running %d chain(s).\n", cfg->chains); for(i = 0; i < cfg->chains; ++i) oneChain(host_a, dev_a, cfg); freeChain(host_a, cfg, 0); cudaFree(dev_a); if(cfg->verbose) printf("Done. MCMC output written to directory: %s.\n", cfg->outDir); chdir(cfg->cwd); free(cfg); }
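The mcmc() host function above takes argc by pointer and drives the whole run (config parsing, chain loop, output). A hypothetical entry point, not part of the original sources, would simply forward the command line:

// Hypothetical entry point; the real project presumably defines its own main.
void mcmc(int *pargc, char **argv); // __host__ driver defined in the file above

int main(int argc, char **argv) {
    mcmc(&argc, argv); // parse the Config, run the chains, write MCMC output to cfg->outDir
    return 0;
}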