hip_filename: string, lengths 5 to 84
hip_content: string, lengths 79 to 9.69M
cuda_filename: string, lengths 4 to 83
cuda_content: string, lengths 19 to 9.69M
f27fb53ae94e9b7bac000f67e2cd916b8d7b1303.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hipsparse.h" #include <thrust/sort.h> #include <thrust/device_ptr.h> #include "lusol.h" __global__ void LU_SOL_DYNR_INIT(int n, int *ia, int *da, int *dpl, int *dpu); __global__ void DYNR_ANA_1(int n, int *ia, int *ii); __global__ void DYNR_ANA_2(int n, int nnz, int *jb, int *jj, int *ib, int *db); /*-------------------------------------------*/ /* Row level scheduling kernel */ /*-------------------------------------------*/ // HALF-WARP __global__ void L_SOL_LEVR16(REAL *b, REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevL, int l1, int l2) { int i,k,jj; // num of half-warps int nhw = gridDim.x*BLOCKDIM/HALFWARP; // half warp id int hwid = (blockIdx.x*BLOCKDIM+threadIdx.x)/HALFWARP; // thread lane in each half warp int lane = threadIdx.x & (HALFWARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+8]; for (i=l1+hwid; i<l2; i+=nhw) { jj = jlevL[i-1]-1; int p1 = ia[jj]; int q1 = di[jj]; REAL sum = 0.0; for (k=p1+lane; k<q1; k+=HALFWARP) sum += a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] = sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[q1-1]; x[jj] = t*(b[jj] - r[threadIdx.x]); } } } /*----------------- x = U^{-1}*x */ __global__ void U_SOL_LEVR16(REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevU, int l1, int l2) { int i,k,jj; // num of half-warps int nhw = gridDim.x*BLOCKDIM/HALFWARP; // half warp id int hwid = (blockIdx.x*BLOCKDIM+threadIdx.x)/HALFWARP; // thread lane in each half warp int lane = threadIdx.x & (HALFWARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+8]; for (i=l1+hwid; i<l2; i+=nhw) { jj = jlevU[i-1]-1; int p1 = di[jj]; int q1 = ia[jj+1]; REAL sum = 0.0; for (k=p1+1+lane; k<q1; k+=HALFWARP) sum += a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] = sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[p1-1]; x[jj] = t*(x[jj] - r[threadIdx.x]); } } } //-------------------------------------------------------- void luSolvLevR16(int n, int nnz, struct csr_t *csr, REAL *x, REAL *b, int REPEAT, bool print) { int i, j, *d_ia, *d_ja, *d_di, *d_jlevL, *d_jlevU; REAL *d_a, *d_b, *d_x; double t1, t2, ta; struct level_t lev; allocLevel(n, &lev); hipMalloc((void **)&d_jlevL, n*sizeof(int)); hipMalloc((void **)&d_jlevU, n*sizeof(int)); /*------------------- allocate Device Memory */ hipMalloc((void **)&d_ia, (n+1)*sizeof(int)); hipMalloc((void **)&d_ja, nnz*sizeof(int)); hipMalloc((void **)&d_di, n*sizeof(int)); hipMalloc((void **)&d_a, nnz*sizeof(REAL)); hipMalloc((void **)&d_b, n*sizeof(REAL)); hipMalloc((void **)&d_x, n*sizeof(REAL)); /*------------------- Memcpy */ hipMemcpy(d_ia, csr->ia, (n+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_ja, csr->ja, nnz*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_a, csr->a, nnz*sizeof(REAL), hipMemcpyHostToDevice); hipMemcpy(d_di, csr->di, n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, n*sizeof(REAL), hipMemcpyHostToDevice); /*------------------- analysis */ ta = wall_timer(); if (!GPU_LEVEL) { for (int j=0; j<REPEAT; j++) { hipMemcpy(csr->ia, d_ia, (n+1)*sizeof(int), hipMemcpyDeviceToHost); 
hipMemcpy(csr->ja, d_ja, nnz*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(csr->di, d_di, n*sizeof(int), hipMemcpyDeviceToHost); makeLevelCSR(n, csr->ia, csr->ja, csr->di, &lev); hipMemcpy(d_jlevL, lev.jlevL, n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_jlevU, lev.jlevU, n*sizeof(int), hipMemcpyHostToDevice); } } else { for (int j=0; j<REPEAT; j++) { int *d_dp, *d_ib, *d_jb, *d_db; hipMalloc((void **)&d_dp, 2*n*sizeof(int)); hipMalloc((void **)&d_jb, nnz*sizeof(int)); hipMalloc((void **)&d_ib, (n+1)*sizeof(int)); hipMalloc((void **)&d_db, n*sizeof(int)); int gDim = (n + BLOCKDIM - 1) / BLOCKDIM; hipLaunchKernelGGL(( LU_SOL_DYNR_INIT), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ia, d_di, d_dp, d_dp+n); int nhwb = BLOCKDIM / HALFWARP; // number of half-warps per block gDim = (n + nhwb - 1) / nhwb; hipLaunchKernelGGL(( DYNR_ANA_1), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ia, d_jb); //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_data(d_jb); thrust::device_ptr<int> dev_keys(d_ja); thrust::stable_sort_by_key(dev_keys, dev_keys + nnz, dev_data); gDim = (nnz + BLOCKDIM-1) / BLOCKDIM; hipLaunchKernelGGL(( DYNR_ANA_2), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, nnz, d_jb, d_ja, d_ib, d_db); makeLevelCSC_SYNC(n, d_ib, d_jb, d_db, d_dp, d_jlevL, lev.ilevL, &lev.nlevL, d_jlevU, lev.ilevU, &lev.nlevU); // copy again since it was changed by the sorting hipMemcpy(d_ja, csr->ja, nnz*sizeof(int), hipMemcpyHostToDevice); hipFree(d_dp); hipFree(d_ib); hipFree(d_jb); hipFree(d_db); } } ta = wall_timer() - ta; t1 = wall_timer(); int ilev_shift; ilev_shift = lev.ilevL[0] == 0; for (int i=0; i < lev.nlevL+1; i++) { lev.ilevL[i] += ilev_shift; } ilev_shift = lev.ilevU[0] == 0; for (int i=0; i < lev.nlevU+1; i++) { lev.ilevU[i] += ilev_shift; } for (j=0; j<REPEAT; j++) { // L-solve for (i=0; i<lev.nlevL; i++) { int l1 = lev.ilevL[i]; int l2 = lev.ilevL[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*HALFWARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; hipLaunchKernelGGL(( L_SOL_LEVR16), dim3(gDim), dim3(bDim), 0, 0, d_b, d_x, d_a, d_ja, d_ia, d_di, d_jlevL, l1, l2); } // U-solve for (i=0; i<lev.nlevU; i++) { int l1 = lev.ilevU[i]; int l2 = lev.ilevU[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*HALFWARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; hipLaunchKernelGGL(( U_SOL_LEVR16), dim3(gDim), dim3(bDim), 0, 0, d_x, d_a, d_ja, d_ia, d_di, d_jlevU, l1, l2); } } //Barrier for GPU calls hipDeviceSynchronize(); t2 = wall_timer() - t1; if (print) { printf("[GPU] level-scheduling R16, #lev in L %d, #lev in U %d\n", lev.nlevL, lev.nlevU); printf(" time(s)=%f, Gflops=%5.3f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2); printf(" analysis time %f (%f) ", ta/REPEAT, ta/t2); } /*-------- copy x to host mem */ hipMemcpy(x, d_x, n*sizeof(REAL), hipMemcpyDeviceToHost); hipFree(d_ia); hipFree(d_ja); hipFree(d_di); hipFree(d_a); hipFree(d_b); hipFree(d_x); FreeLev(&lev); hipFree(d_jlevL); hipFree(d_jlevU); } // WARP __global__ void L_SOL_LEVR32(REAL *b, REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevL, int l1, int l2) { int i,k,jj; // num of warps int nw = gridDim.x*BLOCKDIM/WARP; // warp id int wid = (blockIdx.x*BLOCKDIM+threadIdx.x)/WARP; // thread lane in each warp int lane = threadIdx.x & (WARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+16]; for (i=l1+wid; i<l2; i+=nw) { jj = jlevL[i-1]-1; int p1 = ia[jj]; int q1 = di[jj]; REAL sum = 0.0; for (k=p1+lane; k<q1; k+=WARP) sum 
+= a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+16]; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] = sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[q1-1]; x[jj] = t*(b[jj] - r[threadIdx.x]); } } } /*----------------- x = U^{-1}*x */ __global__ void U_SOL_LEVR32(REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevU, int l1, int l2) { int i,k,jj; // num of warps int nw = gridDim.x*BLOCKDIM/WARP; // warp id int wid = (blockIdx.x*BLOCKDIM+threadIdx.x)/WARP; // thread lane in each warp int lane = threadIdx.x & (WARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+16]; for (i=l1+wid; i<l2; i+=nw) { jj = jlevU[i-1]-1; int p1 = di[jj]; int q1 = ia[jj+1]; REAL sum = 0.0; for (k=p1+1+lane; k<q1; k+=WARP) sum += a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+16]; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] = sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[p1-1]; x[jj] = t*(x[jj] - r[threadIdx.x]); } } } //-------------------------------------------------------- void luSolvLevR32(int n, int nnz, struct csr_t *csr, REAL *x, REAL *b, int REPEAT, bool print) { int i, j, *d_ia, *d_ja, *d_di, *d_jlevL, *d_jlevU; REAL *d_a, *d_b, *d_x; double t1, t2, ta; struct level_t lev; allocLevel(n, &lev); hipMalloc((void **)&d_jlevL, n*sizeof(int)); hipMalloc((void **)&d_jlevU, n*sizeof(int)); /*------------------- allocate Device Memory */ hipMalloc((void **)&d_ia, (n+1)*sizeof(int)); hipMalloc((void **)&d_ja, nnz*sizeof(int)); hipMalloc((void **)&d_di, n*sizeof(int)); hipMalloc((void **)&d_a, nnz*sizeof(REAL)); hipMalloc((void **)&d_b, n*sizeof(REAL)); hipMalloc((void **)&d_x, n*sizeof(REAL)); /*------------------- Memcpy */ hipMemcpy(d_ia, csr->ia, (n+1)*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_ja, csr->ja, nnz*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_a, csr->a, nnz*sizeof(REAL), hipMemcpyHostToDevice); hipMemcpy(d_di, csr->di, n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_b, b, n*sizeof(REAL), hipMemcpyHostToDevice); /*------------------- analysis */ ta = wall_timer(); if (!GPU_LEVEL) { for (int j=0; j<REPEAT; j++) { hipMemcpy(csr->ia, d_ia, (n+1)*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(csr->ja, d_ja, nnz*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(csr->di, d_di, n*sizeof(int), hipMemcpyDeviceToHost); makeLevelCSR(n, csr->ia, csr->ja, csr->di, &lev); hipMemcpy(d_jlevL, lev.jlevL, n*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_jlevU, lev.jlevU, n*sizeof(int), hipMemcpyHostToDevice); } } else { for (int j=0; j<REPEAT; j++) { int *d_dp, *d_ib, *d_jb, *d_db; hipMalloc((void **)&d_dp, 2*n*sizeof(int)); hipMalloc((void **)&d_jb, nnz*sizeof(int)); hipMalloc((void **)&d_ib, (n+1)*sizeof(int)); hipMalloc((void **)&d_db, n*sizeof(int)); int gDim = (n + BLOCKDIM - 1) / BLOCKDIM; hipLaunchKernelGGL(( LU_SOL_DYNR_INIT), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ia, d_di, d_dp, d_dp+n); int nhwb = BLOCKDIM / HALFWARP; // number of half-warps per block gDim = (n + nhwb - 1) / nhwb; hipLaunchKernelGGL(( DYNR_ANA_1), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, d_ia, d_jb); //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_data(d_jb); thrust::device_ptr<int> 
dev_keys(d_ja); thrust::stable_sort_by_key(dev_keys, dev_keys + nnz, dev_data); gDim = (nnz + BLOCKDIM-1) / BLOCKDIM; hipLaunchKernelGGL(( DYNR_ANA_2), dim3(gDim), dim3(BLOCKDIM), 0, 0, n, nnz, d_jb, d_ja, d_ib, d_db); // double tt1 = wall_timer(); makeLevelCSC_SYNC(n, d_ib, d_jb, d_db, d_dp, d_jlevL, lev.ilevL, &lev.nlevL, d_jlevU, lev.ilevU, &lev.nlevU); // hipDeviceSynchronize(); // tt1 = wall_timer() - tt1; // printf("GPU LEVEL TIME %f\n", tt1); // copy again since it was changed by the sorting hipMemcpy(d_ja, csr->ja, nnz*sizeof(int), hipMemcpyHostToDevice); hipFree(d_dp); hipFree(d_ib); hipFree(d_jb); hipFree(d_db); } } ta = wall_timer() - ta; int ilev_shift; ilev_shift = lev.ilevL[0] == 0; for (int i=0; i < lev.nlevL+1; i++) { lev.ilevL[i] += ilev_shift; } ilev_shift = lev.ilevU[0] == 0; for (int i=0; i < lev.nlevU+1; i++) { lev.ilevU[i] += ilev_shift; } t1 = wall_timer(); for (j=0; j<REPEAT; j++) { // L-solve for (i=0; i<lev.nlevL; i++) { int l1 = lev.ilevL[i]; int l2 = lev.ilevL[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*WARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; hipLaunchKernelGGL(( L_SOL_LEVR32), dim3(gDim), dim3(bDim), 0, 0, d_b, d_x, d_a, d_ja, d_ia, d_di, d_jlevL, l1, l2); } // U-solve for (i=0; i<lev.nlevU; i++) { int l1 = lev.ilevU[i]; int l2 = lev.ilevU[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*WARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; hipLaunchKernelGGL(( U_SOL_LEVR32), dim3(gDim), dim3(bDim), 0, 0, d_x, d_a, d_ja, d_ia, d_di, d_jlevU, l1, l2); } } //Barrier for GPU calls hipDeviceSynchronize(); t2 = wall_timer() - t1; if (print) { printf("[GPU] level-scheduling R32, #lev in L %d, #lev in U %d\n", lev.nlevL, lev.nlevU); printf(" time(s)=%f, Gflops=%5.3f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2); printf(" analysis time %f (%f) ", ta/REPEAT, ta/t2); } /*-------- copy x to host mem */ hipMemcpy(x, d_x, n*sizeof(REAL), hipMemcpyDeviceToHost); hipFree(d_ia); hipFree(d_ja); hipFree(d_di); hipFree(d_a); hipFree(d_b); hipFree(d_x); FreeLev(&lev); hipFree(d_jlevL); hipFree(d_jlevU); }
f27fb53ae94e9b7bac000f67e2cd916b8d7b1303.cu
#include "cusparse.h" #include <thrust/sort.h> #include <thrust/device_ptr.h> #include "lusol.h" __global__ void LU_SOL_DYNR_INIT(int n, int *ia, int *da, int *dpl, int *dpu); __global__ void DYNR_ANA_1(int n, int *ia, int *ii); __global__ void DYNR_ANA_2(int n, int nnz, int *jb, int *jj, int *ib, int *db); /*-------------------------------------------*/ /* Row level scheduling kernel */ /*-------------------------------------------*/ // HALF-WARP __global__ void L_SOL_LEVR16(REAL *b, REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevL, int l1, int l2) { int i,k,jj; // num of half-warps int nhw = gridDim.x*BLOCKDIM/HALFWARP; // half warp id int hwid = (blockIdx.x*BLOCKDIM+threadIdx.x)/HALFWARP; // thread lane in each half warp int lane = threadIdx.x & (HALFWARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+8]; for (i=l1+hwid; i<l2; i+=nhw) { jj = jlevL[i-1]-1; int p1 = ia[jj]; int q1 = di[jj]; REAL sum = 0.0; for (k=p1+lane; k<q1; k+=HALFWARP) sum += a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] = sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[q1-1]; x[jj] = t*(b[jj] - r[threadIdx.x]); } } } /*----------------- x = U^{-1}*x */ __global__ void U_SOL_LEVR16(REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevU, int l1, int l2) { int i,k,jj; // num of half-warps int nhw = gridDim.x*BLOCKDIM/HALFWARP; // half warp id int hwid = (blockIdx.x*BLOCKDIM+threadIdx.x)/HALFWARP; // thread lane in each half warp int lane = threadIdx.x & (HALFWARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+8]; for (i=l1+hwid; i<l2; i+=nhw) { jj = jlevU[i-1]-1; int p1 = di[jj]; int q1 = ia[jj+1]; REAL sum = 0.0; for (k=p1+1+lane; k<q1; k+=HALFWARP) sum += a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] = sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[p1-1]; x[jj] = t*(x[jj] - r[threadIdx.x]); } } } //-------------------------------------------------------- void luSolvLevR16(int n, int nnz, struct csr_t *csr, REAL *x, REAL *b, int REPEAT, bool print) { int i, j, *d_ia, *d_ja, *d_di, *d_jlevL, *d_jlevU; REAL *d_a, *d_b, *d_x; double t1, t2, ta; struct level_t lev; allocLevel(n, &lev); cudaMalloc((void **)&d_jlevL, n*sizeof(int)); cudaMalloc((void **)&d_jlevU, n*sizeof(int)); /*------------------- allocate Device Memory */ cudaMalloc((void **)&d_ia, (n+1)*sizeof(int)); cudaMalloc((void **)&d_ja, nnz*sizeof(int)); cudaMalloc((void **)&d_di, n*sizeof(int)); cudaMalloc((void **)&d_a, nnz*sizeof(REAL)); cudaMalloc((void **)&d_b, n*sizeof(REAL)); cudaMalloc((void **)&d_x, n*sizeof(REAL)); /*------------------- Memcpy */ cudaMemcpy(d_ia, csr->ia, (n+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_ja, csr->ja, nnz*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_a, csr->a, nnz*sizeof(REAL), cudaMemcpyHostToDevice); cudaMemcpy(d_di, csr->di, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, n*sizeof(REAL), cudaMemcpyHostToDevice); /*------------------- analysis */ ta = wall_timer(); if (!GPU_LEVEL) { for (int j=0; j<REPEAT; j++) { cudaMemcpy(csr->ia, d_ia, (n+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(csr->ja, d_ja, nnz*sizeof(int), cudaMemcpyDeviceToHost); 
cudaMemcpy(csr->di, d_di, n*sizeof(int), cudaMemcpyDeviceToHost); makeLevelCSR(n, csr->ia, csr->ja, csr->di, &lev); cudaMemcpy(d_jlevL, lev.jlevL, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_jlevU, lev.jlevU, n*sizeof(int), cudaMemcpyHostToDevice); } } else { for (int j=0; j<REPEAT; j++) { int *d_dp, *d_ib, *d_jb, *d_db; cudaMalloc((void **)&d_dp, 2*n*sizeof(int)); cudaMalloc((void **)&d_jb, nnz*sizeof(int)); cudaMalloc((void **)&d_ib, (n+1)*sizeof(int)); cudaMalloc((void **)&d_db, n*sizeof(int)); int gDim = (n + BLOCKDIM - 1) / BLOCKDIM; LU_SOL_DYNR_INIT<<<gDim, BLOCKDIM>>>(n, d_ia, d_di, d_dp, d_dp+n); int nhwb = BLOCKDIM / HALFWARP; // number of half-warps per block gDim = (n + nhwb - 1) / nhwb; DYNR_ANA_1<<<gDim, BLOCKDIM>>>(n, d_ia, d_jb); //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_data(d_jb); thrust::device_ptr<int> dev_keys(d_ja); thrust::stable_sort_by_key(dev_keys, dev_keys + nnz, dev_data); gDim = (nnz + BLOCKDIM-1) / BLOCKDIM; DYNR_ANA_2<<<gDim, BLOCKDIM>>>(n, nnz, d_jb, d_ja, d_ib, d_db); makeLevelCSC_SYNC(n, d_ib, d_jb, d_db, d_dp, d_jlevL, lev.ilevL, &lev.nlevL, d_jlevU, lev.ilevU, &lev.nlevU); // copy again since it was changed by the sorting cudaMemcpy(d_ja, csr->ja, nnz*sizeof(int), cudaMemcpyHostToDevice); cudaFree(d_dp); cudaFree(d_ib); cudaFree(d_jb); cudaFree(d_db); } } ta = wall_timer() - ta; t1 = wall_timer(); int ilev_shift; ilev_shift = lev.ilevL[0] == 0; for (int i=0; i < lev.nlevL+1; i++) { lev.ilevL[i] += ilev_shift; } ilev_shift = lev.ilevU[0] == 0; for (int i=0; i < lev.nlevU+1; i++) { lev.ilevU[i] += ilev_shift; } for (j=0; j<REPEAT; j++) { // L-solve for (i=0; i<lev.nlevL; i++) { int l1 = lev.ilevL[i]; int l2 = lev.ilevL[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*HALFWARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; L_SOL_LEVR16<<<gDim, bDim>>>(d_b, d_x, d_a, d_ja, d_ia, d_di, d_jlevL, l1, l2); } // U-solve for (i=0; i<lev.nlevU; i++) { int l1 = lev.ilevU[i]; int l2 = lev.ilevU[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*HALFWARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; U_SOL_LEVR16<<<gDim, bDim>>>(d_x, d_a, d_ja, d_ia, d_di, d_jlevU, l1, l2); } } //Barrier for GPU calls cudaThreadSynchronize(); t2 = wall_timer() - t1; if (print) { printf("[GPU] level-scheduling R16, #lev in L %d, #lev in U %d\n", lev.nlevL, lev.nlevU); printf(" time(s)=%f, Gflops=%5.3f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2); printf(" analysis time %f (%f) ", ta/REPEAT, ta/t2); } /*-------- copy x to host mem */ cudaMemcpy(x, d_x, n*sizeof(REAL), cudaMemcpyDeviceToHost); cudaFree(d_ia); cudaFree(d_ja); cudaFree(d_di); cudaFree(d_a); cudaFree(d_b); cudaFree(d_x); FreeLev(&lev); cudaFree(d_jlevL); cudaFree(d_jlevU); } // WARP __global__ void L_SOL_LEVR32(REAL *b, REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevL, int l1, int l2) { int i,k,jj; // num of warps int nw = gridDim.x*BLOCKDIM/WARP; // warp id int wid = (blockIdx.x*BLOCKDIM+threadIdx.x)/WARP; // thread lane in each warp int lane = threadIdx.x & (WARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+16]; for (i=l1+wid; i<l2; i+=nw) { jj = jlevL[i-1]-1; int p1 = ia[jj]; int q1 = di[jj]; REAL sum = 0.0; for (k=p1+lane; k<q1; k+=WARP) sum += a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+16]; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] 
= sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[q1-1]; x[jj] = t*(b[jj] - r[threadIdx.x]); } } } /*----------------- x = U^{-1}*x */ __global__ void U_SOL_LEVR32(REAL *x, REAL *a, int *ja, int *ia, int *di, int *jlevU, int l1, int l2) { int i,k,jj; // num of warps int nw = gridDim.x*BLOCKDIM/WARP; // warp id int wid = (blockIdx.x*BLOCKDIM+threadIdx.x)/WARP; // thread lane in each warp int lane = threadIdx.x & (WARP-1); // shared memory for patial result volatile __shared__ REAL r[BLOCKDIM+16]; for (i=l1+wid; i<l2; i+=nw) { jj = jlevU[i-1]-1; int p1 = di[jj]; int q1 = ia[jj+1]; REAL sum = 0.0; for (k=p1+1+lane; k<q1; k+=WARP) sum += a[k-1]*x[ja[k-1]-1]; // parallel reduction r[threadIdx.x] = sum; r[threadIdx.x] = sum = sum + r[threadIdx.x+16]; r[threadIdx.x] = sum = sum + r[threadIdx.x+8]; r[threadIdx.x] = sum = sum + r[threadIdx.x+4]; r[threadIdx.x] = sum = sum + r[threadIdx.x+2]; r[threadIdx.x] = sum = sum + r[threadIdx.x+1]; if (lane == 0) { REAL t = 1.0 / a[p1-1]; x[jj] = t*(x[jj] - r[threadIdx.x]); } } } //-------------------------------------------------------- void luSolvLevR32(int n, int nnz, struct csr_t *csr, REAL *x, REAL *b, int REPEAT, bool print) { int i, j, *d_ia, *d_ja, *d_di, *d_jlevL, *d_jlevU; REAL *d_a, *d_b, *d_x; double t1, t2, ta; struct level_t lev; allocLevel(n, &lev); cudaMalloc((void **)&d_jlevL, n*sizeof(int)); cudaMalloc((void **)&d_jlevU, n*sizeof(int)); /*------------------- allocate Device Memory */ cudaMalloc((void **)&d_ia, (n+1)*sizeof(int)); cudaMalloc((void **)&d_ja, nnz*sizeof(int)); cudaMalloc((void **)&d_di, n*sizeof(int)); cudaMalloc((void **)&d_a, nnz*sizeof(REAL)); cudaMalloc((void **)&d_b, n*sizeof(REAL)); cudaMalloc((void **)&d_x, n*sizeof(REAL)); /*------------------- Memcpy */ cudaMemcpy(d_ia, csr->ia, (n+1)*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_ja, csr->ja, nnz*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_a, csr->a, nnz*sizeof(REAL), cudaMemcpyHostToDevice); cudaMemcpy(d_di, csr->di, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, n*sizeof(REAL), cudaMemcpyHostToDevice); /*------------------- analysis */ ta = wall_timer(); if (!GPU_LEVEL) { for (int j=0; j<REPEAT; j++) { cudaMemcpy(csr->ia, d_ia, (n+1)*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(csr->ja, d_ja, nnz*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(csr->di, d_di, n*sizeof(int), cudaMemcpyDeviceToHost); makeLevelCSR(n, csr->ia, csr->ja, csr->di, &lev); cudaMemcpy(d_jlevL, lev.jlevL, n*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_jlevU, lev.jlevU, n*sizeof(int), cudaMemcpyHostToDevice); } } else { for (int j=0; j<REPEAT; j++) { int *d_dp, *d_ib, *d_jb, *d_db; cudaMalloc((void **)&d_dp, 2*n*sizeof(int)); cudaMalloc((void **)&d_jb, nnz*sizeof(int)); cudaMalloc((void **)&d_ib, (n+1)*sizeof(int)); cudaMalloc((void **)&d_db, n*sizeof(int)); int gDim = (n + BLOCKDIM - 1) / BLOCKDIM; LU_SOL_DYNR_INIT<<<gDim, BLOCKDIM>>>(n, d_ia, d_di, d_dp, d_dp+n); int nhwb = BLOCKDIM / HALFWARP; // number of half-warps per block gDim = (n + nhwb - 1) / nhwb; DYNR_ANA_1<<<gDim, BLOCKDIM>>>(n, d_ia, d_jb); //wrap raw pointer with a device_ptr to use with Thrust functions thrust::device_ptr<int> dev_data(d_jb); thrust::device_ptr<int> dev_keys(d_ja); thrust::stable_sort_by_key(dev_keys, dev_keys + nnz, dev_data); gDim = (nnz + BLOCKDIM-1) / BLOCKDIM; DYNR_ANA_2<<<gDim, BLOCKDIM>>>(n, nnz, d_jb, d_ja, d_ib, d_db); // double tt1 = wall_timer(); makeLevelCSC_SYNC(n, d_ib, d_jb, d_db, d_dp, d_jlevL, 
lev.ilevL, &lev.nlevL, d_jlevU, lev.ilevU, &lev.nlevU); // cudaThreadSynchronize(); // tt1 = wall_timer() - tt1; // printf("GPU LEVEL TIME %f\n", tt1); // copy again since it was changed by the sorting cudaMemcpy(d_ja, csr->ja, nnz*sizeof(int), cudaMemcpyHostToDevice); cudaFree(d_dp); cudaFree(d_ib); cudaFree(d_jb); cudaFree(d_db); } } ta = wall_timer() - ta; int ilev_shift; ilev_shift = lev.ilevL[0] == 0; for (int i=0; i < lev.nlevL+1; i++) { lev.ilevL[i] += ilev_shift; } ilev_shift = lev.ilevU[0] == 0; for (int i=0; i < lev.nlevU+1; i++) { lev.ilevU[i] += ilev_shift; } t1 = wall_timer(); for (j=0; j<REPEAT; j++) { // L-solve for (i=0; i<lev.nlevL; i++) { int l1 = lev.ilevL[i]; int l2 = lev.ilevL[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*WARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; L_SOL_LEVR32<<<gDim, bDim>>>(d_b, d_x, d_a, d_ja, d_ia, d_di, d_jlevL, l1, l2); } // U-solve for (i=0; i<lev.nlevU; i++) { int l1 = lev.ilevU[i]; int l2 = lev.ilevU[i+1]; int l_size = l2 - l1; int nthreads = min(l_size*WARP, MAXTHREADS); int gDim = (nthreads+BLOCKDIM-1)/BLOCKDIM; int bDim = BLOCKDIM; U_SOL_LEVR32<<<gDim, bDim>>>(d_x, d_a, d_ja, d_ia, d_di, d_jlevU, l1, l2); } } //Barrier for GPU calls cudaThreadSynchronize(); t2 = wall_timer() - t1; if (print) { printf("[GPU] level-scheduling R32, #lev in L %d, #lev in U %d\n", lev.nlevL, lev.nlevU); printf(" time(s)=%f, Gflops=%5.3f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2); printf(" analysis time %f (%f) ", ta/REPEAT, ta/t2); } /*-------- copy x to host mem */ cudaMemcpy(x, d_x, n*sizeof(REAL), cudaMemcpyDeviceToHost); cudaFree(d_ia); cudaFree(d_ja); cudaFree(d_di); cudaFree(d_a); cudaFree(d_b); cudaFree(d_x); FreeLev(&lev); cudaFree(d_jlevL); cudaFree(d_jlevU); }
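The systematic difference between the .cu file and its .hip counterpart in this pair is the one hipify applies throughout the dump: CUDA's triple-chevron launch kernel<<<grid, block, shmem, stream>>>(args) becomes hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), shmem, stream, args), and the runtime calls are renamed (cudaMalloc/cudaMemcpy/cudaFree to hipMalloc/hipMemcpy/hipFree). The following minimal sketch of the HIP side of that pattern is illustrative only; the kernel name scale and all sizes are hypothetical and not taken from the files above.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float *v, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

int main() {
    const int n = 1024;
    float *d_v = nullptr;
    hipMalloc((void **)&d_v, n * sizeof(float));   // CUDA: cudaMalloc(...)
    hipMemset(d_v, 0, n * sizeof(float));          // CUDA: cudaMemset(...)
    int block = 256, grid = (n + block - 1) / block;
    // CUDA form rewritten by hipify: scale<<<grid, block>>>(d_v, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_v, 2.0f, n);
    hipDeviceSynchronize();                        // CUDA: cudaDeviceSynchronize()
    hipFree(d_v);                                  // CUDA: cudaFree(...)
    printf("launched %d blocks of %d threads\n", grid, block);
    return 0;
}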
94beebf6b97adf45fce55f62827dc9ac1631d218.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" %%cu #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void reverseWord(char *str, char *rev_str){ int space = 0, start_index, end_index, len_word; int id = blockIdx.x * blockDim.x + threadIdx.x; char *ptr_start = str, *ptr_end, *ptr_space; ptr_space = ptr_start; while((*ptr_space) != ' ' && (*ptr_space) != '\0'){ ptr_space++; } space++; while(space <= id){ ptr_start = ptr_space + 1; ptr_space = ptr_start; while((*ptr_space) != ' ' && (*ptr_space) != '\0'){ ptr_space++; } space++; } ptr_end = ptr_space - 1; start_index = ptr_start - str; end_index = ptr_end - str; len_word = end_index-start_index+1; for(int i=start_index,j=end_index; i<=end_index,j>=start_index; i++,j--){ rev_str[i] = str[j]; } if(id == gridDim.x-1){ rev_str[end_index+1] = '\0'; } else{ rev_str[end_index+1] = ' '; } printf("id: %d, st_ind: %d, end_ind: %d, len: %d\n",id,start_index,end_index,len_word); } //string should end with a space int main(){ char str[1000] = "A Quick Brown Fox Jumps Over The Lazy dog", rev_str[1000]; int num_words = 9; printf("%s\n",str); for(int i=0;i<strlen(str); i++){ printf("%d",i%10); } printf("\n"); /* printf("enter number of words\n"); scanf("%d",&num_words); printf("enter string\n"); scanf("%s",str); */ int size = sizeof(char); int len = strlen(str)+1; char *d_a, *d_b; hipMalloc((void **)&d_a,size*len); hipMalloc((void **)&d_b,size*len); hipMemcpy(d_a,str,size*len,hipMemcpyHostToDevice); hipLaunchKernelGGL(( reverseWord), dim3(num_words),dim3(1), 0, 0, d_a,d_b); hipMemcpy(rev_str,d_b,size*len,hipMemcpyDeviceToHost); printf("reversed string is: %s\n",rev_str); hipFree(d_a); hipFree(d_b); }
94beebf6b97adf45fce55f62827dc9ac1631d218.cu
%%cu #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void reverseWord(char *str, char *rev_str){ int space = 0, start_index, end_index, len_word; int id = blockIdx.x * blockDim.x + threadIdx.x; char *ptr_start = str, *ptr_end, *ptr_space; ptr_space = ptr_start; while((*ptr_space) != ' ' && (*ptr_space) != '\0'){ ptr_space++; } space++; while(space <= id){ ptr_start = ptr_space + 1; ptr_space = ptr_start; while((*ptr_space) != ' ' && (*ptr_space) != '\0'){ ptr_space++; } space++; } ptr_end = ptr_space - 1; start_index = ptr_start - str; end_index = ptr_end - str; len_word = end_index-start_index+1; for(int i=start_index,j=end_index; i<=end_index,j>=start_index; i++,j--){ rev_str[i] = str[j]; } if(id == gridDim.x-1){ rev_str[end_index+1] = '\0'; } else{ rev_str[end_index+1] = ' '; } printf("id: %d, st_ind: %d, end_ind: %d, len: %d\n",id,start_index,end_index,len_word); } //string should end with a space int main(){ char str[1000] = "A Quick Brown Fox Jumps Over The Lazy dog", rev_str[1000]; int num_words = 9; printf("%s\n",str); for(int i=0;i<strlen(str); i++){ printf("%d",i%10); } printf("\n"); /* printf("enter number of words\n"); scanf("%d",&num_words); printf("enter string\n"); scanf("%s",str); */ int size = sizeof(char); int len = strlen(str)+1; char *d_a, *d_b; cudaMalloc((void **)&d_a,size*len); cudaMalloc((void **)&d_b,size*len); cudaMemcpy(d_a,str,size*len,cudaMemcpyHostToDevice); reverseWord<<<num_words,1>>>(d_a,d_b); cudaMemcpy(rev_str,d_b,size*len,cudaMemcpyDeviceToHost); printf("reversed string is: %s\n",rev_str); cudaFree(d_a); cudaFree(d_b); }
623c32135aa4dca4bcd0c141aa23fb6452ca8e59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include "gaussian_smooth.h" #define CUDA_KERNEL_LOOP(i ,n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<(n); i+= blockDim.x * gridDim.x) #define SQRT_2Pi 2.5066282746310002 #define Pi 3.1415926 const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N){ return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ float gaussian_weights_gen( const float sigma, const int center_i, const int center_j, const int k, const int l ){ // float coefficient = 1 / (2 * Pi * sigma * sigma); float L2_distance = (k - center_i) * (k - center_i) + (l - center_j) * (l - center_j); float exp_item = exp(-(L2_distance) / (2 * sigma * sigma)); // return coefficient * exp_item; return exp_item; } __device__ float gaussian_derivative_gen( const float sigma, const int center_i, const int center_j, const int k, const int l ){ float L2_distance = (k - center_i) * (k - center_i) + (l - center_j) * (l - center_j); float part_1 = exp(-(L2_distance) / (2 * sigma * sigma)); // float part_2 = L2_distance / (Pi * 2 * sigma * sigma * sigma * sigma * sigma); // float part_3 = 1 / (Pi * sigma * sigma * sigma); float part_2 = L2_distance / (sigma * sigma * sigma); // float part_3 = 2 / sigma; // return part_1 * (part_2 - part_3); return part_1 * part_2; } __global__ void gaussian_smooth_im2col_kernel( int n, const float* data_im, const float* sigma_map, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int num_channels, const int height_col, const int width_col, float* output ){ CUDA_KERNEL_LOOP(index, n){ const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col / height_col) % num_channels; const float sigma = sigma_map[h_col * width_col + w_col]; const int h_in = h_col * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_col * stride_w + (int)((kernel_w - 1 ) / 2); float* output_ptr = output + (c_im * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im + c_im * height * width; float collective_val = static_cast<float>(0); float collective_weight = static_cast<float>(0); for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float val = static_cast<float>(0); float weight = static_cast<float>(0); const int h_im = h_in + i; const int w_im = w_in + j; if (h_im >= 0 && w_im >= 0 && h_im <= height - 1 && w_im <= width - 1) { val = data_im_ptr[h_im * width + w_im]; weight = gaussian_weights_gen(sigma, h_in, w_in, h_im, w_im); collective_val += val * weight; collective_weight += weight; } } } *output_ptr = collective_val / collective_weight; } } __global__ void gaussian_smooth_col2im_kernel( const int n, const float* out_grad, const float* data_im, const float* sigma_map, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int num_channels, const int height_col, const int width_col, float* grad_sigma_map, float* grad_im ){ CUDA_KERNEL_LOOP(index, n){ const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col / height_col) % num_channels; const int cur_index = h_col * width_col + w_col; const float sigma = 
sigma_map[cur_index]; const int h_in = h_col * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_col * stride_w + (int)((kernel_w - 1 ) / 2); const int output_ptr_pos = (c_im * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im + c_im * height * width; float* grad_im_ptr = grad_im + c_im * height * width; const float out_grad_cur = out_grad[output_ptr_pos]; float sum_gaussian = static_cast<float>(0); float sum_dgaussian = static_cast<float>(0); float sum_x_gaussian = static_cast<float>(0); float sum_x_dgaussian = static_cast<float>(0); for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float val = static_cast<float>(0); float dweight = static_cast<float>(0); float weight = static_cast<float>(0); const int h_im = h_in + i; const int w_im = w_in + j; if (h_im >= 0 && w_im >= 0 && h_im <= height - 1 && w_im <= width - 1) { val = data_im_ptr[h_im * width + w_im]; dweight = gaussian_derivative_gen(sigma, h_in, w_in, h_im, w_im); weight = gaussian_weights_gen(sigma, h_in, w_in, h_im, w_im); sum_gaussian += weight; sum_dgaussian += dweight; sum_x_gaussian += val * weight; sum_x_dgaussian += val * dweight; } } } float sigma_gradient = (sum_x_dgaussian * sum_gaussian - sum_x_gaussian * sum_dgaussian) / (sum_gaussian * sum_gaussian); atomicAdd(grad_sigma_map + cur_index, sigma_gradient); // grad_im for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float weight = static_cast<float>(0); const int h_im = h_in + i; const int w_im = w_in + j; if (h_im >= 0 && w_im >= 0 && h_im <= height - 1 && w_im <= width - 1) { weight = gaussian_weights_gen(sigma, h_in, w_in, h_im, w_im); atomicAdd(grad_im_ptr + h_im * width + w_im, out_grad_cur * weight / sum_gaussian); } } } } } __global__ void set_zeros_kernel(const int n, float* data){ CUDA_KERNEL_LOOP(index, n){ *data = 0; } } void gaussian_smooth_im2col(hipStream_t stream, const float* data_im, const float* sigma, const int num_channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_out, const int width_out, float* data_col){ int num_kernels = num_channels * height_out * width_out; hipLaunchKernelGGL(( gaussian_smooth_im2col_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im, sigma, height, width, kernel_h, kernel_w, stride_h, stride_w, num_channels, height_out, width_out, data_col ); } void gaussian_smooth_col2im( hipStream_t stream, const float* out_grad, const float* data_im, const float* sigma, const int num_channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_out, const int width_out, float* grad_sigma_map, float* grad_im ){ int num_kernels = num_channels * height_out * width_out; hipLaunchKernelGGL(( gaussian_smooth_col2im_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, out_grad, data_im, sigma, height, width, kernel_h, kernel_w, stride_h, stride_w, num_channels, height_out, width_out, grad_sigma_map, grad_im ); } void set_zeros(hipStream_t stream, const int n, float* data){ hipLaunchKernelGGL(( set_zeros_kernel), dim3(GET_BLOCKS(n)), dim3(CUDA_NUM_THREADS), 0, stream, n, data); }
623c32135aa4dca4bcd0c141aa23fb6452ca8e59.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include "gaussian_smooth.h" #define CUDA_KERNEL_LOOP(i ,n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<(n); i+= blockDim.x * gridDim.x) #define SQRT_2Pi 2.5066282746310002 #define Pi 3.1415926 const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N){ return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ float gaussian_weights_gen( const float sigma, const int center_i, const int center_j, const int k, const int l ){ // float coefficient = 1 / (2 * Pi * sigma * sigma); float L2_distance = (k - center_i) * (k - center_i) + (l - center_j) * (l - center_j); float exp_item = exp(-(L2_distance) / (2 * sigma * sigma)); // return coefficient * exp_item; return exp_item; } __device__ float gaussian_derivative_gen( const float sigma, const int center_i, const int center_j, const int k, const int l ){ float L2_distance = (k - center_i) * (k - center_i) + (l - center_j) * (l - center_j); float part_1 = exp(-(L2_distance) / (2 * sigma * sigma)); // float part_2 = L2_distance / (Pi * 2 * sigma * sigma * sigma * sigma * sigma); // float part_3 = 1 / (Pi * sigma * sigma * sigma); float part_2 = L2_distance / (sigma * sigma * sigma); // float part_3 = 2 / sigma; // return part_1 * (part_2 - part_3); return part_1 * part_2; } __global__ void gaussian_smooth_im2col_kernel( int n, const float* data_im, const float* sigma_map, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int num_channels, const int height_col, const int width_col, float* output ){ CUDA_KERNEL_LOOP(index, n){ const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col / height_col) % num_channels; const float sigma = sigma_map[h_col * width_col + w_col]; const int h_in = h_col * stride_h + (int)((kernel_h - 1 ) / 2); const int w_in = w_col * stride_w + (int)((kernel_w - 1 ) / 2); float* output_ptr = output + (c_im * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im + c_im * height * width; float collective_val = static_cast<float>(0); float collective_weight = static_cast<float>(0); for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float val = static_cast<float>(0); float weight = static_cast<float>(0); const int h_im = h_in + i; const int w_im = w_in + j; if (h_im >= 0 && w_im >= 0 && h_im <= height - 1 && w_im <= width - 1) { val = data_im_ptr[h_im * width + w_im]; weight = gaussian_weights_gen(sigma, h_in, w_in, h_im, w_im); collective_val += val * weight; collective_weight += weight; } } } *output_ptr = collective_val / collective_weight; } } __global__ void gaussian_smooth_col2im_kernel( const int n, const float* out_grad, const float* data_im, const float* sigma_map, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int num_channels, const int height_col, const int width_col, float* grad_sigma_map, float* grad_im ){ CUDA_KERNEL_LOOP(index, n){ const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int c_im = (index / width_col / height_col) % num_channels; const int cur_index = h_col * width_col + w_col; const float sigma = sigma_map[cur_index]; const int h_in = h_col * stride_h + (int)((kernel_h - 1 ) / 2); const int 
w_in = w_col * stride_w + (int)((kernel_w - 1 ) / 2); const int output_ptr_pos = (c_im * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im + c_im * height * width; float* grad_im_ptr = grad_im + c_im * height * width; const float out_grad_cur = out_grad[output_ptr_pos]; float sum_gaussian = static_cast<float>(0); float sum_dgaussian = static_cast<float>(0); float sum_x_gaussian = static_cast<float>(0); float sum_x_dgaussian = static_cast<float>(0); for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float val = static_cast<float>(0); float dweight = static_cast<float>(0); float weight = static_cast<float>(0); const int h_im = h_in + i; const int w_im = w_in + j; if (h_im >= 0 && w_im >= 0 && h_im <= height - 1 && w_im <= width - 1) { val = data_im_ptr[h_im * width + w_im]; dweight = gaussian_derivative_gen(sigma, h_in, w_in, h_im, w_im); weight = gaussian_weights_gen(sigma, h_in, w_in, h_im, w_im); sum_gaussian += weight; sum_dgaussian += dweight; sum_x_gaussian += val * weight; sum_x_dgaussian += val * dweight; } } } float sigma_gradient = (sum_x_dgaussian * sum_gaussian - sum_x_gaussian * sum_dgaussian) / (sum_gaussian * sum_gaussian); atomicAdd(grad_sigma_map + cur_index, sigma_gradient); // grad_im for (int i = - (int)(kernel_h / 2); i <= (int)(kernel_h / 2); ++i) { for (int j = - (int)(kernel_w / 2); j <= (int)(kernel_w / 2); ++j) { float weight = static_cast<float>(0); const int h_im = h_in + i; const int w_im = w_in + j; if (h_im >= 0 && w_im >= 0 && h_im <= height - 1 && w_im <= width - 1) { weight = gaussian_weights_gen(sigma, h_in, w_in, h_im, w_im); atomicAdd(grad_im_ptr + h_im * width + w_im, out_grad_cur * weight / sum_gaussian); } } } } } __global__ void set_zeros_kernel(const int n, float* data){ CUDA_KERNEL_LOOP(index, n){ *data = 0; } } void gaussian_smooth_im2col(cudaStream_t stream, const float* data_im, const float* sigma, const int num_channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_out, const int width_out, float* data_col){ int num_kernels = num_channels * height_out * width_out; gaussian_smooth_im2col_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im, sigma, height, width, kernel_h, kernel_w, stride_h, stride_w, num_channels, height_out, width_out, data_col ); } void gaussian_smooth_col2im( cudaStream_t stream, const float* out_grad, const float* data_im, const float* sigma, const int num_channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int height_out, const int width_out, float* grad_sigma_map, float* grad_im ){ int num_kernels = num_channels * height_out * width_out; gaussian_smooth_col2im_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, out_grad, data_im, sigma, height, width, kernel_h, kernel_w, stride_h, stride_w, num_channels, height_out, width_out, grad_sigma_map, grad_im ); } void set_zeros(cudaStream_t stream, const int n, float* data){ set_zeros_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS, 0, stream>>>(n, data); }
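Both versions in this pair rely on the grid-stride loop idiom: the CUDA_KERNEL_LOOP macro lets a launch of GET_BLOCKS(n) blocks of CUDA_NUM_THREADS threads cover an arbitrary element count n, with each thread striding by blockDim.x * gridDim.x. The stand-alone sketch below shows the same idiom with hypothetical names (fill_kernel, sizes) and HIP launch syntax to match the .hip column; it is an illustration, not code from the files above.

#include <hip/hip_runtime.h>

// Grid-stride loop: each thread handles index, index+stride, index+2*stride, ...
__global__ void fill_kernel(float *data, float value, int n) {
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        data[i] = value;
}

int main() {
    const int n = 1 << 20, threads = 1024;
    int blocks = (n + threads - 1) / threads;   // same rounding as GET_BLOCKS
    float *d = nullptr;
    hipMalloc((void **)&d, n * sizeof(float));
    hipLaunchKernelGGL(fill_kernel, dim3(blocks), dim3(threads), 0, 0, d, 1.0f, n);
    hipDeviceSynchronize();
    hipFree(d);
    return 0;
}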
7c772d065e78d87961155068b7bc7277f972110f.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cu_log.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *src = NULL; hipMalloc(&src, XSIZE*YSIZE); float *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); const int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cu_log), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cu_log), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cu_log), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dst,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7c772d065e78d87961155068b7bc7277f972110f.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cu_log.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); float *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); const int n = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cu_log<<<gridBlock,threadBlock>>>(src,dst,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cu_log<<<gridBlock,threadBlock>>>(src,dst,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cu_log<<<gridBlock,threadBlock>>>(src,dst,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
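The harness in this pair times 1000 launches with std::chrono::steady_clock but only synchronizes before the timed loop, so the measured interval mostly reflects launch enqueue cost plus whatever work is still in flight. If the intent were to time GPU completion as well, one could also synchronize after the loop; the sketch below shows that variant under that assumption, with a hypothetical noop_kernel standing in for cu_log, which is not included here.

#include <hip/hip_runtime.h>
#include <chrono>
#include <cstdio>

__global__ void noop_kernel(int) {}   // stand-in for the real kernel under test

int main() {
    using namespace std::chrono;
    hipLaunchKernelGGL(noop_kernel, dim3(1), dim3(1), 0, 0, 0);   // warm-up
    hipDeviceSynchronize();                                       // drain before timing
    auto start = steady_clock::now();
    for (int i = 0; i < 1000; ++i)
        hipLaunchKernelGGL(noop_kernel, dim3(1), dim3(1), 0, 0, 0);
    hipDeviceSynchronize();                                       // include GPU completion
    auto end = steady_clock::now();
    auto usecs = duration_cast<duration<float, std::micro> >(end - start);
    printf("avg %f us per launch+run\n", usecs.count() / 1000.0f);
    return 0;
}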
0b5064bcc2073b9206765ac9313ba0f51f080551.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cu_testAtomic_impl.h" #include <bvh_common.h> __global__ void testAtomic_kernel(int * obin, int * idata, int h, int n) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= n) return; int d = idata[ind]; atomicAdd(&obin[d/h], 1); } __global__ void setZero_kernel(int * obin, int n) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= n) return; obin[ind] = 0; } __global__ void addOne_kernel(int * obin, int n) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= n) return; obin[ind] += 1; } extern "C" { void cu_testAtomic(int * obin, int * idata, int h, int n) { const int tpb = 512; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(n, tpb); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( testAtomic_kernel), dim3(grid), dim3(block), 0, 0, obin, idata, h, n); } void cu_setZero(int * obin, int n) { const int tpb = 512; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(n, tpb); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( setZero_kernel), dim3(grid), dim3(block), 0, 0, obin, n); } void cu_addOne(int * obin, int n) { const int tpb = 512; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(n, tpb); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( addOne_kernel), dim3(grid), dim3(block), 0, 0, obin, n); } }
0b5064bcc2073b9206765ac9313ba0f51f080551.cu
#include "cu_testAtomic_impl.h" #include <bvh_common.h> __global__ void testAtomic_kernel(int * obin, int * idata, int h, int n) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= n) return; int d = idata[ind]; atomicAdd(&obin[d/h], 1); } __global__ void setZero_kernel(int * obin, int n) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= n) return; obin[ind] = 0; } __global__ void addOne_kernel(int * obin, int n) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; if(ind >= n) return; obin[ind] += 1; } extern "C" { void cu_testAtomic(int * obin, int * idata, int h, int n) { const int tpb = 512; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(n, tpb); dim3 grid(nblk, 1, 1); testAtomic_kernel<<< grid, block>>>(obin, idata, h, n); } void cu_setZero(int * obin, int n) { const int tpb = 512; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(n, tpb); dim3 grid(nblk, 1, 1); setZero_kernel<<< grid, block>>>(obin, n); } void cu_addOne(int * obin, int n) { const int tpb = 512; dim3 block(tpb, 1, 1); unsigned nblk = iDivUp(n, tpb); dim3 grid(nblk, 1, 1); addOne_kernel<<< grid, block>>>(obin, n); } }
e0809c9da51c7709ef71ec02c3b189634becb346.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> // CUDA kernel. Each thread takes care of one element of c __global__ void vecAdd(double *a, double *b, double *c, int n) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = a[id] * b[id]; } int main( int argc, char* argv[] ) { // Size of vectors int n = 500000; // Host input vectors double *h_a; double *h_b; //Host output vector double *h_c; // Device input vectors double *d_a; double *d_b; //Device output vector double *d_c; // Size, in bytes, of each vector size_t bytes = n*sizeof(double); // Allocate memory for each vector on host h_a = (double*)malloc(bytes); h_b = (double*)malloc(bytes); h_c = (double*)malloc(bytes); // Allocate memory for each vector on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); int i; // Initialize vectors on host for( i = 0; i < n; i++ ) { h_a[i] = 2.0f; h_b[i] = 3.0f; } // Copy host vectors to device hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = (int)ceil((float)n/blockSize); // Execute the kernel hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n); // Copy array back to host hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost ); // Sum up vector c and print result divided by n, this should equal 1 within error double sum = 0; for(i=0; i<n; i++) sum += h_c[i]; printf("final result: %f\n", sum/n); // Release device memory hipFree(d_a); hipFree(d_b); hipFree(d_c); // Release host memory free(h_a); free(h_b); free(h_c); return 0; }
e0809c9da51c7709ef71ec02c3b189634becb346.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> // CUDA kernel. Each thread takes care of one element of c __global__ void vecAdd(double *a, double *b, double *c, int n) { // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = a[id] * b[id]; } int main( int argc, char* argv[] ) { // Size of vectors int n = 500000; // Host input vectors double *h_a; double *h_b; //Host output vector double *h_c; // Device input vectors double *d_a; double *d_b; //Device output vector double *d_c; // Size, in bytes, of each vector size_t bytes = n*sizeof(double); // Allocate memory for each vector on host h_a = (double*)malloc(bytes); h_b = (double*)malloc(bytes); h_c = (double*)malloc(bytes); // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); int i; // Initialize vectors on host for( i = 0; i < n; i++ ) { h_a[i] = 2.0f; h_b[i] = 3.0f; } // Copy host vectors to device cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice); int blockSize, gridSize; // Number of threads in each thread block blockSize = 1024; // Number of thread blocks in grid gridSize = (int)ceil((float)n/blockSize); // Execute the kernel vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n); // Copy array back to host cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost ); // Sum up vector c and print result divided by n, this should equal 1 within error double sum = 0; for(i=0; i<n; i++) sum += h_c[i]; printf("final result: %f\n", sum/n); // Release device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); // Release host memory free(h_a); free(h_b); free(h_c); return 0; }
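Like most samples in this dump, the vecAdd pair never checks the return codes of the runtime calls, so allocation or launch failures pass silently. The sketch below is one hedged way such checks could be added around the same HIP calls; the HIP_CHECK macro is a hypothetical helper written for this illustration and is not part of the original file.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Abort with a readable message if a HIP runtime call fails.
#define HIP_CHECK(call)                                                   \
    do {                                                                  \
        hipError_t err_ = (call);                                         \
        if (err_ != hipSuccess) {                                         \
            fprintf(stderr, "HIP error %s at %s:%d\n",                    \
                    hipGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

int main() {
    double *d_a = nullptr;
    size_t bytes = 500000 * sizeof(double);
    HIP_CHECK(hipMalloc((void **)&d_a, bytes));   // fails loudly instead of silently
    HIP_CHECK(hipMemset(d_a, 0, bytes));
    // ... a kernel launch would go here; hipGetLastError() catches launch errors ...
    HIP_CHECK(hipGetLastError());
    HIP_CHECK(hipDeviceSynchronize());
    HIP_CHECK(hipFree(d_a));
    return 0;
}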
18ff04c373826893bd89ecde91c5ddde1ff6488f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> __global__ void print_from_gpu(void) { printf("Hello World! from thread [%d,%d] \ From device\n", threadIdx.x,blockIdx.x); } int main(void) { printf("Hello World from host!\n"); hipLaunchKernelGGL(( print_from_gpu), dim3(1),dim3(1), 0, 0, ); hipDeviceSynchronize(); return 0; }
18ff04c373826893bd89ecde91c5ddde1ff6488f.cu
#include<stdio.h> #include<stdlib.h> __global__ void print_from_gpu(void) { printf("Hello World! from thread [%d,%d] \ From device\n", threadIdx.x,blockIdx.x); } int main(void) { printf("Hello World from host!\n"); print_from_gpu<<<1,1>>>(); cudaDeviceSynchronize(); return 0; }
6cfc20be90c7e52ea30253e0e02a22d62031f013.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Simplified simulation of life evolution * * Computacion Paralela, Grado en Informatica (Universidad de Valladolid) * 2019/2020 * * v1.5 * * CHANGES: * 1) Float values have been substituted by fixed point arithmetics * using integers. To simplify, the fixed point arithmetics are done * with PRECISION in base 10. See precision constant in int_float.h * 2) It uses a portable approximation to trigonometric functions using * Taylor polynomials. * 3) nrand48 function has been extracted from glibc source code and * its internal API simplified to allow its use in the GPU. * * (c) 2020, Arturo Gonzalez Escribano */ #include<stdio.h> #include<stdlib.h> #include<string.h> #include<math.h> #include<stdbool.h> #include<cputils.h> #include<cuda.h> #include<int_float.h> /* * Constants: Converted to fixed point with the given PRECISION */ #define ENERGY_NEEDED_TO_LIVE PRECISION / 10 // Equivalent to 0.1 #define ENERGY_NEEDED_TO_MOVE PRECISION // Equivalent to 1.0 #define ENERGY_SPENT_TO_LIVE PRECISION / 5 // Equivalent to 0.2 #define ENERGY_SPENT_TO_MOVE PRECISION // Equivalent to 1.0 #define ENERGY_NEEDED_TO_SPLIT PRECISION * 20 // Equivalent to 20.0 /* Structure to store data of a cell */ typedef struct { int pos_row, pos_col; // Position int mov_row, mov_col; // Direction of movement int choose_mov[3]; // Genes: Probabilities of 0 turning-left; 1 advance; 2 turning-right int storage; // Food/Energy stored int age; // Number of steps that the cell has been alive unsigned short random_seq[3]; // Status value of its particular random sequence bool alive; // Flag indicating if the cell is still alive } Cell; /* Structure for simulation statistics */ typedef struct { int history_total_cells; // Accumulated number of cells created int history_dead_cells; // Accumulated number of dead cells int history_max_alive_cells; // Maximum number of cells alive in a step int history_max_new_cells; // Maximum number of cells created in a step int history_max_dead_cells; // Maximum number of cells died in a step int history_max_age; // Maximum age achieved by a cell int history_max_food; // Maximum food level in a position of the culture } Statistics; /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * * USE THIS SPACE FOR YOUR KERNEL OR DEVICE FUNTIONS * */ #include "taylor_trig.h" #include "glibc_nrand48.h" /* * Get an uniformly distributed random number between 0 and max * It uses glibc_nrand, that returns a number between 0 and 2^31 */ #define int_urand48( max, seq ) (int)( (long)(max) * glibc_nrand48( seq ) / 2147483648 ) /* * Macro function to simplify accessing with two coordinates to a flattened array * This macro-function can be modified by the students if needed * */ #define accessMat( arr, exp1, exp2 ) arr[ (int)(exp1) * columns + (int)(exp2) ] /* * Function: Choose a new direction of movement for a cell * This function can be changed and/or optimized by the students */ void cell_new_direction( Cell *cell ) { int angle = int_urand48( INT_2PI, cell->random_seq ); cell->mov_row = taylor_sin( angle ); cell->mov_col = taylor_cos( angle ); } /* * Function: Mutation of the movement genes on a new cell * This function can be changed and/or optimized by the students */ void cell_mutation( Cell *cell ) { /* 1. 
Select which genes change: 0 Left grows taking part of the Advance part 1 Advance grows taking part of the Left part 2 Advance grows taking part of the Right part 3 Right grows taking part of the Advance part */ int mutation_type = int_urand48( 4, cell->random_seq ); /* 2. Select the amount of mutation (up to 50%) */ int mutation_percentage = int_urand48( PRECISION / 2, cell->random_seq ); /* 3. Apply the mutation */ int mutation_value; switch( mutation_type ) { case 0: mutation_value = intfloatMult( cell->choose_mov[1] , mutation_percentage ); cell->choose_mov[1] -= mutation_value; cell->choose_mov[0] += mutation_value; break; case 1: mutation_value = intfloatMult( cell->choose_mov[0] , mutation_percentage ); cell->choose_mov[0] -= mutation_value; cell->choose_mov[1] += mutation_value; break; case 2: mutation_value = intfloatMult( cell->choose_mov[2] , mutation_percentage ); cell->choose_mov[2] -= mutation_value; cell->choose_mov[1] += mutation_value; break; case 3: mutation_value = intfloatMult( cell->choose_mov[1] , mutation_percentage ); cell->choose_mov[1] -= mutation_value; cell->choose_mov[2] += mutation_value; break; } /* 4. Correct potential precision problems */ cell->choose_mov[2] = PRECISION - cell->choose_mov[1] - cell->choose_mov[0]; } /* * CUDA block reduction * Inputs: * Device pointer to an array of int of any size * Size of the array * Device pointer to an int to store the result * * Launching parameters: * One-dimesional grid of any size * Any valid block size * Dynamic shared memory size equal to: sizeof(int) * block size * * (c) 2020, Arturo Gonzalez-Escribano * Simplification for an assignment in a Parallel Computing course, * Computing Engineering Degree, Universidad de Valladolid * Academic year 2019/2020 */ __global__ void reductionMax(int* array, int size, int *result) { int globalPos = threadIdx.x + blockIdx.x * blockDim.x; extern __shared__ int buffer[ ]; if ( globalPos < size ) { buffer[ threadIdx.x ] = array[ globalPos ]; } else buffer[ threadIdx.x ] = 0.0f; __syncthreads(); for( int step=blockDim.x/2; step>=1; step /= 2 ) { if ( threadIdx.x < step ) if ( buffer[ threadIdx.x ] < buffer[ threadIdx.x + step ] ) buffer[ threadIdx.x ] = buffer[ threadIdx.x + step ]; if ( step > 32 ) __syncthreads(); } if ( threadIdx.x == 0 ) atomicMax( result, buffer[0] ); } /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ #ifdef DEBUG /* * Function: Print the current state of the simulation */ void print_status( int iteration, int rows, int columns, int *culture, int num_cells, Cell *cells, int num_cells_alive, Statistics sim_stat ) { /* * You don't need to optimize this function, it is only for pretty printing and debugging purposes. * It is not compiled in the production versions of the program. 
* Thus, it is never used when measuring times in the leaderboard */ int i,j; printf("Iteration: %d\n", iteration ); printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); for( i=0; i<rows; i++ ) { printf("|"); for( j=0; j<columns; j++ ) { char symbol; if ( accessMat( culture, i, j ) >= 20 * PRECISION ) symbol = '+'; else if ( accessMat( culture, i, j ) >= 10 * PRECISION ) symbol = '*'; else if ( accessMat( culture, i, j ) >= 5 * PRECISION ) symbol = '.'; else symbol = ' '; int t; int counter = 0; for( t=0; t<num_cells; t++ ) { int row = (int)(cells[t].pos_row / PRECISION); int col = (int)(cells[t].pos_col / PRECISION); if ( cells[t].alive && row == i && col == j ) { counter ++; } } if ( counter > 9 ) printf("(M)" ); else if ( counter > 0 ) printf("(%1d)", counter ); else printf(" %c ", symbol ); } printf("|\n"); } printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); printf("Num_cells_alive: %04d\nHistory( Cells: %04d, Dead: %04d, Max.alive: %04d, Max.new: %04d, Max.dead: %04d, Max.age: %04d, Max.food: %6f )\n\n", num_cells_alive, sim_stat.history_total_cells, sim_stat.history_dead_cells, sim_stat.history_max_alive_cells, sim_stat.history_max_new_cells, sim_stat.history_max_dead_cells, sim_stat.history_max_age, (float)sim_stat.history_max_food / PRECISION ); } #endif /* * Function: Print usage line in stderr */ void show_usage( char *program_name ) { fprintf(stderr,"Usage: %s ", program_name ); fprintf(stderr,"<rows> <columns> <maxIter> <max_food> <food_density> <food_level> <short_rnd1> <short_rnd2> <short_rnd3> <num_cells>\n"); fprintf(stderr,"\tOptional arguments for special food spot: [ <row> <col> <size_rows> <size_cols> <density> <level> ]\n"); fprintf(stderr,"\n"); } /* * MAIN PROGRAM */ int main(int argc, char *argv[]) { int i,j; // Simulation data int max_iter; // Maximum number of simulation steps int rows, columns; // Cultivation area sizes int *culture; // Cultivation area values int *culture_cells; // Ancillary structure to count the number of cells in a culture space float max_food; // Maximum level of food on any position float food_density; // Number of food sources introduced per step float food_level; // Maximum number of food level in a new source bool food_spot_active = false; // Special food spot: Active int food_spot_row = 0; // Special food spot: Initial row int food_spot_col = 0; // Special food spot: Initial row int food_spot_size_rows = 0; // Special food spot: Rows size int food_spot_size_cols = 0; // Special food spot: Cols size float food_spot_density = 0.0f; // Special food spot: Food density float food_spot_level = 0.0f; // Special food spot: Food level unsigned short init_random_seq[3]; // Status of the init random sequence unsigned short food_random_seq[3]; // Status of the food random sequence unsigned short food_spot_random_seq[3]; // Status of the special food spot random sequence int num_cells; // Number of cells currently stored in the list Cell *cells; // List to store cells information // Statistics Statistics sim_stat; sim_stat.history_total_cells = 0; sim_stat.history_dead_cells = 0; sim_stat.history_max_alive_cells = 0; sim_stat.history_max_new_cells = 0; sim_stat.history_max_dead_cells = 0; sim_stat.history_max_age = 0; sim_stat.history_max_food = 0.0f; /* 1. Read simulation arguments */ /* 1.1. Check minimum number of arguments */ if (argc < 11) { fprintf(stderr, "-- Error: Not enough arguments when reading configuration from the command line\n\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } /* 1.2. 
Read culture sizes, maximum number of iterations */ rows = atoi( argv[1] ); columns = atoi( argv[2] ); max_iter = atoi( argv[3] ); /* 1.3. Food data */ max_food = atof( argv[4] ); food_density = atof( argv[5] ); food_level = atof( argv[6] ); /* 1.4. Read random sequences initializer */ for( i=0; i<3; i++ ) { init_random_seq[i] = (unsigned short)atoi( argv[7+i] ); } /* 1.5. Read number of cells */ num_cells = atoi( argv[10] ); /* 1.6. Read special food spot */ if (argc > 11 ) { if ( argc < 17 ) { fprintf(stderr, "-- Error in number of special-food-spot arguments in the command line\n\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } else { food_spot_active = true; food_spot_row = atoi( argv[11] ); food_spot_col = atoi( argv[12] ); food_spot_size_rows = atoi( argv[13] ); food_spot_size_cols = atoi( argv[14] ); food_spot_density = atof( argv[15] ); food_spot_level = atof( argv[16] ); // Check non-used trailing arguments if ( argc > 17 ) { fprintf(stderr, "-- Error: too many arguments in the command line\n\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } } } #ifdef DEBUG /* 1.7. Print arguments */ printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter); printf("Arguments, Max.food: %f, Food density: %f, Food level: %f\n", max_food, food_density, food_level); printf("Arguments, Init Random Sequence: %hu,%hu,%hu\n", init_random_seq[0], init_random_seq[1], init_random_seq[2]); if ( food_spot_active ) { printf("Arguments, Food_spot, pos(%d,%d), size(%d,%d), Density: %f, Level: %f\n", food_spot_row, food_spot_col, food_spot_size_rows, food_spot_size_cols, food_spot_density, food_spot_level ); } printf("Initial cells: %d\n", num_cells ); #endif // DEBUG /* 1.8. Initialize random sequences for food dropping */ for( i=0; i<3; i++ ) { food_random_seq[i] = (unsigned short)glibc_nrand48( init_random_seq ); food_spot_random_seq[i] = (unsigned short)glibc_nrand48( init_random_seq ); } /* 1.9. Initialize random sequences of cells */ cells = (Cell *)malloc( sizeof(Cell) * (size_t)num_cells ); if ( cells == NULL ) { fprintf(stderr,"-- Error allocating: %d cells\n", num_cells ); exit( EXIT_FAILURE ); } for( i=0; i<num_cells; i++ ) { // Initialize the cell ramdom sequences for( j=0; j<3; j++ ) cells[i].random_seq[j] = (unsigned short)glibc_nrand48( init_random_seq ); } #ifdef DEBUG /* 1.10. Print random seed of the initial cells */ /* printf("Initial cells random seeds: %d\n", num_cells ); for( i=0; i<num_cells; i++ ) printf("\tCell %d, Random seq: %hu,%hu,%hu\n", i, cells[i].random_seq[0], cells[i].random_seq[1], cells[i].random_seq[2] ); */ #endif // DEBUG // CUDA start hipSetDevice(0); hipDeviceSynchronize(); /* 2. Start global timer */ double ttotal = cp_Wtime(); /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * */ #include "cuda_check.h" #include "cuda_time.h" /* 3. 
Initialize culture surface and initial cells */ culture = (int *)malloc( sizeof(int) * (size_t)rows * (size_t)columns ); culture_cells = (int *)malloc( sizeof(int) * (size_t)rows * (size_t)columns ); #ifdef DEVELOPMENT if ( culture == NULL || culture_cells == NULL ) { fprintf(stderr,"-- Error allocating culture structures for size: %d x %d \n", rows, columns ); exit( EXIT_FAILURE ); } #endif // DEVELOPMENT // 3.1 time_start(); for( i=0; i<rows; i++ ) for( j=0; j<columns; j++ ) accessMat( culture, i, j ) = 0; time_end(time3_1); // 3.2 time_start(); for( i=0; i<num_cells; i++ ) { cells[i].alive = true; // Initial age: Between 1 and 20 cells[i].age = 1 + int_urand48( 19, cells[i].random_seq ); // Initial storage: Between 10 and 20 units cells[i].storage = 10 * PRECISION + int_urand48( 10 * PRECISION, cells[i].random_seq ); // Initial position: Anywhere in the culture arena cells[i].pos_row = int_urand48( rows * PRECISION, cells[i].random_seq ); cells[i].pos_col = int_urand48( columns * PRECISION, cells[i].random_seq ); // Movement direction: Unity vector in a random direction cell_new_direction( &cells[i] ); // Movement genes: Probabilities of advancing or changing direction: The sum should be 1.00 cells[i].choose_mov[0] = PRECISION / 3; cells[i].choose_mov[2] = PRECISION / 3; cells[i].choose_mov[1] = PRECISION - cells[i].choose_mov[0] - cells[i].choose_mov[2]; } // Statistics: Initialize total number of cells, and max. alive sim_stat.history_total_cells = num_cells; sim_stat.history_max_alive_cells = num_cells; time_end(time3_2); #ifdef DEBUG /* Show initial cells data */ printf("Initial cells data: %d\n", num_cells ); for( i=0; i<num_cells; i++ ) { printf("\tCell %d, Pos(%f,%f), Mov(%f,%f), Choose_mov(%f,%f,%f), Storage: %f, Age: %d\n", i, (float)cells[i].pos_row / PRECISION, (float)cells[i].pos_col / PRECISION, (float)cells[i].mov_row / PRECISION, (float)cells[i].mov_col / PRECISION, (float)cells[i].choose_mov[0] / PRECISION, (float)cells[i].choose_mov[1] / PRECISION, (float)cells[i].choose_mov[2] / PRECISION, (float)cells[i].storage / PRECISION, cells[i].age ); } #endif // DEBUG /* 4. Simulation */ int current_max_food = 0; int num_cells_alive = num_cells; int iter; int max_food_int = max_food * PRECISION; int num_new_sources = (int)(rows * columns * food_density); int num_new_sources_spot = food_spot_active ? (int)(food_spot_size_rows * food_spot_size_cols * food_spot_density) : 0; for( iter=0; iter<max_iter && current_max_food <= max_food_int && num_cells_alive > 0; iter++ ) { update_times(); int step_new_cells = 0; int step_dead_cells = 0; /* 4.1. Spreading new food */ time_start(); // Across the whole culture for (i=0; i<num_new_sources; i++) { int row = int_urand48( rows, food_random_seq ); int col = int_urand48( columns, food_random_seq ); int food = int_urand48( food_level * PRECISION, food_random_seq ); accessMat( culture, row, col ) = accessMat( culture, row, col ) + food; } // In the special food spot if ( food_spot_active ) { for (i=0; i<num_new_sources_spot; i++) { int row = food_spot_row + int_urand48( food_spot_size_rows, food_spot_random_seq ); int col = food_spot_col + int_urand48( food_spot_size_cols, food_spot_random_seq ); int food = int_urand48( food_spot_level * PRECISION, food_spot_random_seq ); accessMat( culture, row, col ) = accessMat( culture, row, col ) + food; } } time_end(time4_1); /* 4.2. Prepare ancillary data structures */ time_start(); /* 4.2.1. 
Clear ancillary structure of the culture to account alive cells in a position after movement */ for( i=0; i<rows; i++ ) for( j=0; j<columns; j++ ) accessMat( culture_cells, i, j ) = 0; /* 4.2.2. Allocate ancillary structure to store the food level to be shared by cells in the same culture place */ int *food_to_share = (int *)malloc( sizeof(int) * num_cells ); #ifdef DEVELOPMENT if ( food_to_share == NULL ) { fprintf(stderr,"-- Error allocating food_to_share structures for size: %d x %d \n", rows, columns ); exit( EXIT_FAILURE ); } #endif // DEVELOPMENT time_end(time4_2); /* 4.3. Cell movements */ time_start(); for (i=0; i<num_cells; i++) { if ( cells[i].alive ) { cells[i].age ++; // Statistics: Max age of a cell in the simulation history if ( cells[i].age > sim_stat.history_max_age ) sim_stat.history_max_age = cells[i].age; /* 4.3.1. Check if the cell has the needed energy to move or keep alive */ if ( cells[i].storage < ENERGY_NEEDED_TO_LIVE ) { // Cell has died cells[i].alive = false; num_cells_alive --; step_dead_cells ++; continue; } if ( cells[i].storage < ENERGY_NEEDED_TO_MOVE ) { // Almost dying cell, it cannot move, only if enough food is dropped here it will survive cells[i].storage -= ENERGY_SPENT_TO_LIVE; } else { // Consume energy to move cells[i].storage -= ENERGY_SPENT_TO_MOVE; /* 4.3.2. Choose movement direction */ int prob = int_urand48( PRECISION, cells[i].random_seq ); if ( prob < cells[i].choose_mov[0] ) { // Turn left (90 degrees) int tmp = cells[i].mov_col; cells[i].mov_col = cells[i].mov_row; cells[i].mov_row = -tmp; } else if ( prob >= cells[i].choose_mov[0] + cells[i].choose_mov[1] ) { // Turn right (90 degrees) int tmp = cells[i].mov_row; cells[i].mov_row = cells[i].mov_col; cells[i].mov_col = -tmp; } // else do not change the direction /* 4.3.3. Update position moving in the choosen direction*/ cells[i].pos_row += cells[i].mov_row; cells[i].pos_col += cells[i].mov_col; // Periodic arena: Left/Rigth edges are connected, Top/Bottom edges are connected if ( cells[i].pos_row < 0 ) cells[i].pos_row += rows * PRECISION; if ( cells[i].pos_row >= rows * PRECISION) cells[i].pos_row -= rows * PRECISION; if ( cells[i].pos_col < 0 ) cells[i].pos_col += columns * PRECISION; if ( cells[i].pos_col >= columns * PRECISION) cells[i].pos_col -= columns * PRECISION; } /* 4.3.4. Annotate that there is one more cell in this culture position */ accessMat( culture_cells, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ) += 1; /* 4.3.5. Annotate the amount of food to be shared in this culture position */ food_to_share[i] = accessMat( culture, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ); } } // End cell movements time_end(time4_3); /* 4.4. Cell actions */ time_start(); // Space for the list of new cells (maximum number of new cells is num_cells) Cell *new_cells = (Cell *)malloc( sizeof(Cell) * num_cells ); #ifdef DEVELOPMENT if ( new_cells == NULL ) { fprintf(stderr,"-- Error allocating new cells structures for: %d cells\n", num_cells ); exit( EXIT_FAILURE ); } #endif // DEVELOPMENT for (i=0; i<num_cells; i++) { if ( cells[i].alive ) { /* 4.4.1. Food harvesting */ int food = food_to_share[i]; int count = accessMat( culture_cells, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ); int my_food = food / count; cells[i].storage += my_food; /* 4.4.2. 
Split cell if the conditions are met: Enough maturity and energy */ if ( cells[i].age > 30 && cells[i].storage > ENERGY_NEEDED_TO_SPLIT ) { // Split: Create new cell num_cells_alive ++; sim_stat.history_total_cells ++; step_new_cells ++; // New cell is a copy of parent cell new_cells[ step_new_cells-1 ] = cells[i]; // Split energy stored and update age in both cells cells[i].storage /= 2; new_cells[ step_new_cells-1 ].storage /= 2; cells[i].age = 1; new_cells[ step_new_cells-1 ].age = 1; // Random seed for the new cell, obtained using the parent random sequence new_cells[ step_new_cells-1 ].random_seq[0] = (unsigned short)glibc_nrand48( cells[i].random_seq ); new_cells[ step_new_cells-1 ].random_seq[1] = (unsigned short)glibc_nrand48( cells[i].random_seq ); new_cells[ step_new_cells-1 ].random_seq[2] = (unsigned short)glibc_nrand48( cells[i].random_seq ); // Both cells start in random directions cell_new_direction( &cells[i] ); cell_new_direction( &new_cells[ step_new_cells-1 ] ); // Mutations of the movement genes in both cells cell_mutation( &cells[i] ); cell_mutation( &new_cells[ step_new_cells-1 ] ); } } } // End cell actions time_end(time4_4); /* 4.5. Clean ancillary data structures */ time_start(); /* 4.5.1. Clean the food consumed by the cells in the culture data structure */ for (i=0; i<num_cells; i++) { if ( cells[i].alive ) { accessMat( culture, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ) = 0; } } /* 4.5.2. Free the ancillary data structure to store the food to be shared */ free( food_to_share ); time_end(time4_5); /* 4.6. Clean dead cells from the original list */ time_start(); // 4.6.1. Move alive cells to the left to substitute dead cells int free_position = 0; int alive_in_main_list = 0; for( i=0; i<num_cells; i++ ) { if ( cells[i].alive ) { alive_in_main_list ++; if ( free_position != i ) { cells[free_position] = cells[i]; } free_position ++; } } // 4.6.2. Reduce the storage space of the list to the current number of cells num_cells = alive_in_main_list; cells = (Cell *)realloc( cells, sizeof(Cell) * num_cells ); time_end(time4_6); /* 4.7. Join cell lists: Old and new cells list */ time_start(); if ( step_new_cells > 0 ) { cells = (Cell *)realloc( cells, sizeof(Cell) * ( num_cells + step_new_cells ) ); for (j=0; j<step_new_cells; j++) cells[ num_cells + j ] = new_cells[ j ]; num_cells += step_new_cells; } free( new_cells ); time_end(time4_7); /* 4.8. Decrease non-harvested food */ time_start(); current_max_food = 0; for( i=0; i<rows; i++ ) for( j=0; j<columns; j++ ) { accessMat( culture, i, j ) -= accessMat( culture, i, j ) / 20; if ( accessMat( culture, i, j ) > current_max_food ) current_max_food = accessMat( culture, i, j ); } time_end(time4_8); /* 4.9. Statistics */ time_start(); // Statistics: Max food if ( current_max_food > sim_stat.history_max_food ) sim_stat.history_max_food = current_max_food; // Statistics: Max new cells per step if ( step_new_cells > sim_stat.history_max_new_cells ) sim_stat.history_max_new_cells = step_new_cells; // Statistics: Accumulated dead and Max dead cells per step sim_stat.history_dead_cells += step_dead_cells; if ( step_dead_cells > sim_stat.history_max_dead_cells ) sim_stat.history_max_dead_cells = step_dead_cells; // Statistics: Max alive cells per step if ( num_cells_alive > sim_stat.history_max_alive_cells ) sim_stat.history_max_alive_cells = num_cells_alive; time_end(time4_9); #ifdef DEBUG /* 4.10. 
DEBUG: Print the current state of the simulation at the end of each iteration */ print_status( iter, rows, columns, culture, num_cells, cells, num_cells_alive, sim_stat ); #endif // DEBUG } print_times(); /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ // CUDA stop hipDeviceSynchronize(); /* 5. Stop global time */ ttotal = cp_Wtime() - ttotal; #ifdef DEBUG printf("List of cells at the end of the simulation: %d\n\n", num_cells ); for( i=0; i<num_cells; i++ ) { printf("Cell %d, Alive: %d, Pos(%f,%f), Mov(%f,%f), Choose_mov(%f,%f,%f), Storage: %f, Age: %d\n", i, cells[i].alive, (float)cells[i].pos_row / PRECISION, (float)cells[i].pos_col / PRECISION, (float)cells[i].mov_row / PRECISION, (float)cells[i].mov_col / PRECISION, (float)cells[i].choose_mov[0] / PRECISION, (float)cells[i].choose_mov[1] / PRECISION, (float)cells[i].choose_mov[2] / PRECISION, (float)cells[i].storage / PRECISION, cells[i].age ); } #endif // DEBUG /* 6. Output for leaderboard */ printf("\n"); /* 6.1. Total computation time */ printf("Time: %lf\n", ttotal ); /* 6.2. Results: Number of iterations and other statistics */ printf("Result: %d, ", iter); printf("%d, %d, %d, %d, %d, %d, %d, %f\n", num_cells_alive, sim_stat.history_total_cells, sim_stat.history_dead_cells, sim_stat.history_max_alive_cells, sim_stat.history_max_new_cells, sim_stat.history_max_dead_cells, sim_stat.history_max_age, (float)sim_stat.history_max_food / PRECISION ); /* 7. Free resources */ free( culture ); free( culture_cells ); free( cells ); /* 8. End */ return 0; }
6cfc20be90c7e52ea30253e0e02a22d62031f013.cu
/* * Simplified simulation of life evolution * * Computacion Paralela, Grado en Informatica (Universidad de Valladolid) * 2019/2020 * * v1.5 * * CHANGES: * 1) Float values have been substituted by fixed point arithmetics * using integers. To simplify, the fixed point arithmetics are done * with PRECISION in base 10. See precision constant in int_float.h * 2) It uses a portable approximation to trigonometric functions using * Taylor polynomials. * 3) nrand48 function has been extracted from glibc source code and * its internal API simplified to allow its use in the GPU. * * (c) 2020, Arturo Gonzalez Escribano */ #include<stdio.h> #include<stdlib.h> #include<string.h> #include<math.h> #include<stdbool.h> #include<cputils.h> #include<cuda.h> #include<int_float.h> /* * Constants: Converted to fixed point with the given PRECISION */ #define ENERGY_NEEDED_TO_LIVE PRECISION / 10 // Equivalent to 0.1 #define ENERGY_NEEDED_TO_MOVE PRECISION // Equivalent to 1.0 #define ENERGY_SPENT_TO_LIVE PRECISION / 5 // Equivalent to 0.2 #define ENERGY_SPENT_TO_MOVE PRECISION // Equivalent to 1.0 #define ENERGY_NEEDED_TO_SPLIT PRECISION * 20 // Equivalent to 20.0 /* Structure to store data of a cell */ typedef struct { int pos_row, pos_col; // Position int mov_row, mov_col; // Direction of movement int choose_mov[3]; // Genes: Probabilities of 0 turning-left; 1 advance; 2 turning-right int storage; // Food/Energy stored int age; // Number of steps that the cell has been alive unsigned short random_seq[3]; // Status value of its particular random sequence bool alive; // Flag indicating if the cell is still alive } Cell; /* Structure for simulation statistics */ typedef struct { int history_total_cells; // Accumulated number of cells created int history_dead_cells; // Accumulated number of dead cells int history_max_alive_cells; // Maximum number of cells alive in a step int history_max_new_cells; // Maximum number of cells created in a step int history_max_dead_cells; // Maximum number of cells died in a step int history_max_age; // Maximum age achieved by a cell int history_max_food; // Maximum food level in a position of the culture } Statistics; /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * * USE THIS SPACE FOR YOUR KERNEL OR DEVICE FUNTIONS * */ #include "taylor_trig.h" #include "glibc_nrand48.h" /* * Get an uniformly distributed random number between 0 and max * It uses glibc_nrand, that returns a number between 0 and 2^31 */ #define int_urand48( max, seq ) (int)( (long)(max) * glibc_nrand48( seq ) / 2147483648 ) /* * Macro function to simplify accessing with two coordinates to a flattened array * This macro-function can be modified by the students if needed * */ #define accessMat( arr, exp1, exp2 ) arr[ (int)(exp1) * columns + (int)(exp2) ] /* * Function: Choose a new direction of movement for a cell * This function can be changed and/or optimized by the students */ void cell_new_direction( Cell *cell ) { int angle = int_urand48( INT_2PI, cell->random_seq ); cell->mov_row = taylor_sin( angle ); cell->mov_col = taylor_cos( angle ); } /* * Function: Mutation of the movement genes on a new cell * This function can be changed and/or optimized by the students */ void cell_mutation( Cell *cell ) { /* 1. Select which genes change: 0 Left grows taking part of the Advance part 1 Advance grows taking part of the Left part 2 Advance grows taking part of the Right part 3 Right grows taking part of the Advance part */ int mutation_type = int_urand48( 4, cell->random_seq ); /* 2. 
Select the amount of mutation (up to 50%) */ int mutation_percentage = int_urand48( PRECISION / 2, cell->random_seq ); /* 3. Apply the mutation */ int mutation_value; switch( mutation_type ) { case 0: mutation_value = intfloatMult( cell->choose_mov[1] , mutation_percentage ); cell->choose_mov[1] -= mutation_value; cell->choose_mov[0] += mutation_value; break; case 1: mutation_value = intfloatMult( cell->choose_mov[0] , mutation_percentage ); cell->choose_mov[0] -= mutation_value; cell->choose_mov[1] += mutation_value; break; case 2: mutation_value = intfloatMult( cell->choose_mov[2] , mutation_percentage ); cell->choose_mov[2] -= mutation_value; cell->choose_mov[1] += mutation_value; break; case 3: mutation_value = intfloatMult( cell->choose_mov[1] , mutation_percentage ); cell->choose_mov[1] -= mutation_value; cell->choose_mov[2] += mutation_value; break; } /* 4. Correct potential precision problems */ cell->choose_mov[2] = PRECISION - cell->choose_mov[1] - cell->choose_mov[0]; } /* * CUDA block reduction * Inputs: * Device pointer to an array of int of any size * Size of the array * Device pointer to an int to store the result * * Launching parameters: * One-dimesional grid of any size * Any valid block size * Dynamic shared memory size equal to: sizeof(int) * block size * * (c) 2020, Arturo Gonzalez-Escribano * Simplification for an assignment in a Parallel Computing course, * Computing Engineering Degree, Universidad de Valladolid * Academic year 2019/2020 */ __global__ void reductionMax(int* array, int size, int *result) { int globalPos = threadIdx.x + blockIdx.x * blockDim.x; extern __shared__ int buffer[ ]; if ( globalPos < size ) { buffer[ threadIdx.x ] = array[ globalPos ]; } else buffer[ threadIdx.x ] = 0.0f; __syncthreads(); for( int step=blockDim.x/2; step>=1; step /= 2 ) { if ( threadIdx.x < step ) if ( buffer[ threadIdx.x ] < buffer[ threadIdx.x + step ] ) buffer[ threadIdx.x ] = buffer[ threadIdx.x + step ]; if ( step > 32 ) __syncthreads(); } if ( threadIdx.x == 0 ) atomicMax( result, buffer[0] ); } /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ #ifdef DEBUG /* * Function: Print the current state of the simulation */ void print_status( int iteration, int rows, int columns, int *culture, int num_cells, Cell *cells, int num_cells_alive, Statistics sim_stat ) { /* * You don't need to optimize this function, it is only for pretty printing and debugging purposes. * It is not compiled in the production versions of the program. 
* Thus, it is never used when measuring times in the leaderboard */ int i,j; printf("Iteration: %d\n", iteration ); printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); for( i=0; i<rows; i++ ) { printf("|"); for( j=0; j<columns; j++ ) { char symbol; if ( accessMat( culture, i, j ) >= 20 * PRECISION ) symbol = '+'; else if ( accessMat( culture, i, j ) >= 10 * PRECISION ) symbol = '*'; else if ( accessMat( culture, i, j ) >= 5 * PRECISION ) symbol = '.'; else symbol = ' '; int t; int counter = 0; for( t=0; t<num_cells; t++ ) { int row = (int)(cells[t].pos_row / PRECISION); int col = (int)(cells[t].pos_col / PRECISION); if ( cells[t].alive && row == i && col == j ) { counter ++; } } if ( counter > 9 ) printf("(M)" ); else if ( counter > 0 ) printf("(%1d)", counter ); else printf(" %c ", symbol ); } printf("|\n"); } printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); printf("Num_cells_alive: %04d\nHistory( Cells: %04d, Dead: %04d, Max.alive: %04d, Max.new: %04d, Max.dead: %04d, Max.age: %04d, Max.food: %6f )\n\n", num_cells_alive, sim_stat.history_total_cells, sim_stat.history_dead_cells, sim_stat.history_max_alive_cells, sim_stat.history_max_new_cells, sim_stat.history_max_dead_cells, sim_stat.history_max_age, (float)sim_stat.history_max_food / PRECISION ); } #endif /* * Function: Print usage line in stderr */ void show_usage( char *program_name ) { fprintf(stderr,"Usage: %s ", program_name ); fprintf(stderr,"<rows> <columns> <maxIter> <max_food> <food_density> <food_level> <short_rnd1> <short_rnd2> <short_rnd3> <num_cells>\n"); fprintf(stderr,"\tOptional arguments for special food spot: [ <row> <col> <size_rows> <size_cols> <density> <level> ]\n"); fprintf(stderr,"\n"); } /* * MAIN PROGRAM */ int main(int argc, char *argv[]) { int i,j; // Simulation data int max_iter; // Maximum number of simulation steps int rows, columns; // Cultivation area sizes int *culture; // Cultivation area values int *culture_cells; // Ancillary structure to count the number of cells in a culture space float max_food; // Maximum level of food on any position float food_density; // Number of food sources introduced per step float food_level; // Maximum number of food level in a new source bool food_spot_active = false; // Special food spot: Active int food_spot_row = 0; // Special food spot: Initial row int food_spot_col = 0; // Special food spot: Initial row int food_spot_size_rows = 0; // Special food spot: Rows size int food_spot_size_cols = 0; // Special food spot: Cols size float food_spot_density = 0.0f; // Special food spot: Food density float food_spot_level = 0.0f; // Special food spot: Food level unsigned short init_random_seq[3]; // Status of the init random sequence unsigned short food_random_seq[3]; // Status of the food random sequence unsigned short food_spot_random_seq[3]; // Status of the special food spot random sequence int num_cells; // Number of cells currently stored in the list Cell *cells; // List to store cells information // Statistics Statistics sim_stat; sim_stat.history_total_cells = 0; sim_stat.history_dead_cells = 0; sim_stat.history_max_alive_cells = 0; sim_stat.history_max_new_cells = 0; sim_stat.history_max_dead_cells = 0; sim_stat.history_max_age = 0; sim_stat.history_max_food = 0.0f; /* 1. Read simulation arguments */ /* 1.1. Check minimum number of arguments */ if (argc < 11) { fprintf(stderr, "-- Error: Not enough arguments when reading configuration from the command line\n\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } /* 1.2. 
Read culture sizes, maximum number of iterations */ rows = atoi( argv[1] ); columns = atoi( argv[2] ); max_iter = atoi( argv[3] ); /* 1.3. Food data */ max_food = atof( argv[4] ); food_density = atof( argv[5] ); food_level = atof( argv[6] ); /* 1.4. Read random sequences initializer */ for( i=0; i<3; i++ ) { init_random_seq[i] = (unsigned short)atoi( argv[7+i] ); } /* 1.5. Read number of cells */ num_cells = atoi( argv[10] ); /* 1.6. Read special food spot */ if (argc > 11 ) { if ( argc < 17 ) { fprintf(stderr, "-- Error in number of special-food-spot arguments in the command line\n\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } else { food_spot_active = true; food_spot_row = atoi( argv[11] ); food_spot_col = atoi( argv[12] ); food_spot_size_rows = atoi( argv[13] ); food_spot_size_cols = atoi( argv[14] ); food_spot_density = atof( argv[15] ); food_spot_level = atof( argv[16] ); // Check non-used trailing arguments if ( argc > 17 ) { fprintf(stderr, "-- Error: too many arguments in the command line\n\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } } } #ifdef DEBUG /* 1.7. Print arguments */ printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter); printf("Arguments, Max.food: %f, Food density: %f, Food level: %f\n", max_food, food_density, food_level); printf("Arguments, Init Random Sequence: %hu,%hu,%hu\n", init_random_seq[0], init_random_seq[1], init_random_seq[2]); if ( food_spot_active ) { printf("Arguments, Food_spot, pos(%d,%d), size(%d,%d), Density: %f, Level: %f\n", food_spot_row, food_spot_col, food_spot_size_rows, food_spot_size_cols, food_spot_density, food_spot_level ); } printf("Initial cells: %d\n", num_cells ); #endif // DEBUG /* 1.8. Initialize random sequences for food dropping */ for( i=0; i<3; i++ ) { food_random_seq[i] = (unsigned short)glibc_nrand48( init_random_seq ); food_spot_random_seq[i] = (unsigned short)glibc_nrand48( init_random_seq ); } /* 1.9. Initialize random sequences of cells */ cells = (Cell *)malloc( sizeof(Cell) * (size_t)num_cells ); if ( cells == NULL ) { fprintf(stderr,"-- Error allocating: %d cells\n", num_cells ); exit( EXIT_FAILURE ); } for( i=0; i<num_cells; i++ ) { // Initialize the cell ramdom sequences for( j=0; j<3; j++ ) cells[i].random_seq[j] = (unsigned short)glibc_nrand48( init_random_seq ); } #ifdef DEBUG /* 1.10. Print random seed of the initial cells */ /* printf("Initial cells random seeds: %d\n", num_cells ); for( i=0; i<num_cells; i++ ) printf("\tCell %d, Random seq: %hu,%hu,%hu\n", i, cells[i].random_seq[0], cells[i].random_seq[1], cells[i].random_seq[2] ); */ #endif // DEBUG // CUDA start cudaSetDevice(0); cudaDeviceSynchronize(); /* 2. Start global timer */ double ttotal = cp_Wtime(); /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * */ #include "cuda_check.h" #include "cuda_time.h" /* 3. 
Initialize culture surface and initial cells */ culture = (int *)malloc( sizeof(int) * (size_t)rows * (size_t)columns ); culture_cells = (int *)malloc( sizeof(int) * (size_t)rows * (size_t)columns ); #ifdef DEVELOPMENT if ( culture == NULL || culture_cells == NULL ) { fprintf(stderr,"-- Error allocating culture structures for size: %d x %d \n", rows, columns ); exit( EXIT_FAILURE ); } #endif // DEVELOPMENT // 3.1 time_start(); for( i=0; i<rows; i++ ) for( j=0; j<columns; j++ ) accessMat( culture, i, j ) = 0; time_end(time3_1); // 3.2 time_start(); for( i=0; i<num_cells; i++ ) { cells[i].alive = true; // Initial age: Between 1 and 20 cells[i].age = 1 + int_urand48( 19, cells[i].random_seq ); // Initial storage: Between 10 and 20 units cells[i].storage = 10 * PRECISION + int_urand48( 10 * PRECISION, cells[i].random_seq ); // Initial position: Anywhere in the culture arena cells[i].pos_row = int_urand48( rows * PRECISION, cells[i].random_seq ); cells[i].pos_col = int_urand48( columns * PRECISION, cells[i].random_seq ); // Movement direction: Unity vector in a random direction cell_new_direction( &cells[i] ); // Movement genes: Probabilities of advancing or changing direction: The sum should be 1.00 cells[i].choose_mov[0] = PRECISION / 3; cells[i].choose_mov[2] = PRECISION / 3; cells[i].choose_mov[1] = PRECISION - cells[i].choose_mov[0] - cells[i].choose_mov[2]; } // Statistics: Initialize total number of cells, and max. alive sim_stat.history_total_cells = num_cells; sim_stat.history_max_alive_cells = num_cells; time_end(time3_2); #ifdef DEBUG /* Show initial cells data */ printf("Initial cells data: %d\n", num_cells ); for( i=0; i<num_cells; i++ ) { printf("\tCell %d, Pos(%f,%f), Mov(%f,%f), Choose_mov(%f,%f,%f), Storage: %f, Age: %d\n", i, (float)cells[i].pos_row / PRECISION, (float)cells[i].pos_col / PRECISION, (float)cells[i].mov_row / PRECISION, (float)cells[i].mov_col / PRECISION, (float)cells[i].choose_mov[0] / PRECISION, (float)cells[i].choose_mov[1] / PRECISION, (float)cells[i].choose_mov[2] / PRECISION, (float)cells[i].storage / PRECISION, cells[i].age ); } #endif // DEBUG /* 4. Simulation */ int current_max_food = 0; int num_cells_alive = num_cells; int iter; int max_food_int = max_food * PRECISION; int num_new_sources = (int)(rows * columns * food_density); int num_new_sources_spot = food_spot_active ? (int)(food_spot_size_rows * food_spot_size_cols * food_spot_density) : 0; for( iter=0; iter<max_iter && current_max_food <= max_food_int && num_cells_alive > 0; iter++ ) { update_times(); int step_new_cells = 0; int step_dead_cells = 0; /* 4.1. Spreading new food */ time_start(); // Across the whole culture for (i=0; i<num_new_sources; i++) { int row = int_urand48( rows, food_random_seq ); int col = int_urand48( columns, food_random_seq ); int food = int_urand48( food_level * PRECISION, food_random_seq ); accessMat( culture, row, col ) = accessMat( culture, row, col ) + food; } // In the special food spot if ( food_spot_active ) { for (i=0; i<num_new_sources_spot; i++) { int row = food_spot_row + int_urand48( food_spot_size_rows, food_spot_random_seq ); int col = food_spot_col + int_urand48( food_spot_size_cols, food_spot_random_seq ); int food = int_urand48( food_spot_level * PRECISION, food_spot_random_seq ); accessMat( culture, row, col ) = accessMat( culture, row, col ) + food; } } time_end(time4_1); /* 4.2. Prepare ancillary data structures */ time_start(); /* 4.2.1. 
Clear ancillary structure of the culture to account alive cells in a position after movement */ for( i=0; i<rows; i++ ) for( j=0; j<columns; j++ ) accessMat( culture_cells, i, j ) = 0; /* 4.2.2. Allocate ancillary structure to store the food level to be shared by cells in the same culture place */ int *food_to_share = (int *)malloc( sizeof(int) * num_cells ); #ifdef DEVELOPMENT if ( food_to_share == NULL ) { fprintf(stderr,"-- Error allocating food_to_share structures for size: %d x %d \n", rows, columns ); exit( EXIT_FAILURE ); } #endif // DEVELOPMENT time_end(time4_2); /* 4.3. Cell movements */ time_start(); for (i=0; i<num_cells; i++) { if ( cells[i].alive ) { cells[i].age ++; // Statistics: Max age of a cell in the simulation history if ( cells[i].age > sim_stat.history_max_age ) sim_stat.history_max_age = cells[i].age; /* 4.3.1. Check if the cell has the needed energy to move or keep alive */ if ( cells[i].storage < ENERGY_NEEDED_TO_LIVE ) { // Cell has died cells[i].alive = false; num_cells_alive --; step_dead_cells ++; continue; } if ( cells[i].storage < ENERGY_NEEDED_TO_MOVE ) { // Almost dying cell, it cannot move, only if enough food is dropped here it will survive cells[i].storage -= ENERGY_SPENT_TO_LIVE; } else { // Consume energy to move cells[i].storage -= ENERGY_SPENT_TO_MOVE; /* 4.3.2. Choose movement direction */ int prob = int_urand48( PRECISION, cells[i].random_seq ); if ( prob < cells[i].choose_mov[0] ) { // Turn left (90 degrees) int tmp = cells[i].mov_col; cells[i].mov_col = cells[i].mov_row; cells[i].mov_row = -tmp; } else if ( prob >= cells[i].choose_mov[0] + cells[i].choose_mov[1] ) { // Turn right (90 degrees) int tmp = cells[i].mov_row; cells[i].mov_row = cells[i].mov_col; cells[i].mov_col = -tmp; } // else do not change the direction /* 4.3.3. Update position moving in the choosen direction*/ cells[i].pos_row += cells[i].mov_row; cells[i].pos_col += cells[i].mov_col; // Periodic arena: Left/Rigth edges are connected, Top/Bottom edges are connected if ( cells[i].pos_row < 0 ) cells[i].pos_row += rows * PRECISION; if ( cells[i].pos_row >= rows * PRECISION) cells[i].pos_row -= rows * PRECISION; if ( cells[i].pos_col < 0 ) cells[i].pos_col += columns * PRECISION; if ( cells[i].pos_col >= columns * PRECISION) cells[i].pos_col -= columns * PRECISION; } /* 4.3.4. Annotate that there is one more cell in this culture position */ accessMat( culture_cells, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ) += 1; /* 4.3.5. Annotate the amount of food to be shared in this culture position */ food_to_share[i] = accessMat( culture, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ); } } // End cell movements time_end(time4_3); /* 4.4. Cell actions */ time_start(); // Space for the list of new cells (maximum number of new cells is num_cells) Cell *new_cells = (Cell *)malloc( sizeof(Cell) * num_cells ); #ifdef DEVELOPMENT if ( new_cells == NULL ) { fprintf(stderr,"-- Error allocating new cells structures for: %d cells\n", num_cells ); exit( EXIT_FAILURE ); } #endif // DEVELOPMENT for (i=0; i<num_cells; i++) { if ( cells[i].alive ) { /* 4.4.1. Food harvesting */ int food = food_to_share[i]; int count = accessMat( culture_cells, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ); int my_food = food / count; cells[i].storage += my_food; /* 4.4.2. 
Split cell if the conditions are met: Enough maturity and energy */ if ( cells[i].age > 30 && cells[i].storage > ENERGY_NEEDED_TO_SPLIT ) { // Split: Create new cell num_cells_alive ++; sim_stat.history_total_cells ++; step_new_cells ++; // New cell is a copy of parent cell new_cells[ step_new_cells-1 ] = cells[i]; // Split energy stored and update age in both cells cells[i].storage /= 2; new_cells[ step_new_cells-1 ].storage /= 2; cells[i].age = 1; new_cells[ step_new_cells-1 ].age = 1; // Random seed for the new cell, obtained using the parent random sequence new_cells[ step_new_cells-1 ].random_seq[0] = (unsigned short)glibc_nrand48( cells[i].random_seq ); new_cells[ step_new_cells-1 ].random_seq[1] = (unsigned short)glibc_nrand48( cells[i].random_seq ); new_cells[ step_new_cells-1 ].random_seq[2] = (unsigned short)glibc_nrand48( cells[i].random_seq ); // Both cells start in random directions cell_new_direction( &cells[i] ); cell_new_direction( &new_cells[ step_new_cells-1 ] ); // Mutations of the movement genes in both cells cell_mutation( &cells[i] ); cell_mutation( &new_cells[ step_new_cells-1 ] ); } } } // End cell actions time_end(time4_4); /* 4.5. Clean ancillary data structures */ time_start(); /* 4.5.1. Clean the food consumed by the cells in the culture data structure */ for (i=0; i<num_cells; i++) { if ( cells[i].alive ) { accessMat( culture, cells[i].pos_row / PRECISION, cells[i].pos_col / PRECISION ) = 0; } } /* 4.5.2. Free the ancillary data structure to store the food to be shared */ free( food_to_share ); time_end(time4_5); /* 4.6. Clean dead cells from the original list */ time_start(); // 4.6.1. Move alive cells to the left to substitute dead cells int free_position = 0; int alive_in_main_list = 0; for( i=0; i<num_cells; i++ ) { if ( cells[i].alive ) { alive_in_main_list ++; if ( free_position != i ) { cells[free_position] = cells[i]; } free_position ++; } } // 4.6.2. Reduce the storage space of the list to the current number of cells num_cells = alive_in_main_list; cells = (Cell *)realloc( cells, sizeof(Cell) * num_cells ); time_end(time4_6); /* 4.7. Join cell lists: Old and new cells list */ time_start(); if ( step_new_cells > 0 ) { cells = (Cell *)realloc( cells, sizeof(Cell) * ( num_cells + step_new_cells ) ); for (j=0; j<step_new_cells; j++) cells[ num_cells + j ] = new_cells[ j ]; num_cells += step_new_cells; } free( new_cells ); time_end(time4_7); /* 4.8. Decrease non-harvested food */ time_start(); current_max_food = 0; for( i=0; i<rows; i++ ) for( j=0; j<columns; j++ ) { accessMat( culture, i, j ) -= accessMat( culture, i, j ) / 20; if ( accessMat( culture, i, j ) > current_max_food ) current_max_food = accessMat( culture, i, j ); } time_end(time4_8); /* 4.9. Statistics */ time_start(); // Statistics: Max food if ( current_max_food > sim_stat.history_max_food ) sim_stat.history_max_food = current_max_food; // Statistics: Max new cells per step if ( step_new_cells > sim_stat.history_max_new_cells ) sim_stat.history_max_new_cells = step_new_cells; // Statistics: Accumulated dead and Max dead cells per step sim_stat.history_dead_cells += step_dead_cells; if ( step_dead_cells > sim_stat.history_max_dead_cells ) sim_stat.history_max_dead_cells = step_dead_cells; // Statistics: Max alive cells per step if ( num_cells_alive > sim_stat.history_max_alive_cells ) sim_stat.history_max_alive_cells = num_cells_alive; time_end(time4_9); #ifdef DEBUG /* 4.10. 
DEBUG: Print the current state of the simulation at the end of each iteration */ print_status( iter, rows, columns, culture, num_cells, cells, num_cells_alive, sim_stat ); #endif // DEBUG } print_times(); /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ // CUDA stop cudaDeviceSynchronize(); /* 5. Stop global time */ ttotal = cp_Wtime() - ttotal; #ifdef DEBUG printf("List of cells at the end of the simulation: %d\n\n", num_cells ); for( i=0; i<num_cells; i++ ) { printf("Cell %d, Alive: %d, Pos(%f,%f), Mov(%f,%f), Choose_mov(%f,%f,%f), Storage: %f, Age: %d\n", i, cells[i].alive, (float)cells[i].pos_row / PRECISION, (float)cells[i].pos_col / PRECISION, (float)cells[i].mov_row / PRECISION, (float)cells[i].mov_col / PRECISION, (float)cells[i].choose_mov[0] / PRECISION, (float)cells[i].choose_mov[1] / PRECISION, (float)cells[i].choose_mov[2] / PRECISION, (float)cells[i].storage / PRECISION, cells[i].age ); } #endif // DEBUG /* 6. Output for leaderboard */ printf("\n"); /* 6.1. Total computation time */ printf("Time: %lf\n", ttotal ); /* 6.2. Results: Number of iterations and other statistics */ printf("Result: %d, ", iter); printf("%d, %d, %d, %d, %d, %d, %d, %f\n", num_cells_alive, sim_stat.history_total_cells, sim_stat.history_dead_cells, sim_stat.history_max_alive_cells, sim_stat.history_max_new_cells, sim_stat.history_max_dead_cells, sim_stat.history_max_age, (float)sim_stat.history_max_food / PRECISION ); /* 7. Free resources */ free( culture ); free( culture_cells ); free( cells ); /* 8. End */ return 0; }
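The reductionMax kernel defined above documents its launch contract (one-dimensional grid of any size, any valid block size, dynamic shared memory of sizeof(int) times the block size) but is never called from the surrounding host code. A minimal host-side sketch consistent with that contract is shown below; the helper name device_max and the block size of 256 are illustrative choices, not part of the original assignment code, and the result is seeded with 0 on the assumption that the reduced values (food levels) are non-negative.

// Hypothetical helper: reduce a device buffer of non-negative ints to its maximum
// using the reductionMax kernel defined above.
int device_max(int *d_array, int size)
{
    int *d_result = NULL;
    int h_result = 0;
    cudaMalloc((void **)&d_result, sizeof(int));
    cudaMemset(d_result, 0, sizeof(int));                // valid lower bound for non-negative data
    int threads = 256;                                   // any valid block size, per the kernel comment
    int blocks = (size + threads - 1) / threads;         // one-dimensional grid
    reductionMax<<<blocks, threads, threads * sizeof(int)>>>(d_array, size, d_result);
    cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_result);
    return h_result;
}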
17899c732434854b1bb63cbf26cae7db56a7ea9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! Equal <T = float32, Device = CUDA> */ template <typename T> __global__ void _Equal( const int count, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? (T)1 : (T)0; } } template <> void Equal<float, CUDAContext>( const int count, const float* a, const float* b, float* y, CUDAContext* ctx) { _Equal<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, a, b, y); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
17899c732434854b1bb63cbf26cae7db56a7ea9e.cu
#ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! Equal <T = float32, Device = CUDA> */ template <typename T> __global__ void _Equal( const int count, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? (T)1 : (T)0; } } template <> void Equal<float, CUDAContext>( const int count, const float* a, const float* b, float* y, CUDAContext* ctx) { _Equal<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, a, b, y); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
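The _Equal kernel above leans on dragon's CUDA_1D_KERNEL_LOOP macro and CUDAContext plumbing. For reference, the same elementwise epsilon comparison written as a self-contained plain-CUDA kernel with an explicit grid-stride loop (a common expansion of such macros; this is an illustrative sketch, not dragon's actual macro definition) could look like:

#include <cfloat>   // FLT_EPSILON

__global__ void equal_eps(const int count, const float *a, const float *b, float *y)
{
    // Grid-stride loop: each thread covers idx, idx + gridDim.x * blockDim.x, ...
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < count;
         idx += blockDim.x * gridDim.x) {
        y[idx] = fabsf(a[idx] - b[idx]) < FLT_EPSILON ? 1.f : 0.f;
    }
}
// Example launch: equal_eps<<<(count + 255) / 256, 256>>>(count, d_a, d_b, d_y);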
fb36c6f5061893cbe71c9848147f7a1b27ae4fef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include"scrImagePgmPpmPackage.h" const float SCALE_RATIO = 0.5; //Step 1: Texture Memory texture<unsigned char, 2, hipReadModeElementType> text; //Kernel to calculate resized size __global__ void resized(unsigned char *imgData, int width, float scale_factor, hipTextureObject_t texObj) { const unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int tidY = blockIdx.y * blockDim.y + threadIdx.y; const unsigned idx = tidY * width + tidX; //Read texture mem to CUDA Kernel imgData[idx] = tex2D<unsigned char>(texObj,(float)(tidX*scale_factor),(float)(tidY*scale_factor)); } int main(int argc, char *argv[]) { int h = 0; int w = 0; int scaled_h = 0; int scaled_w = 0; char inputStr[1024] = {"aerosmith-double.pgm"}; char outputStr[1024] = {"aerosmith-double-scaled.pgm"}; float ratio = SCALE_RATIO; unsigned char *data; unsigned char *scaled_data; unsigned char *dscaled_data; //for GPU hipError_t returnValue; //channel description to link with texture hipArray* cu_array; hipChannelFormatKind kind = hipChannelFormatKindUnsigned; hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8,0,0,0,kind); get_PgmPpmParams(inputStr,&h,&w); //function to get width and height of image; data = (unsigned char*)malloc(h*w*sizeof(unsigned char)); printf("\n Reading image width height and width [%d][%d]", h, w); scr_read_pgm(inputStr,data,h,w); //load an image scaled_h = (int)(h *ratio); scaled_w = (int)(w *ratio); scaled_data = (unsigned char*) malloc(scaled_h*scaled_w*sizeof(unsigned char)); printf("\n scaled image width height and width [%d][%d]", scaled_h, scaled_w); //CUDA MALLOC returnValue = hipMallocArray(&cu_array,&channelDesc,w,h); returnValue = (hipError_t)(returnValue | hipMemcpyToArray(cu_array,0,0,data,h*w*sizeof(unsigned char),hipMemcpyHostToDevice)); if(returnValue != hipSuccess) printf("\n Got error while running CUDA API Array Copy"); //texture specify struct hipResourceDesc resDesc; memset(&resDesc,0,sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = cu_array; //object params of the texture struct hipTextureDesc texDesc; memset(&texDesc,0,sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; texDesc.normalizedCoords = 0; //texture object creation hipTextureObject_t texObj = 0; hipCreateTextureObject(&texObj,&resDesc,&texDesc,NULL); if(returnValue != hipSuccess) printf("\n Got error while running CUDA API Bind Texture"); hipMalloc(&dscaled_data, scaled_h*scaled_w*sizeof(unsigned char) ); dim3 dimBlock(32,32,1); dim3 dimGrid(scaled_w/dimBlock.x,scaled_h/dimBlock.y,1); printf("\n Launching grid with blocks [%d][%d] ", dimGrid.x,dimGrid.y); hipLaunchKernelGGL(( resized), dim3(dimGrid),dim3(dimBlock), 0, 0, dscaled_data,scaled_w,1/ratio,texObj); returnValue = (hipError_t)(returnValue | hipDeviceSynchronize()); returnValue = (hipError_t)(returnValue |hipMemcpy (scaled_data , dscaled_data, scaled_h*scaled_w*sizeof(unsigned char), hipMemcpyDeviceToHost )); if(returnValue != hipSuccess) printf("\n Got error while running CUDA API kernel"); //destroy texture object hipDestroyTextureObject(texObj); scr_write_pgm(outputStr,scaled_data,scaled_h,scaled_w,"####"); //storing image with detections if(data!=NULL) free(data); if(cu_array!=NULL) hipFreeArray(cu_array); if(scaled_data!=NULL) free(scaled_data); 
if(dscaled_data!=NULL) hipFree(dscaled_data); return 0; }
fb36c6f5061893cbe71c9848147f7a1b27ae4fef.cu
#include<stdio.h> #include"scrImagePgmPpmPackage.h" const float SCALE_RATIO = 0.5; //Step 1: Texture Memory texture<unsigned char, 2, cudaReadModeElementType> text; //Kernel to calculate resized size __global__ void resized(unsigned char *imgData, int width, float scale_factor, cudaTextureObject_t texObj) { const unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int tidY = blockIdx.y * blockDim.y + threadIdx.y; const unsigned idx = tidY * width + tidX; //Read texture mem to CUDA Kernel imgData[idx] = tex2D<unsigned char>(texObj,(float)(tidX*scale_factor),(float)(tidY*scale_factor)); } int main(int argc, char *argv[]) { int h = 0; int w = 0; int scaled_h = 0; int scaled_w = 0; char inputStr[1024] = {"aerosmith-double.pgm"}; char outputStr[1024] = {"aerosmith-double-scaled.pgm"}; float ratio = SCALE_RATIO; unsigned char *data; unsigned char *scaled_data; unsigned char *dscaled_data; //for GPU cudaError_t returnValue; //channel description to link with texture cudaArray* cu_array; cudaChannelFormatKind kind = cudaChannelFormatKindUnsigned; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8,0,0,0,kind); get_PgmPpmParams(inputStr,&h,&w); //function to get width and height of image; data = (unsigned char*)malloc(h*w*sizeof(unsigned char)); printf("\n Reading image width height and width [%d][%d]", h, w); scr_read_pgm(inputStr,data,h,w); //load an image scaled_h = (int)(h *ratio); scaled_w = (int)(w *ratio); scaled_data = (unsigned char*) malloc(scaled_h*scaled_w*sizeof(unsigned char)); printf("\n scaled image width height and width [%d][%d]", scaled_h, scaled_w); //CUDA MALLOC returnValue = cudaMallocArray(&cu_array,&channelDesc,w,h); returnValue = (cudaError_t)(returnValue | cudaMemcpyToArray(cu_array,0,0,data,h*w*sizeof(unsigned char),cudaMemcpyHostToDevice)); if(returnValue != cudaSuccess) printf("\n Got error while running CUDA API Array Copy"); //texture specify struct cudaResourceDesc resDesc; memset(&resDesc,0,sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = cu_array; //object params of the texture struct cudaTextureDesc texDesc; memset(&texDesc,0,sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; texDesc.normalizedCoords = 0; //texture object creation cudaTextureObject_t texObj = 0; cudaCreateTextureObject(&texObj,&resDesc,&texDesc,NULL); if(returnValue != cudaSuccess) printf("\n Got error while running CUDA API Bind Texture"); cudaMalloc(&dscaled_data, scaled_h*scaled_w*sizeof(unsigned char) ); dim3 dimBlock(32,32,1); dim3 dimGrid(scaled_w/dimBlock.x,scaled_h/dimBlock.y,1); printf("\n Launching grid with blocks [%d][%d] ", dimGrid.x,dimGrid.y); resized<<<dimGrid,dimBlock>>>(dscaled_data,scaled_w,1/ratio,texObj); returnValue = (cudaError_t)(returnValue | cudaThreadSynchronize()); returnValue = (cudaError_t)(returnValue |cudaMemcpy (scaled_data , dscaled_data, scaled_h*scaled_w*sizeof(unsigned char), cudaMemcpyDeviceToHost )); if(returnValue != cudaSuccess) printf("\n Got error while running CUDA API kernel"); //destroy texture object cudaDestroyTextureObject(texObj); scr_write_pgm(outputStr,scaled_data,scaled_h,scaled_w,"####"); //storing image with detections if(data!=NULL) free(data); if(cu_array!=NULL) cudaFreeArray(cu_array); if(scaled_data!=NULL) free(scaled_data); if(dscaled_data!=NULL) free(dscaled_data); return 0; }
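One caveat in the teardown of main above: dscaled_data is allocated with cudaMalloc, so it is a device pointer and must be released with cudaFree rather than the host free() (cu_array, by contrast, is correctly released with cudaFreeArray); cudaThreadSynchronize is also long deprecated in favor of cudaDeviceSynchronize, which the HIP version already uses. A corrected cleanup sketch for the tail of main, reusing the variable names from the code above:

cudaDestroyTextureObject(texObj);                  // texture object
if (cu_array != NULL) cudaFreeArray(cu_array);     // CUDA array backing the texture
if (dscaled_data != NULL) cudaFree(dscaled_data);  // device buffer from cudaMalloc
if (data != NULL) free(data);                      // host buffers from malloc
if (scaled_data != NULL) free(scaled_data);
return 0;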
e70e6e1514d7a9f5fe3d6c13464e77d792a5019b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/pair.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> template<typename Iterator1, typename Iterator2, typename Iterator3> __global__ void stable_sort_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; thrust::stable_sort_by_key(thrust::seq, keys_first, keys_last, values_first); #else *is_supported = false; #endif } struct make_pair_functor { template<typename T1, typename T2> __host__ __device__ thrust::pair<T1,T2> operator()(const T1 &x, const T2 &y) { return thrust::make_pair(x,y); } // end operator()() }; // end make_pair_functor template <typename T> struct TestPairStableSortByKeyDeviceSeq { void operator()(const size_t n) { typedef thrust::pair<T,T> P; // host arrays thrust::host_vector<T> h_p1 = unittest::random_integers<T>(n); thrust::host_vector<T> h_p2 = unittest::random_integers<T>(n); thrust::host_vector<P> h_pairs(n); thrust::host_vector<int> h_values(n); thrust::sequence(h_values.begin(), h_values.end()); // zip up pairs on the host thrust::transform(h_p1.begin(), h_p1.end(), h_p2.begin(), h_pairs.begin(), make_pair_functor()); // device arrays thrust::device_vector<P> d_pairs = h_pairs; thrust::device_vector<int> d_values = h_values; thrust::device_vector<bool> is_supported(1); // sort on the device hipLaunchKernelGGL(( stable_sort_by_key_kernel), dim3(1),dim3(1), 0, 0, d_pairs.begin(), d_pairs.end(), d_values.begin(), is_supported.begin()); if(is_supported[0]) { // sort on the host thrust::stable_sort_by_key(h_pairs.begin(), h_pairs.end(), h_values.begin()); ASSERT_EQUAL_QUIET(h_pairs, d_pairs); ASSERT_EQUAL(h_values, d_values); } } }; VariableUnitTest<TestPairStableSortByKeyDeviceSeq, unittest::type_list<unittest::int8_t,unittest::int16_t,unittest::int32_t> > TestPairStableSortByKeyDeviceSeqInstance;
e70e6e1514d7a9f5fe3d6c13464e77d792a5019b.cu
#include <unittest/unittest.h> #include <thrust/pair.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> template<typename Iterator1, typename Iterator2, typename Iterator3> __global__ void stable_sort_by_key_kernel(Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 is_supported) { #if (__CUDA_ARCH__ >= 200) *is_supported = true; thrust::stable_sort_by_key(thrust::seq, keys_first, keys_last, values_first); #else *is_supported = false; #endif } struct make_pair_functor { template<typename T1, typename T2> __host__ __device__ thrust::pair<T1,T2> operator()(const T1 &x, const T2 &y) { return thrust::make_pair(x,y); } // end operator()() }; // end make_pair_functor template <typename T> struct TestPairStableSortByKeyDeviceSeq { void operator()(const size_t n) { typedef thrust::pair<T,T> P; // host arrays thrust::host_vector<T> h_p1 = unittest::random_integers<T>(n); thrust::host_vector<T> h_p2 = unittest::random_integers<T>(n); thrust::host_vector<P> h_pairs(n); thrust::host_vector<int> h_values(n); thrust::sequence(h_values.begin(), h_values.end()); // zip up pairs on the host thrust::transform(h_p1.begin(), h_p1.end(), h_p2.begin(), h_pairs.begin(), make_pair_functor()); // device arrays thrust::device_vector<P> d_pairs = h_pairs; thrust::device_vector<int> d_values = h_values; thrust::device_vector<bool> is_supported(1); // sort on the device stable_sort_by_key_kernel<<<1,1>>>(d_pairs.begin(), d_pairs.end(), d_values.begin(), is_supported.begin()); if(is_supported[0]) { // sort on the host thrust::stable_sort_by_key(h_pairs.begin(), h_pairs.end(), h_values.begin()); ASSERT_EQUAL_QUIET(h_pairs, d_pairs); ASSERT_EQUAL(h_values, d_values); } } }; VariableUnitTest<TestPairStableSortByKeyDeviceSeq, unittest::type_list<unittest::int8_t,unittest::int16_t,unittest::int32_t> > TestPairStableSortByKeyDeviceSeqInstance;
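The test above exercises thrust::stable_sort_by_key sequentially inside a kernel (via thrust::seq) and on host_vectors, then compares the results. For reference, the ordinary parallel host-side call on device data, which the test does not spell out, is a single line; the helper below is an illustrative sketch, not part of the test suite.

#include <thrust/device_vector.h>
#include <thrust/sort.h>

// Illustrative helper: stable-sort keys on the device and permute values identically.
void stable_sort_pairs(thrust::device_vector<int> &keys, thrust::device_vector<int> &values)
{
    thrust::stable_sort_by_key(keys.begin(), keys.end(), values.begin());
}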
d25e5e106779392c774653a1dcacf728b2400066.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" #include "utils/cast.h" namespace dragon { namespace kernel { /******************** activation.dropout ********************/ template<typename T> __global__ void _Dropout( const int count, const uint32_t thresh, const T scale, const T* x, const uint32_t* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] * (mask[idx] > thresh) * scale; } } template<> void Dropout<float, CUDAContext>( const int count, float prob, float scale, const float* x, uint32_t* mask, float* y, CUDAContext* ctx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); math::RandomUniform<uint32_t, CUDAContext>( count, float(0), float(UINT_MAX), mask, ctx); _Dropout<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, thresh, scale, x, mask, y); } template <typename T> __global__ void _DropoutGrad( const int count, const uint32_t thresh, const T scale, const T* dy, const uint32_t* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * (mask[idx] > thresh) * scale; } } template<> void DropoutGrad<float, CUDAContext>( const int count, float prob, float scale, const float* dy, const uint32_t* mask, float* dx, CUDAContext* ctx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); _DropoutGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, thresh, scale, dy, mask, dx); } /******************** activation.prelu ********************/ template <typename T> __global__ void _PRelu( const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[0]; } } template <typename T> __global__ void _PReluNCHW( const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template <typename T> __global__ void _PReluNHWC( const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template<> void PRelu<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* x, const float* w, float* y, CUDAContext* ctx) { if (channel_shared) { _PRelu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, x, w, y); } else { if (data_format == "NCHW") { _PReluNCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, x, w, y); } else if (data_format == "NHWC") { _PReluNHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, x, w, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } } template <typename T> __global__ void _PReluGrad( const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ( (x[idx] > 0) + (x[idx] <= 0) * w[0] ); } } template <typename T> __global__ void _PReluGradNCHW( const int count, const int channels, const int dim, const T* dy, const T* x, 
const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; dx[idx] = dy[idx] * ( (x[idx] > 0) + (x[idx] <= 0) * w[c] ); } } template <typename T> __global__ void _PReluGradNHWC( const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template<> void PReluGrad<float, CUDAContext>( const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* w, float* dx, CUDAContext* ctx) { if (channel_shared) { _PReluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, dy, x, w, dx); } else { if (data_format == "NCHW") { _PReluGradNCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, dy, x, w, dx); } else if (data_format == "NHWC") { _PReluGradNHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, dy, x, w, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } } template <typename T> __global__ void _PReluWGradBcast( const int count, const int rows, const int row_offset, const T* dy, const T* x, T* bcast_dw) { CUDA_1D_KERNEL_LOOP(idx, count) { bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0); for (int n = 1; n < rows; n++) { const int cur_idx = idx + n * row_offset; bcast_dw[idx] += dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0); } } } template<> void PReluWGrad<float, CUDAContext>( const int rows, const int row_offset, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* multiplier, float* bcast_dw, float* dw, CUDAContext* ctx) { const int cdim = channels * dim; _PReluWGradBcast<float> << < CUDA_BLOCKS(cdim), CUDA_THREADS, 0, ctx->cuda_stream() >> >( cdim, rows, row_offset, dy, x, bcast_dw); if (channel_shared) { float w_sum; math::Dot<float, CUDAContext>(channels * dim, bcast_dw, multiplier, &w_sum, ctx); math::AddScalar<float, CUDAContext>(1, w_sum, dw, ctx); } else { if (data_format == "NCHW") { math::Gemv<float, CUDAContext>( CblasNoTrans, channels, dim, 1.0, bcast_dw, multiplier, 1.0, dw, ctx); } else if (data_format == "NHWC") { math::Gemv<float, CUDAContext>( CblasTrans, dim, channels, 1.0, bcast_dw, multiplier, 1.0, dw, ctx); } else LOG(FATAL) << "Unknown data format: " << data_format; } } /******************** activation.elu ********************/ template <typename T> __global__ void _Elu( const int count, const T* x, const float alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 
x[idx] : alpha * (exp(x[idx]) - 1); } } template<> void Elu<float, CUDAContext>( const int count, const float alpha, const float* x, float* y, CUDAContext* ctx) { _Elu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, alpha, y); } template <typename T> __global__ void _EluGrad( const int count, const float alpha, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ( (y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0) ); } } template<> void EluGrad<float, CUDAContext>( const int count, const float alpha, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _EluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, alpha, dy, y, dx); } /******************** activation.relu ********************/ template <typename T> __global__ void _Relu( const int count, const float slope, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope; } } template<> void Relu<float, CUDAContext>( const int count, const float slope, const float* x, float* y, CUDAContext* ctx) { _Relu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, slope, x, y); } template <typename T> __global__ void _ReluGrad( const int count, const float slope, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ( (y[idx] > 0) + slope * (y[idx] <= 0) ); } } template<> void ReluGrad<float, CUDAContext>( const int count, const float slope, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _ReluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, slope, dy, y, dx); } /******************** activation.selu ********************/ template <typename T> __global__ void _SElu( const int count, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 1.0507 * x[idx] : 1.7581 * (exp(x[idx]) - 1); } } template<> void SElu<float, CUDAContext>( const int count, const float* x, float* y, CUDAContext* ctx) { _SElu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, y); } template <typename T> __global__ void _SEluGrad( const int count, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = y[idx] > 0 ? 
1.0507 * dy[idx] : (1.7581 + y[idx]) * dy[idx]; } } template<> void SEluGrad<float, CUDAContext>( const int count, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _SEluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, y, dx); } /******************** activation.sigmoid ********************/ template <typename T> __device__ T _SigmoidUnit(const T x) { return T(1) / (T(1) + exp(-x)); } template <typename T> __global__ void _Sigmoid( const int n, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = _SigmoidUnit<T>(x[idx]); } } template<> void Sigmoid<float, CUDAContext>( const int count, const float* x, float* y, CUDAContext* ctx) { _Sigmoid<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, y); } template <typename T> __global__ void _SigmoidGrad( const int count, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * y[idx] * (1 - y[idx]); } } template<> void SigmoidGrad<float, CUDAContext>( const int count, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _SigmoidGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, y, dx); } /******************** activation.softmax ********************/ template <typename T> __global__ void _SoftmaxMaxClass( const int outer_dim, const int classes, const int inner_dim, const T* x, T* scale) { CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T max_val = -FLT_MAX; for (int c = 0; c < classes; c++) max_val = max( x[(o_idx * classes + c) * inner_dim + i_idx], max_val ); scale[idx] = max_val; } } template <typename T> __global__ void _SoftmaxSubtract( const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] -= scale[o_idx * inner_dim + i_idx]; } } template <typename T> __global__ void _SoftmaxExp( const int count, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = exp(y[idx]); } } template <typename T> __global__ void _SoftmaxSumClass( const int outer_dim, const int classes, const int inner_dim, const T* y, T* scale) { CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T sum = 0; for (int c = 0; c < classes; c++) sum += y[(o_idx * classes + c) * inner_dim + i_idx]; scale[idx] = sum; } } template <typename T> __global__ void _SoftmaxDiv( const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] /= scale[o_idx * inner_dim + i_idx]; } } template<> void Softmax<float, CUDAContext>( const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* x, float* scale, float* y, CUDAContext* ctx) { const int num_preds = inner_dim * outer_dim; _SoftmaxMaxClass<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( outer_dim, classes, inner_dim, x, scale); _SoftmaxSubtract<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, classes, inner_dim, scale, y); _SoftmaxExp<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, y); _SoftmaxSumClass<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( outer_dim, classes, inner_dim, y, scale); _SoftmaxDiv<float> << < 
CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, classes, inner_dim, scale, y); } template <typename T> __global__ void _SoftmaxDot( const int outer_dim, const int classes, const int inner_dim, const T* dy, const T* y, T* scale) { CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T dot = 0; for (int c = 0; c < classes; c++) dot += ( y[(o_idx * classes + c) * inner_dim + i_idx] * dy[(o_idx * classes + c) * inner_dim + i_idx] ); scale[idx] = dot; } } template<> void SoftmaxGrad<float, CUDAContext>( const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* dy, const float* y, float* scale, float* dx, CUDAContext* ctx) { const int num_preds = inner_dim * outer_dim; _SoftmaxDot<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( outer_dim, classes, inner_dim, dy, y, scale); _SoftmaxSubtract<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, classes,inner_dim, scale, dx); math::Mul<float, CUDAContext>(count, dx, y, dx, ctx); } /******************** activation.tanh ********************/ template <typename T> __global__ void _Tanh( const int count, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, count) { y[i] = tanh(x[i]); } } template<> void Tanh<float, CUDAContext>( const int count, const float* x, float* y, CUDAContext* ctx) { _Tanh<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, y); } template <typename T> __global__ void _TanhGrad( const int count, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(i, count) { dx[i] = dy[i] * (1 - y[i] * y[i]); } } template<> void TanhGrad<float, CUDAContext>( const int count, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _TanhGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, y, dx); } /******************** arithmetic.scale ********************/ template <typename T> __global__ void _AffineWithOBias( const int count, const int scale_dim, const int inner_dim, const T* x, const T* alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = alpha[scale_idx] * x[idx]; } } template <typename T> __global__ void _AffineWithBias( const int count, const int scale_dim, const int inner_dim, const T* x, const T* alpha, const T* beta, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = alpha[scale_idx] * x[idx] + beta[scale_idx]; } } template<> void Affine<float, CUDAContext>( const int count, const int outer_dim, const int scale_dim, const int inner_dim, const float* x, const float* alpha, const float* beta, const float* beta_multiplier, float* y, CUDAContext* ctx) { if (beta != nullptr) { _AffineWithBias<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, scale_dim, inner_dim, x, alpha, beta, y); } else { _AffineWithOBias<float> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, scale_dim, inner_dim, x, alpha, y); } } template <> void AffineGrad<float, CUDAContext>( const int count, const int outer_dim, const int scale_dim, const int inner_dim, const float* dy, const float* alpha, float* dx, CUDAContext* ctx) { _AffineWithOBias<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, scale_dim, inner_dim, dy, alpha, dx); } /******************** arithmetic.clip ********************/ template <typename T> __global__ 
void _Clip( const int count, const T low, const T high, const T* x, T* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { mask[idx] = 1.0; if (x[idx] > high || x[idx] < low) mask[idx] = 0.0; y[idx] = x[idx] > high ? high : x[idx]; y[idx] = x[idx] < low ? low : x[idx]; } } template <> void Clip<float, CUDAContext>( const int count, const float low, const float high, const float* x, float* mask, float* y, CUDAContext* ctx) { _Clip<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, low, high, x, mask, y); } /******************** control_flow.compare ********************/ template <typename T> __global__ void _Equal( const int count, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0; } } template <> void Equal<float, CUDAContext>( const int count, const float* a, const float* b, float* y, CUDAContext* ctx) { _Equal<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, a, b, y); } /******************** loss.l1_loss ********************/ template <typename T> __global__ void _AbsGrad( const int count, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const T val = dy[idx]; // val > 0: 1 | val == 0: 0 | val < 0: -1 dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void AbsGrad<float, CUDAContext>( const int count, const float* dy, float* dx, CUDAContext* ctx) { _AbsGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, dx); } /******************** loss.sigmoid_cross_entropy ********************/ template <typename T> __global__ void _SigmoidCrossEntropy( const int count, const T* logits, const T* targets, T* losses, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { losses[idx] = flags[idx] = 0; } else { losses[idx] = log(1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)) ) + logits[idx] * ((logits[idx] >= 0) - targets[idx]); flags[idx] = 1; } } } template <> void SigmoidCrossEntropy<float, CUDAContext>( const int count, const float* logits, const float* targets, float* losses, float* flags, CUDAContext* ctx) { _SigmoidCrossEntropy<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, logits, targets, losses, flags); } template <typename T> __global__ void _SigmoidCrossEntropyGrad( const int count, const T* logits, const T* targets, T* dlogits, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { dlogits[idx] = flags[idx] = 0; } else { dlogits[idx] = 1 / (1 + exp(-logits[idx])) - targets[idx]; flags[idx] = 1; } } } template <> void SigmoidCrossEntropyGrad<float, CUDAContext>( const int count, const float* logits, const float* targets, float* dlogits, float* flags, CUDAContext* ctx) { _SigmoidCrossEntropyGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, logits, targets, dlogits, flags); } /******************** loss.sigmoid_focal_loss ********************/ template <typename T> __global__ void _SigmoidFocalLoss( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* logits, const T* targets, T* losses, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int iix = idx % inner_dim; const int aix = (idx / inner_dim) % axis_dim; const int oix = idx / inner_dim / axis_dim; const int t = targets[oix * inner_dim + iix]; // ``0`` is reserved for targets if neg id is zero // use ``aix + 1`` to match the targets T c1 = (t == (aix + (neg_id ? 
0 : 1))); T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1))); T p = 1 / (1 + exp(-logits[idx])); // logit -> prob // (1 - p)^{gamma} * log(p) T pos_term = pow(1 - p, gamma) * log(max(p, FLT_MIN)); // p^{gamma} * log(1 - p) T neg_term = pow(p, gamma) * ( -logits[idx] * (logits[idx] >= 0) - log( 1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0))) ); losses[idx] = 0.0; losses[idx] += -c1 * pos_term * pos_alpha; losses[idx] += -c2 * neg_term * neg_alpha; flags[idx] = c1; } } template <> void SigmoidFocalLoss<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* logits, const float* targets, float* losses, float* flags, CUDAContext* ctx) { TIndex count = outer_dim * axis_dim * inner_dim; _SigmoidFocalLoss<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, logits, targets, losses, flags); } template <typename T> __global__ void _SigmoidFocalLossGradient( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* logits, const T* targets, T* dlogits, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int iix = idx % inner_dim; const int aix = (idx / inner_dim) % axis_dim; const int oix = idx / inner_dim / axis_dim; const int t = targets[oix * inner_dim + iix]; // ``0`` is reserved for targets if neg id is zero // use ``aix + 1`` to match the targets T c1 = (t == (aix + (neg_id ? 0 : 1))); T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1))); T p = 1 / (1 + exp(-logits[idx])); // logit -> prob // (1 - p)^{gamma} * (1 - p - gamma * p * log(p)) T pos_term = pow((1 - p), gamma) * ( 1 - p - p * gamma * log(max(p, FLT_MIN)) ); // p^{gamma} * (gamma * (1 - p) * log(1-p) - p) T neg_term = pow(p, gamma) * ( (-logits[idx] * (logits[idx] >= 0) - log( 1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0))) ) * (1 - p) * gamma - p ); dlogits[idx] = 0.0; dlogits[idx] += -c1 * pos_term * pos_alpha; dlogits[idx] += -c2 * neg_term * neg_alpha; flags[idx] = c1; } } template <> void SigmoidFocalLossGradient<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* logits, const float* targets, float* dlogits, float* flags, CUDAContext* ctx) { TIndex count = outer_dim * axis_dim * inner_dim; _SigmoidFocalLossGradient<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, logits, targets, dlogits, flags); } /******************** loss.smooth_l1_loss ********************/ template <typename T> __global__ void _SmoothL1( const int count, const float beta, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const T val = x[idx]; const T abs_val = abs(val); if (abs_val < beta) y[idx] = 0.5 * val * val / beta; else y[idx] = abs_val - 0.5 * beta; } } template<> void SmoothL1<float, CUDAContext>( const int count, const float beta, const float* x, float* y, CUDAContext* ctx) { _SmoothL1<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, beta, x, y); } template <typename T> __global__ void _SmoothL1Grad( const int count, const float beta, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const T val = dy[idx]; const T abs_val = abs(val); if (abs_val < beta) dx[idx] = val / beta; 
// val > 0: 1 | val == 0: 0 | val < 0: -1 else dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void SmoothL1Grad<float, CUDAContext>( const int count, const float beta, const float* dy, float* dx, CUDAContext* ctx) { _SmoothL1Grad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, beta, dy, dx); } /******************** loss.softmax_cross_entropy ********************/ template <typename T> __global__ void _SoftmaxCrossEntropy( const int count, const T* prob, const T* target, T* loss) { CUDA_1D_KERNEL_LOOP(idx, count) { loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN)); } } template <> void SoftmaxCrossEntropy<float, CUDAContext>( const int count, const float* prob, const float* target, float* loss, CUDAContext* ctx) { _SoftmaxCrossEntropy<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, prob, target, loss); } /******************** loss.softmax_focal_loss ********************/ template <typename T> __global__ void _SoftmaxFocalLoss( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* prob, const T* labels, const int* ignores, const int num_ignores, T* losses, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) { if (label == ignores[k]) { losses[idx] = flags[idx] = 0; break; } } if (k == num_ignores) { const int t = (oix * axis_dim + label) * inner_dim + iix; T scale = pow(1.f - prob[t], gamma); scale = label > neg_id ? pos_alpha * scale : neg_alpha * scale; losses[idx] = -scale * log(max(prob[t], FLT_MIN)); flags[idx] = label > neg_id ? 1 : 0; } } } template <> void SoftmaxFocalLoss<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* losses, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SoftmaxFocalLoss<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, prob, labels, ignores, num_ignores, losses, flags); } template <typename T> __global__ void _SoftmaxFocalLossGrad( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* prob, const T* labels, const int* ignores, const int num_ignores, T* dx, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) if (label == ignores[k]) break; if (k != num_ignores) { for (int c = 0; c < axis_dim; c++) dx[(oix * axis_dim + c) * inner_dim + iix] = 0; flags[idx] = 0; } else { const int t = (oix * axis_dim + label) * inner_dim + iix; T onemp = 1. - prob[t]; // unstable if gamma is 0 T grad = -gamma * pow(onemp, gamma - 1) * log(max(prob[t], FLT_MIN)) * prob[t] + pow(onemp, gamma); grad = label > neg_id ? pos_alpha * grad : neg_alpha * grad; for (int c = 0; c < axis_dim; c++) { const int i = (oix * axis_dim + c) * inner_dim + iix; if (c == label) { dx[i] = grad * (prob[t] - 1); } else { dx[i] = grad * prob[i]; } } flags[idx] = label > neg_id ? 
1 : 0; } } } template<> void SoftmaxFocalLossGrad<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* dx, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SoftmaxFocalLossGrad<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, prob, labels, ignores, num_ignores, dx, flags); } /******************** loss.sparse_softmax_cross_entropy ********************/ template <typename Tx, typename Ty> __global__ void _SparseSoftmaxCrossEntropy( const int count, const int axis_dim, const int inner_dim, const Tx* prob, const Ty* labels, const int* ignores, const int num_ignores, Tx* losses, Tx* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) { if (label == ignores[k]) { losses[idx] = flags[idx] = 0; break; } } if (k == num_ignores) { losses[idx] = -log( max(prob[(oix * axis_dim + label) * inner_dim + iix], FLT_MIN) ); flags[idx] = 1; } } } template <> void SparseSoftmaxCrossEntropy<float, float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* losses, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float, float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, losses, flags); } template <> void SparseSoftmaxCrossEntropy<float, int64_t, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const int64_t* labels, const int* ignores, const int num_ignores, float* losses, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float, int64_t> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, losses, flags); } template <typename Tx, typename Ty> __global__ void _SparseSoftmaxCrossEntropyGrad( const int count, const int axis_dim, const int inner_dim, const Tx* prob, const Ty* labels, const int* ignores, const int num_ignores, Tx* dx, Tx* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) if (label == ignores[k]) break; if (k != num_ignores) { for (int c = 0; c < axis_dim; c++) dx[(oix * axis_dim + c) * inner_dim + iix] = 0; flags[idx] = 0; } else { dx[(oix * axis_dim + label) * inner_dim + iix] -= 1; flags[idx] = 1; } } } template<> void SparseSoftmaxCrossEntropyGrad<float, float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* dx, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float, float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, dx, flags); } template<> void 
SparseSoftmaxCrossEntropyGrad<float, int64_t, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const int64_t* labels, const int* ignores, const int num_ignores, float* dx, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float, int64_t> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, dx, flags); } /******************** misc.astype ********************/ template <typename Ta, typename Tb> __global__ void _TypeA2B( const int count, const Ta* a, Tb* b) { CUDA_1D_KERNEL_LOOP(idx, count) { b[idx] = a[idx]; } } #define DEFINE_TYPE_A2B(type_a, type_b) \ template <> void TypeA2B<type_a, type_b, CUDAContext>( \ const int count, \ const type_a* a, \ type_b* b, \ CUDAContext* ctx) { \ _TypeA2B<type_a, type_b> \ << < CUDA_BLOCKS(count), CUDA_THREADS, \ 0, ctx->cuda_stream() >> >(count, a, b); \ } #define DEFINE_TYPE_A2ALL(type_a) \ DEFINE_TYPE_A2B(type_a, float); \ DEFINE_TYPE_A2B(type_a, double); \ DEFINE_TYPE_A2B(type_a, int); \ DEFINE_TYPE_A2B(type_a, int64_t); \ DEFINE_TYPE_A2B(type_a, uint8_t); DEFINE_TYPE_A2ALL(float); DEFINE_TYPE_A2ALL(double); DEFINE_TYPE_A2ALL(int); DEFINE_TYPE_A2ALL(int64_t); DEFINE_TYPE_A2ALL(uint8_t); /******************** misc.image_data ********************/ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <> void ImageData<float, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <> void ImageData<uint8_t, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << < 
CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** ndarray.arange ********************/ template <typename T> __global__ void _Arange( const int count, const int start, const int step, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = start + idx * step; } } template<> void Arange<float, CUDAContext>( const int count, const int start, const int step, float* y, CUDAContext* ctx) { _Arange<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, start, step, y); } template<> void Arange<int, CUDAContext>( const int count, const int start, const int step, int* y, CUDAContext* ctx) { _Arange<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, start, step, y); } /******************** ndarray.argreduce ********************/ template <typename T> __global__ void _Argmax( const int count, const int axis_dim, const int inner_dim, const T neg_bound, const T* x, int64_t* indices) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int max_idx = -1; T max_val = neg_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val > max_val) { max_val = val; max_idx = j; } } indices[idx] = max_idx; } } template <typename T> __global__ void _Argmax_v2( const int count, const int axis_dim, const int inner_dim, const T neg_bound, const T* x, int64_t* indices, T* values) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int max_idx = -1; T max_val = neg_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val > max_val) { max_val = val; max_idx = j; } } indices[idx] = max_idx; values[idx] = max_val; } } template<> void Argmax<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, int64_t* indices, float* values, CUDAContext* ctx) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; if (values == nullptr) { _Argmax<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, -FLT_MAX, x, indices); } else { _Argmax_v2<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, -FLT_MAX, x, indices, values); } } template <typename T> __global__ void _Argmin( const int count, const int axis_dim, const int inner_dim, const T pos_bound, const T* x, int64_t* indices) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int min_idx = -1; T min_val = pos_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val < min_val) { min_val = val; min_idx = j; } } indices[idx] = min_idx; } } template <typename T> __global__ void _Argmin_v2( const int count, const int axis_dim, const int inner_dim, const T pos_bound, const T* x, int64_t* indices, T* values) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int min_idx = -1; T min_val = pos_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val < min_val) { min_val = val; min_idx = j; } } indices[idx] = min_idx; values[idx] = min_val; } } template<> void Argmin<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, 
const int top_k, const float* x, int64_t* indices, float* values, CUDAContext* ctx) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; if (values == nullptr) { _Argmin<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, FLT_MAX, x, indices); } else { _Argmin_v2<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, FLT_MAX, x, indices, values); } } /******************** ndarray.gather ********************/ template <typename T> __global__ void _CanonicalAxis( const int count, const int dim, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<int, CUDAContext>( const int count, const int dim, int* y, CUDAContext* ctx) { _CanonicalAxis<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, y); } template <typename T> __global__ void _Gather( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Gather<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* x, float* y, CUDAContext* ctx) { _Gather<float> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } template <> void Gather<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* x, int* y, CUDAContext* ctx) { _Gather<int> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } template <typename T> __global__ void _GatherGrad( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } template <> void GatherGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* dy, float* dx, CUDAContext* ctx) { _GatherGrad<float> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } template <> void GatherGrad<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* dy, int* dx, CUDAContext* ctx) { _GatherGrad<int> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } /******************** ndarray.concat ********************/ template <typename T> __global__ 
void _Concat( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; y[y_idx] = x[idx]; } } template <> void Concat<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* x, float* y, CUDAContext* ctx) { _Concat<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, x, y); } template <typename T> __global__ void _ConcatGrad( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; dx[idx] = dy[y_idx]; } } template <> void ConcatGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* dy, float* dx, CUDAContext* ctx) { _ConcatGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, dy, dx); } /******************** ndarray.crop ********************/ template<typename T> __global__ void _Crop1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; y[idx] = x[(o * dim + ex_d + start) * inner_dim + i]; } } template<> void Crop1D<int, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int* x, int* y, CUDAContext* ctx) { _Crop1D<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, x, y); } template<> void Crop1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const float* x, float* y, CUDAContext* ctx) { _Crop1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, x, y); } template<typename T> __global__ void _Crop1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int d = (idx / inner_dim) % dim; const int o = idx / inner_dim / dim; dx[idx] = (d < start || d >= end) ? 
0 : dy[(o * ex_dim + d - start) * inner_dim + i]; } } template<> void Crop1DGrad<int, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const int* dy, int* dx, CUDAContext* ctx) { _Crop1DGrad<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); } template<> void Crop1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const float* dy, float* dx, CUDAContext* ctx) { _Crop1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); } /******************** ndarray.pad ********************/ template <typename T> __global__ void _ConstPad1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T value, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = ex_d - pad_l; y[idx] = (d < 0 || d >= dim) ? value : x[(o * dim + d) * inner_dim + i]; } } template <> void ConstPad1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float value, const float* x, float* y, CUDAContext* ctx) { _ConstPad1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, value, x, y); } template <typename T> __global__ void _ReflectPad1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void ReflectPad1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* ctx) { _ReflectPad1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _EdgePad1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void EdgePad1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* ctx) { _EdgePad1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _ConstPad1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % dim + pad_l; const int o = idx / inner_dim / dim; dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i]; } } template <> void ConstPad1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, 
CUDAContext* ctx) { _ConstPad1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _ReflectPad1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void ReflectPad1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* ctx) { _ReflectPad1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _EdgePad1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void EdgePad1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* ctx) { _EdgePad1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } /******************** ndarray.one_hot ********************/ template <typename T> __global__ void _OneHot( const int count, const int depth, const int on_value, const float* x, float* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int val = x[idx]; y[idx * depth + val] = on_value; } } template <> void OneHot<float, CUDAContext>( const int count, const int depth, const int on_value, const float* x, float* y, CUDAContext* ctx) { _OneHot<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, depth, on_value, x, y); } /******************** ndarray.reduce ********************/ template <typename T> __global__ void _Sum( const int count, const int axis_dim, const int inner_dim, const T* x, float* y) { CUDA_1D_KERNEL_LOOP(idx, count) { T sum_val = 0.0; const int offset = (idx / inner_dim * axis_dim) * inner_dim + idx % inner_dim; for (int j = 0; j < axis_dim; j++) sum_val += x[offset + j * inner_dim]; y[idx] = sum_val; } } template<> void Sum<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, const float* x, float* y, CUDAContext* ctx) { _Sum<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, x, y); } template <typename T> __global__ void _SumGrad( const int count, const int axis_dim, const int inner_dim, const T coeff, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int offset = (idx / inner_dim * axis_dim) * inner_dim + idx % inner_dim; for (int j = 0; j < axis_dim; j++) dx[offset + j * inner_dim] = dy[idx] * coeff; } } template<> void SumGrad<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, const float coeff, const float* dy, float* dx, CUDAContext* ctx) { _SumGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, coeff, dy, dx); } 
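/* Sketch (assumption, not taken from this file): CUDA_1D_KERNEL_LOOP(i, n), used by every kernel
 * in this file, is expected to expand to a standard grid-stride loop of the form
 *
 *   for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
 *
 * which keeps each kernel correct even when CUDA_BLOCKS(count) caps the grid at fewer than
 * count / CUDA_THREADS blocks. */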
/******************** ndarray.repeat ********************/ template <typename T> __global__ void _Repeat( const int count, const int inner_dim, const int repeats, const int dim, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim / repeats) % dim; const int n = idx / inner_dim / repeats / dim; const int x_idx = (n * dim + b) * inner_dim + d; y[idx] = x[x_idx]; } } template <> void Repeat<float, CUDAContext>( const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* x, float* y, CUDAContext* ctx) { _Repeat<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, inner_dim, repeats, dim, x, y); } template <typename T> __global__ void _RepeatGrad( const int count, const int inner_dim, const int repeats, const int dim, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim) % dim; const int n = idx / inner_dim / dim; T gradient = 0; for (int t = 0; t < repeats; t++) gradient += dy[ (((n * dim + b) * repeats) + t) * inner_dim + d]; dx[idx] = gradient; } } template <> void RepeatGrad<float, CUDAContext>( const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* dy, float* dx, CUDAContext* ctx) { _RepeatGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, inner_dim, repeats, dim, dy, dx); } /******************** ndarray.slice ********************/ template <typename T> __global__ void _Slice( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Slice<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* x, float* y, CUDAContext* ctx) { _Slice<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, x, y); } template <typename T> __global__ void _SliceGrad( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; dx[x_idx] = dy[idx]; } } template <> void SliceGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* dy, float* dx, CUDAContext* ctx) { _SliceGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, dy, dx); } /******************** ndarray.tile ********************/ template <typename T> __global__ void _Tile( const int count, const int ex_inner_dim, const int multiple, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim / multiple; const int x_idx = n * ex_inner_dim + d; y[idx] = x[x_idx]; 
} } template <> void Tile<float, CUDAContext>( const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* x, float* y, CUDAContext* ctx) { _Tile<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, ex_inner_dim, multiple, x, y); } template <typename T> __global__ void _TileGrad( const int count, const int ex_inner_dim, const int multiple, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { T gradient = 0; const int offset = (idx / ex_inner_dim * multiple) * ex_inner_dim + idx % ex_inner_dim; for (int t = 0; t < multiple; t++) gradient += dy[offset + t * ex_inner_dim]; dx[idx] = gradient; } } template <> void TileGrad<float, CUDAContext>( const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* dy, float* dx, CUDAContext* ctx) { _TileGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, ex_inner_dim, multiple, dy, dx); } /******************** ndarray.transpose ********************/ template <typename T> __global__ void _Transpose( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } y[idx] = x[x_idx]; } } template <> void Transpose<float, CUDAContext>( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* x, float* y, CUDAContext* ctx) { _Transpose<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, ndim, order, old_steps, new_steps, x, y); } template <typename T> __global__ void _TransposeGrad( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } dx[x_idx] = dy[idx]; } } template <> void TransposeGrad<float, CUDAContext>( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* dy, float* dx, CUDAContext* ctx) { _TransposeGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, ndim, order, old_steps, new_steps, dy, dx); } /******************** recurrent.lstm_cell ********************/ template <typename T> __global__ void _LSTMCellAct( const int count, const int c_offset, const int x_offset, T* xact) { CUDA_1D_KERNEL_LOOP(idx, count) { const int offset = idx % x_offset; xact[idx] = offset < c_offset ? 
_SigmoidUnit<float>(xact[idx]) : tanh(xact[idx]); } } template <typename T> __global__ void _LSTMCellGate( const int count, const int hidden_size, const int o_offset, // 2 * hidden_size const int c_offset, // 3 * hidden_size const int x_offset, // 4 * hidden_size const T* cx, const T* xact, T* c, T* h) { CUDA_1D_KERNEL_LOOP(idx, count) { const int n = idx / hidden_size; const int offset = idx % hidden_size; const T* x = xact + n * x_offset; const T i = x[offset]; const T f = x[offset + hidden_size]; const T o = x[offset + o_offset]; T c_ = x[offset + c_offset]; c_ = c[idx] = f * cx[idx] + i * c_; h[idx] = o * tanh(c_); } } template <> void LSTMCell<float, CUDAContext>( const int count, const int N, const int C, const float* cx, float* xact, float* c, float* h, CUDAContext* ctx) { const int o_offset = 2 * C, c_offset = 3 * C, x_offset = 4 * C; _LSTMCellAct<float> << < CUDA_BLOCKS(count * 4), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count * 4, c_offset, x_offset, xact); _LSTMCellGate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, o_offset, c_offset, x_offset, cx, xact, c, h); } template <typename T> __global__ void _LSTMCellGateGrad( const int count, const int hidden_size, const int o_offset, const int c_offset, const int x_offset, const T* cx, const T* xact, const T* c, const T* dc, const T* dh, T* dcx, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int n = idx / hidden_size; const int offset = idx % hidden_size; const T* xact_ = xact + n * x_offset; T* dx_ = dx + n * x_offset; const T i = xact_[offset]; const T f = xact_[offset + hidden_size]; const T o = xact_[offset + o_offset]; const T g = xact_[offset + c_offset]; const T tanh_c = tanh(c[idx]); const T dcx_sum_term = dh[idx] * o * (1 - tanh_c * tanh_c) + dc[idx]; dcx[idx] = dcx_sum_term * f; dx_[offset] = dcx_sum_term * g; dx_[offset + hidden_size] = dcx_sum_term * cx[idx]; dx_[offset + o_offset] = dh[idx] * tanh_c; dx_[offset + c_offset] = dcx_sum_term * i; } } template <typename T> __global__ void _LSTMCellActGrad( const int count, const int c_offset, const int x_offset, const T* xact, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int offset = idx % x_offset; const T val = xact[idx]; if (offset < c_offset) dx[idx] = dx[idx] * val * (T(1) - val); else dx[idx] = dx[idx] * (T(1) - val * val); } } template <> void LSTMCellGrad<float, CUDAContext>( const int count, const int N, const int C, const float* cx, const float* xact, const float* c, const float* dc, const float* dh, float* dcx, float* dx, CUDAContext* ctx) { const int o_offset = 2 * C, c_offset = 3 * C, x_offset = 4 * C; _LSTMCellGateGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, o_offset, c_offset, x_offset, cx, xact, c, dc, dh, dcx, dx); _LSTMCellActGrad<float> << < CUDA_BLOCKS(count * 4), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count * 4, c_offset, x_offset, xact, dx); } /******************** update.adam_update ********************/ template <typename T> __global__ void _AdamUpdate( const int count, const T lr, const T beta1, const T beta2, const T eps, T* g, T* m, T* v) { CUDA_1D_KERNEL_LOOP(i, count) { T gi = g[i]; T mi = m[i] = m[i] * beta1 + gi * (1 - beta1); T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); g[i] = lr * mi / (sqrt(vi) + eps); } } template <> void AdamUpdate<float, CUDAContext>( const int count, const float lr, const float beta1, const float beta2, const float eps, float* g, float* m, float* v, CUDAContext* ctx) { _AdamUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 
0, ctx->cuda_stream() >> > (count, lr, beta1, beta2, eps, g, m, v); } /******************** update.nesterov_update ********************/ template <typename T> __global__ void _NesterovUpdate( const int count, const T lr, const T momentum, T* g, T* h) { CUDA_1D_KERNEL_LOOP(i, count) { T hi = h[i]; T hi_new = h[i] = momentum * hi + lr * g[i]; g[i] = (1 + momentum) * hi_new - momentum * hi; } } template <> void NesterovUpdate<float, CUDAContext>( const int count, const float lr, const float momentum, float* g, float* h, CUDAContext* ctx) { _NesterovUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, lr, momentum, g, h); } /******************** update.rmsprop_update ********************/ template <typename T> __global__ void _RMSPropUpdate( const int count, const T lr, const T decay, const T eps, T* g, T* h) { CUDA_1D_KERNEL_LOOP(i, count) { T gi = g[i]; T hi = h[i] = decay * h[i] + (1 - decay) * gi * gi; g[i] = lr * g[i] / (sqrt(hi) + eps); } } template <> void RMSPropUpdate<float, CUDAContext>( const int count, const float lr, const float decay, const float eps, float* g, float* h, CUDAContext* ctx) { _RMSPropUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, lr, decay, eps, g, h); } /******************** update.sgd_update ********************/ template <typename T> __global__ void _SGDUpdate( const int count, const T lr, const T momentum, T* g, T* h) { CUDA_1D_KERNEL_LOOP(i, count) { T hi = h[i]; g[i] = h[i] = momentum * hi + lr * g[i]; } } template <> void SGDUpdate<float, CUDAContext>( const int count, const float lr, const float momentum, float* g, float* h, CUDAContext* ctx) { _SGDUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, lr, momentum, g, h); } /******************** vision.bias_add ********************/ template <typename T> __global__ void _BiasAdd_NCHW( const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int bias_idx = (idx / inner_dim) % dim; y[idx] += bias[bias_idx]; } } template <typename T> __global__ void _BiasAdd_NHWC( const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] += bias[idx % dim]; } } template<> void BiasAdd<float, CUDAContext>( const int count, const int outer_dim, const int dim, const int inner_dim, const string& data_format, const float* bias, const float* bias_multiplier, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _BiasAdd_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, dim, inner_dim, bias, y); } else if (data_format == "NHWC") { _BiasAdd_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, dim, inner_dim, bias, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.bilinear_resize ********************/ template <typename T> __global__ void _BilinearResize_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ?
ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float top_left(x[NCHT * W + left_x_idx]); const float top_right(x[NCHT * W + right_x_idx]); const float bottom_left(x[NCHB * W + left_x_idx]); const float bottom_right(x[NCHB * W + right_x_idx]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <typename T> __global__ void _BilinearResize_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float top_left(x[(NHT * W + left_x_idx) * C + c]); const float top_right(x[(NHT * W + right_x_idx) * C + c]); const float bottom_left(x[(NHB * W + left_x_idx) * C + c]); const float bottom_right(x[(NHB * W + right_x_idx) * C + c]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <> void BilinearResize<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _BilinearResize_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _BilinearResize_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <typename T> __global__ void _BilinearResizeGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ?
ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[NCHT * W + left_x_idx], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[NCHT * W + right_x_idx], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[NCHB * W + left_x_idx], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[NCHB * W + right_x_idx], static_cast<T>(x_lerp * dbottom)); } } template <typename T> __global__ void _BilinearResizeGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[(NHT * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[(NHT * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[(NHB * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[(NHB * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dbottom)); } } template <> void BilinearResizeGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _BilinearResizeGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _BilinearResizeGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.conv ********************/ template<typename T> __global__ void _Im2Col2d_NCHW( const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % col_w; const int h_idx = idx / col_w; const int h = h_idx % col_h; const int im_c = h_idx / col_h; const int c = im_c * kernel_h * kernel_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; T* col_ptr = col; col_ptr += ((c * col_h + h) * col_w + w); const T* im_ptr = im; im_ptr += ((im_c * H + im_h_off) * W + im_w_off); for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; *col_ptr = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? 
im_ptr[kh * dilation_h * W + kw * dilation_w] : 0; col_ptr += (col_h * col_w); } } } } template<typename T> __global__ void _Im2Col2d_NHWC( const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % col_w; const int h = idx / C / col_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; const int base_col_idx = (h * col_w) + w; for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; const int col_idx = ( ((base_col_idx * kernel_h + kh) * kernel_w + kw) * C + c ); col[col_idx] = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? im[(im_h * W + im_w) * C + c] : 0; } } } } template <> void Im2Col2d<float, CUDAContext>( const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* im, float* col, CUDAContext* ctx) { if (data_format == "NCHW") { const int count = (C * col_h * col_w); _Im2Col2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else if (data_format == "NHWC") { const int count = (col_h * col_w * C); _Im2Col2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _Col2Im2d_NCHW( const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_1D_KERNEL_LOOP(idx, count) { T val = 0; const int im_w = idx % W + pad_w; const int im_h = (idx / W) % H + pad_h; const int im_c = idx / W / H; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 
0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (( (im_c * kernel_h + kh_off) * kernel_w + kw_off) * col_h + h ) * col_w + w; val += col[col_idx]; } } } im[idx] = val; } } template<typename T> __global__ void _Col2Im2d_NHWC( const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_1D_KERNEL_LOOP(idx, count) { T val = 0; const int im_c = idx % C; const int im_w = (idx / C) % W + pad_w; const int im_h = (idx / C / W) + pad_h; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = ( ((h * col_w + w) * kernel_h + kh_off) * kernel_w + kw_off ) * C + im_c; val += col[col_idx]; } } } im[idx] = val; } } template <> void Col2Im2d<float, CUDAContext>( const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* col, float* im, CUDAContext* ctx) { if (data_format == "NCHW") { const int count = (C * H * W); _Col2Im2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else if (data_format == "NHWC") { const int count = (H * W * C); _Col2Im2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.nn_resize ********************/ template <typename T> __global__ void _NNResize_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * C + c) * 
H + h_in) * W + w_in]; } } template <typename T> __global__ void _NNResize_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * H + h_in) * W + w_in) * C + c]; } } template <> void NNResize<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _NNResize_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _NNResize_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <typename T> __global__ void _NNResizeGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], dy[idx]); } } template <typename T> __global__ void _NNResizeGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], dy[idx]); } } template <> void NNResizeGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _NNResizeGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _NNResizeGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.pooling ********************/ template<typename T> __global__ void _MAXPooling2d_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, 
T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; const T* x_ptr = x + (pn * C + pc) * H * W; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { if (x_ptr[h * W + w] > max_val) { max_idx = h * W + w; max_val = x_ptr[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<typename T> __global__ void _MAXPooling2d_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { const int x_idx = ((pn * H + h) * W + w) * C + pc; if (x[x_idx] > max_val) { max_idx = x_idx; max_val = x[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void MAXPooling2d<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, int* mask, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _MAXPooling2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else if (data_format == "NHWC") { _MAXPooling2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _AVGPooling2d_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const T* x_ptr = x + (pn * C + pc) * H * W; const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { avg_val += x_ptr[h * W + w]; 
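// Accumulate only the in-bounds pixels; pool_area above was computed from the same clipped window, so zero-padding never dilutes the average.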
} } y[idx] = avg_val / pool_area; } } template<typename T> __global__ void _AVGPooling2d_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) for (int w = start_w; w < end_w; ++w) avg_val += x[((pn * H + h) * W + w) * C + pc]; y[idx] = avg_val / pool_area; } } template<> void AVGPooling2d<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _AVGPooling2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else if (data_format == "NHWC") { _AVGPooling2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _MAXPooling2dGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; const int offset = (n * C + c) * pool_h * pool_w; const T* dy_ptr = dy + offset; const int* mask_ptr = mask + offset; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { if (mask_ptr[ph * pool_w + pw] == (h * W + w)) { grad += dy_ptr[ph * pool_w + pw]; } } } dx[idx] = grad; } } template<typename T> __global__ void _MAXPooling2dGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 
0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { const int x_idx = ((n * H + h) * W + w) * C + c; const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; if (mask[y_idx] == x_idx) grad += dy[y_idx]; } } dx[idx] = grad; } } template<> void MAXPooling2dGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, const int* mask, float* dx, CUDAContext* ctx) { if (data_format == "NCHW") { _MAXPooling2dGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy,mask, dx); } else if (data_format == "NHWC") { _MAXPooling2dGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _AVGPooling2dGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; const T* dy_ptr = dy + (n * C + c) * pool_h * pool_w; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); grad += (dy_ptr[ph * pool_w + pw] / pool_area); } } dx[idx] = grad; } } template<typename T> __global__ void _AVGPooling2dGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; grad += (dy[y_idx] / pool_area); } } dx[idx] = grad; } } template<> void AVGPooling2dGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, float* dx, CUDAContext* ctx) { if (data_format == "NCHW") { _AVGPooling2dGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else if (data_format == "NHWC") { _AVGPooling2dGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.roi_pooling ********************/ template <typename T> __global__ void _ROIPooling( const int count, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* x, const T* rois, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) { y[idx] = 0; mask[idx] = -1; continue; } int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); const T bin_size_h = (T)roi_height / (T)pool_h; const T bin_size_w = (T)roi_width / (T)pool_w; int hstart = floor(bin_size_h * ph); int wstart = floor(bin_size_w * pw); int hend = ceil(bin_size_h * (ph + 1)); int wend = ceil(bin_size_w * (pw + 1)); hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); float max_val = is_empty ? 
0 : -FLT_MAX; int max_idx = -1; x += ((roi_batch_ind * channels + c) * height * width); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int x_idx = h * width + w; if (x[x_idx] > max_val) { max_val = x[x_idx]; max_idx = x_idx; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void ROIPooling<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const float* x, const float* rois, int* mask, float* y, CUDAContext* ctx) { _ROIPooling<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, spatial_scale, C, H, W, pool_h, pool_w, x, rois, mask, y); } template <typename T> __global__ void _ROIPoolingGrad( const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* dy, const T* rois, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { int w = idx % width; int h = (idx / width) % height; int c = (idx / width / height) % channels; int n = idx / width / height / channels; T gradient = 0; for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const T* offset_rois = rois + roi_n * 5; int roi_batch_ind = offset_rois[0]; if (n != roi_batch_ind) continue; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) continue; int y_offset = (roi_n * channels + c) * pool_h * pool_w; const T* offset_dy = dy + y_offset; const int* offset_mask = mask + y_offset; int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); const T bin_size_h = (T)roi_height / (T)pool_h; const T bin_size_w = (T)roi_width / (T)pool_w; int phstart = floor(static_cast<T>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<T>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<T>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<T>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pool_h); phend = min(max(phend, 0), pool_h); pwstart = min(max(pwstart, 0), pool_w); pwend = min(max(pwend, 0), pool_w); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int pool_idx = ph * pool_w + pw; if (offset_mask[pool_idx] == (h * width + w)) { gradient += offset_dy[pool_idx]; } } } } dx[idx] = gradient; } } template<> void ROIPoolingGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const float* dy, const float* rois, const int* mask, float* dx, CUDAContext* ctx) { _ROIPoolingGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, num_rois, spatial_scale, C, H, W, pool_h, pool_w, dy, rois, mask, dx); } /******************** vision.roi_align ********************/ template <typename T> __device__ T _ROIAlignInterpolate( const T* Xdata, const int height, const int width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) return 0; if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } 
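// Clamp: when y lands on or beyond the last row, both sampling rows collapse to height - 1 and the vertical lerp weight ly becomes zero.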
else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; T v1 = Xdata[y_low * width + x_low]; T v2 = Xdata[y_low * width + x_high]; T v3 = Xdata[y_high * width + x_low]; T v4 = Xdata[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void _ROIAlign( const int count, const float spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const int sampling_ratio, const T* Xdata, const T* rois, T* Ydata) { CUDA_1D_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) { Ydata[idx] = 0; continue; } T roi_start_w = offset_rois[1] * spatial_scale; T roi_start_h = offset_rois[2] * spatial_scale; T roi_end_w = offset_rois[3] * spatial_scale; T roi_end_h = offset_rois[4] * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); const T* offset_Xdata = Xdata +(roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pool_h); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pool_w); const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w; T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = _ROIAlignInterpolate(offset_Xdata, height, width, y, x); output_val += val; } } output_val /= num_bin_grids; Ydata[idx] = output_val; } } template<> void ROIAlign<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const int sampling_ratio, const float* x, const float* rois, float* y, CUDAContext* ctx) { _ROIAlign<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, spatial_scale, C, H, W, pool_h, pool_w, sampling_ratio, x, rois, y); } template <typename T> __device__ void _ROIAlignInterpolateGrad( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void _ROIAlignGrad( const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const int sampling_ratio, const T* dYdata, const T* rois, T* dXdata) { CUDA_1D_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) continue; T roi_start_w = offset_rois[1] * spatial_scale; T roi_start_h = offset_rois[2] * spatial_scale; T roi_end_w = offset_rois[3] * spatial_scale; T roi_end_h = offset_rois[4] * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); T* offset_dXdata = dXdata + (roi_batch_ind * channels + c) * height * width; int y_offset = (n * channels + c) * pool_h * pool_w; const T* offset_dYdata = dYdata + y_offset; const T dYdata_this_bin = offset_dYdata[ph * pool_w + pw]; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pool_h); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pool_w); const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; _ROIAlignInterpolateGrad( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); T g1 = dYdata_this_bin * w1 / num_bin_grids; T g2 = dYdata_this_bin * w2 / num_bin_grids; T g3 = dYdata_this_bin * w3 / num_bin_grids; T g4 = dYdata_this_bin * w4 / num_bin_grids; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd( offset_dXdata + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_dXdata + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_dXdata + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_dXdata + y_high * width + x_high, static_cast<T>(g4)); } } } } } template<> void ROIAlignGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const int sampling_ratio, const float* dy, const float* rois, float* dx, CUDAContext* ctx) { _ROIAlignGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, num_rois, spatial_scale, C, H, W, pool_h, pool_w, sampling_ratio, dy, rois, dx); } } // namespace kernel } // namespace dragon #endif // WITH_CUDA
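/* Usage sketch (not part of the original source): the float/CUDAContext wrappers above are meant to be
   called from op code with device pointers and the op's CUDAContext. For example, a nearest-neighbor
   resize of an NCHW tensor of shape (N, C, H, W) to (out_h, out_w) would look roughly like:

       const int count = N * C * out_h * out_w;    // one thread index per output element
       kernel::NNResize<float, CUDAContext>(
           count, N, C, H, W, out_h, out_w, "NCHW", d_x, d_y, ctx);

   where d_x / d_y are assumed to be suitably sized device buffers and ctx the caller's CUDAContext*,
   whose cuda_stream() the kernel is launched on. */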
d25e5e106779392c774653a1dcacf728b2400066.cu
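// Every kernel below follows the same pattern: a __global__ _Kernel that iterates with
// CUDA_1D_KERNEL_LOOP(idx, count) and a host wrapper that launches it with CUDA_BLOCKS(count)
// blocks of CUDA_THREADS threads on ctx->cuda_stream(). CUDA_1D_KERNEL_LOOP (pulled in via
// utils/cuda_device.h) is expected to be the usual grid-stride loop, roughly
//   for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < count; idx += blockDim.x * gridDim.x)
// so each kernel stays correct for any count, whatever grid size CUDA_BLOCKS picks.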
#ifdef WITH_CUDA #include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" #include "utils/cast.h" namespace dragon { namespace kernel { /******************** activation.dropout ********************/ template<typename T> __global__ void _Dropout( const int count, const uint32_t thresh, const T scale, const T* x, const uint32_t* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] * (mask[idx] > thresh) * scale; } } template<> void Dropout<float, CUDAContext>( const int count, float prob, float scale, const float* x, uint32_t* mask, float* y, CUDAContext* ctx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); math::RandomUniform<uint32_t, CUDAContext>( count, float(0), float(UINT_MAX), mask, ctx); _Dropout<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, thresh, scale, x, mask, y); } template <typename T> __global__ void _DropoutGrad( const int count, const uint32_t thresh, const T scale, const T* dy, const uint32_t* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * (mask[idx] > thresh) * scale; } } template<> void DropoutGrad<float, CUDAContext>( const int count, float prob, float scale, const float* dy, const uint32_t* mask, float* dx, CUDAContext* ctx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); _DropoutGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, thresh, scale, dy, mask, dx); } /******************** activation.prelu ********************/ template <typename T> __global__ void _PRelu( const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[0]; } } template <typename T> __global__ void _PReluNCHW( const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template <typename T> __global__ void _PReluNHWC( const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template<> void PRelu<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* x, const float* w, float* y, CUDAContext* ctx) { if (channel_shared) { _PRelu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, x, w, y); } else { if (data_format == "NCHW") { _PReluNCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, x, w, y); } else if (data_format == "NHWC") { _PReluNHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, x, w, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } } template <typename T> __global__ void _PReluGrad( const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ( (x[idx] > 0) + (x[idx] <= 0) * w[0] ); } } template <typename T> __global__ void _PReluGradNCHW( const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % 
channels; dx[idx] = dy[idx] * ( (x[idx] > 0) + (x[idx] <= 0) * w[c] ); } } template <typename T> __global__ void _PReluGradNHWC( const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template<> void PReluGrad<float, CUDAContext>( const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* w, float* dx, CUDAContext* ctx) { if (channel_shared) { _PReluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, dy, x, w, dx); } else { if (data_format == "NCHW") { _PReluGradNCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, dy, x, w, dx); } else if (data_format == "NHWC") { _PReluGradNHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, channels, dim, dy, x, w, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } } template <typename T> __global__ void _PReluWGradBcast( const int count, const int rows, const int row_offset, const T* dy, const T* x, T* bcast_dw) { CUDA_1D_KERNEL_LOOP(idx, count) { bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0); for (int n = 1; n < rows; n++) { const int cur_idx = idx + n * row_offset; bcast_dw[idx] += dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0); } } } template<> void PReluWGrad<float, CUDAContext>( const int rows, const int row_offset, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* multiplier, float* bcast_dw, float* dw, CUDAContext* ctx) { const int cdim = channels * dim; _PReluWGradBcast<float> << < CUDA_BLOCKS(cdim), CUDA_THREADS, 0, ctx->cuda_stream() >> >( cdim, rows, row_offset, dy, x, bcast_dw); if (channel_shared) { float w_sum; math::Dot<float, CUDAContext>(channels * dim, bcast_dw, multiplier, &w_sum, ctx); math::AddScalar<float, CUDAContext>(1, w_sum, dw, ctx); } else { if (data_format == "NCHW") { math::Gemv<float, CUDAContext>( CblasNoTrans, channels, dim, 1.0, bcast_dw, multiplier, 1.0, dw, ctx); } else if (data_format == "NHWC") { math::Gemv<float, CUDAContext>( CblasTrans, dim, channels, 1.0, bcast_dw, multiplier, 1.0, dw, ctx); } else LOG(FATAL) << "Unknown data format: " << data_format; } } /******************** activation.elu ********************/ template <typename T> __global__ void _Elu( const int count, const T* x, const float alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 
x[idx] : alpha * (exp(x[idx]) - 1); } } template<> void Elu<float, CUDAContext>( const int count, const float alpha, const float* x, float* y, CUDAContext* ctx) { _Elu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, alpha, y); } template <typename T> __global__ void _EluGrad( const int count, const float alpha, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ( (y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0) ); } } template<> void EluGrad<float, CUDAContext>( const int count, const float alpha, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _EluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, alpha, dy, y, dx); } /******************** activation.relu ********************/ template <typename T> __global__ void _Relu( const int count, const float slope, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope; } } template<> void Relu<float, CUDAContext>( const int count, const float slope, const float* x, float* y, CUDAContext* ctx) { _Relu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, slope, x, y); } template <typename T> __global__ void _ReluGrad( const int count, const float slope, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ( (y[idx] > 0) + slope * (y[idx] <= 0) ); } } template<> void ReluGrad<float, CUDAContext>( const int count, const float slope, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _ReluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, slope, dy, y, dx); } /******************** activation.selu ********************/ template <typename T> __global__ void _SElu( const int count, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 1.0507 * x[idx] : 1.7581 * (exp(x[idx]) - 1); } } template<> void SElu<float, CUDAContext>( const int count, const float* x, float* y, CUDAContext* ctx) { _SElu<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, y); } template <typename T> __global__ void _SEluGrad( const int count, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = y[idx] > 0 ? 
1.0507 * dy[idx] : (1.7581 + y[idx]) * dy[idx]; } } template<> void SEluGrad<float, CUDAContext>( const int count, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _SEluGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, y, dx); } /******************** activation.sigmoid ********************/ template <typename T> __device__ T _SigmoidUnit(const T x) { return T(1) / (T(1) + exp(-x)); } template <typename T> __global__ void _Sigmoid( const int n, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, n) { y[idx] = _SigmoidUnit<T>(x[idx]); } } template<> void Sigmoid<float, CUDAContext>( const int count, const float* x, float* y, CUDAContext* ctx) { _Sigmoid<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, y); } template <typename T> __global__ void _SigmoidGrad( const int count, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * y[idx] * (1 - y[idx]); } } template<> void SigmoidGrad<float, CUDAContext>( const int count, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _SigmoidGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, y, dx); } /******************** activation.softmax ********************/ template <typename T> __global__ void _SoftmaxMaxClass( const int outer_dim, const int classes, const int inner_dim, const T* x, T* scale) { CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T max_val = -FLT_MAX; for (int c = 0; c < classes; c++) max_val = max( x[(o_idx * classes + c) * inner_dim + i_idx], max_val ); scale[idx] = max_val; } } template <typename T> __global__ void _SoftmaxSubtract( const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] -= scale[o_idx * inner_dim + i_idx]; } } template <typename T> __global__ void _SoftmaxExp( const int count, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = exp(y[idx]); } } template <typename T> __global__ void _SoftmaxSumClass( const int outer_dim, const int classes, const int inner_dim, const T* y, T* scale) { CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T sum = 0; for (int c = 0; c < classes; c++) sum += y[(o_idx * classes + c) * inner_dim + i_idx]; scale[idx] = sum; } } template <typename T> __global__ void _SoftmaxDiv( const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] /= scale[o_idx * inner_dim + i_idx]; } } template<> void Softmax<float, CUDAContext>( const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* x, float* scale, float* y, CUDAContext* ctx) { const int num_preds = inner_dim * outer_dim; _SoftmaxMaxClass<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( outer_dim, classes, inner_dim, x, scale); _SoftmaxSubtract<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, classes, inner_dim, scale, y); _SoftmaxExp<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, y); _SoftmaxSumClass<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( outer_dim, classes, inner_dim, y, scale); _SoftmaxDiv<float> << < 
CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, classes, inner_dim, scale, y); } template <typename T> __global__ void _SoftmaxDot( const int outer_dim, const int classes, const int inner_dim, const T* dy, const T* y, T* scale) { CUDA_1D_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T dot = 0; for (int c = 0; c < classes; c++) dot += ( y[(o_idx * classes + c) * inner_dim + i_idx] * dy[(o_idx * classes + c) * inner_dim + i_idx] ); scale[idx] = dot; } } template<> void SoftmaxGrad<float, CUDAContext>( const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* dy, const float* y, float* scale, float* dx, CUDAContext* ctx) { const int num_preds = inner_dim * outer_dim; _SoftmaxDot<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( outer_dim, classes, inner_dim, dy, y, scale); _SoftmaxSubtract<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, classes,inner_dim, scale, dx); math::Mul<float, CUDAContext>(count, dx, y, dx, ctx); } /******************** activation.tanh ********************/ template <typename T> __global__ void _Tanh( const int count, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, count) { y[i] = tanh(x[i]); } } template<> void Tanh<float, CUDAContext>( const int count, const float* x, float* y, CUDAContext* ctx) { _Tanh<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, x, y); } template <typename T> __global__ void _TanhGrad( const int count, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(i, count) { dx[i] = dy[i] * (1 - y[i] * y[i]); } } template<> void TanhGrad<float, CUDAContext>( const int count, const float* dy, const float* y, float* dx, CUDAContext* ctx) { _TanhGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, y, dx); } /******************** arithmetic.scale ********************/ template <typename T> __global__ void _AffineWithOBias( const int count, const int scale_dim, const int inner_dim, const T* x, const T* alpha, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = alpha[scale_idx] * x[idx]; } } template <typename T> __global__ void _AffineWithBias( const int count, const int scale_dim, const int inner_dim, const T* x, const T* alpha, const T* beta, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = alpha[scale_idx] * x[idx] + beta[scale_idx]; } } template<> void Affine<float, CUDAContext>( const int count, const int outer_dim, const int scale_dim, const int inner_dim, const float* x, const float* alpha, const float* beta, const float* beta_multiplier, float* y, CUDAContext* ctx) { if (beta != nullptr) { _AffineWithBias<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, scale_dim, inner_dim, x, alpha, beta, y); } else { _AffineWithOBias<float> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, scale_dim, inner_dim, x, alpha, y); } } template <> void AffineGrad<float, CUDAContext>( const int count, const int outer_dim, const int scale_dim, const int inner_dim, const float* dy, const float* alpha, float* dx, CUDAContext* ctx) { _AffineWithOBias<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, scale_dim, inner_dim, dy, alpha, dx); } /******************** arithmetic.clip ********************/ template <typename T> __global__ 
void _Clip( const int count, const T low, const T high, const T* x, T* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { mask[idx] = 1.0; if (x[idx] > high || x[idx] < low) mask[idx] = 0.0; y[idx] = x[idx] > high ? high : (x[idx] < low ? low : x[idx]); } } template <> void Clip<float, CUDAContext>( const int count, const float low, const float high, const float* x, float* mask, float* y, CUDAContext* ctx) { _Clip<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, low, high, x, mask, y); } /******************** control_flow.compare ********************/ template <typename T> __global__ void _Equal( const int count, const T* a, const T* b, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0; } } template <> void Equal<float, CUDAContext>( const int count, const float* a, const float* b, float* y, CUDAContext* ctx) { _Equal<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, a, b, y); } /******************** loss.l1_loss ********************/ template <typename T> __global__ void _AbsGrad( const int count, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const T val = dy[idx]; // val > 0: 1 | val == 0: 0 | val < 0: -1 dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void AbsGrad<float, CUDAContext>( const int count, const float* dy, float* dx, CUDAContext* ctx) { _AbsGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dy, dx); } /******************** loss.sigmoid_cross_entropy ********************/ template <typename T> __global__ void _SigmoidCrossEntropy( const int count, const T* logits, const T* targets, T* losses, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { losses[idx] = flags[idx] = 0; } else { losses[idx] = log(1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0)) ) + logits[idx] * ((logits[idx] >= 0) - targets[idx]); flags[idx] = 1; } } } template <> void SigmoidCrossEntropy<float, CUDAContext>( const int count, const float* logits, const float* targets, float* losses, float* flags, CUDAContext* ctx) { _SigmoidCrossEntropy<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, logits, targets, losses, flags); } template <typename T> __global__ void _SigmoidCrossEntropyGrad( const int count, const T* logits, const T* targets, T* dlogits, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { if (targets[idx] < 0) { dlogits[idx] = flags[idx] = 0; } else { dlogits[idx] = 1 / (1 + exp(-logits[idx])) - targets[idx]; flags[idx] = 1; } } } template <> void SigmoidCrossEntropyGrad<float, CUDAContext>( const int count, const float* logits, const float* targets, float* dlogits, float* flags, CUDAContext* ctx) { _SigmoidCrossEntropyGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, logits, targets, dlogits, flags); } /******************** loss.sigmoid_focal_loss ********************/ template <typename T> __global__ void _SigmoidFocalLoss( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* logits, const T* targets, T* losses, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int iix = idx % inner_dim; const int aix = (idx / inner_dim) % axis_dim; const int oix = idx / inner_dim / axis_dim; const int t = targets[oix * inner_dim + iix]; // ``0`` is reserved for targets if neg id is zero // use ``aix + 1`` to match the targets T c1 = (t == (aix + (neg_id ?
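        // c1/c2 are {0, 1} selectors: c1 marks an element whose target equals this
        // class (a positive for class `aix`), c2 (computed next) marks a valid
        // target (!= -1) belonging to some other class (a negative). When neg_id
        // is 0, class 0 is the background and targets are stored as class + 1,
        // hence the `aix + 1`.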
0 : 1))); T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1))); T p = 1 / (1 + exp(-logits[idx])); // logit -> prob // (1 - p)^{gamma} * log(p) T pos_term = pow(1 - p, gamma) * log(max(p, FLT_MIN)); // p^{gamma} * log(1 - p) T neg_term = pow(p, gamma) * ( -logits[idx] * (logits[idx] >= 0) - log( 1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0))) ); losses[idx] = 0.0; losses[idx] += -c1 * pos_term * pos_alpha; losses[idx] += -c2 * neg_term * neg_alpha; flags[idx] = c1; } } template <> void SigmoidFocalLoss<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* logits, const float* targets, float* losses, float* flags, CUDAContext* ctx) { TIndex count = outer_dim * axis_dim * inner_dim; _SigmoidFocalLoss<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, logits, targets, losses, flags); } template <typename T> __global__ void _SigmoidFocalLossGradient( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* logits, const T* targets, T* dlogits, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int iix = idx % inner_dim; const int aix = (idx / inner_dim) % axis_dim; const int oix = idx / inner_dim / axis_dim; const int t = targets[oix * inner_dim + iix]; // ``0`` is reserved for targets if neg id is zero // use ``aix + 1`` to match the targets T c1 = (t == (aix + (neg_id ? 0 : 1))); T c2 = (t != -1) & (t != (aix + (neg_id ? 0 : 1))); T p = 1 / (1 + exp(-logits[idx])); // logit -> prob // (1 - p)^{gamma} * (1 - p - gamma * p * log(p)) T pos_term = pow((1 - p), gamma) * ( 1 - p - p * gamma * log(max(p, FLT_MIN)) ); // p^{gamma} * (gamma * (1 - p) * log(1-p) - p) T neg_term = pow(p, gamma) * ( (-logits[idx] * (logits[idx] >= 0) - log( 1 + exp(logits[idx] - 2 * logits[idx] * (logits[idx] >= 0))) ) * (1 - p) * gamma - p ); dlogits[idx] = 0.0; dlogits[idx] += -c1 * pos_term * pos_alpha; dlogits[idx] += -c2 * neg_term * neg_alpha; flags[idx] = c1; } } template <> void SigmoidFocalLossGradient<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* logits, const float* targets, float* dlogits, float* flags, CUDAContext* ctx) { TIndex count = outer_dim * axis_dim * inner_dim; _SigmoidFocalLossGradient<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, logits, targets, dlogits, flags); } /******************** loss.smooth_l1_loss ********************/ template <typename T> __global__ void _SmoothL1( const int count, const float beta, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const T val = x[idx]; const T abs_val = abs(val); if (abs_val < beta) y[idx] = 0.5 * val * val / beta; else y[idx] = abs_val - 0.5 * beta; } } template<> void SmoothL1<float, CUDAContext>( const int count, const float beta, const float* x, float* y, CUDAContext* ctx) { _SmoothL1<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, beta, x, y); } template <typename T> __global__ void _SmoothL1Grad( const int count, const float beta, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const T val = dy[idx]; const T abs_val = abs(val); if (abs_val < beta) dx[idx] = val / beta; 
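        // Derivative of the smooth-L1 (Huber) function defined above:
        //   |val| <  beta: d/dval [0.5 * val^2 / beta] = val / beta
        //   |val| >= beta: d/dval [|val| - 0.5 * beta] = sign(val)  (else branch below)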
// val > 0: 1 | val == 0: 0 | val < 0: -1 else dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void SmoothL1Grad<float, CUDAContext>( const int count, const float beta, const float* dy, float* dx, CUDAContext* ctx) { _SmoothL1Grad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, beta, dy, dx); } /******************** loss.softmax_cross_entropy ********************/ template <typename T> __global__ void _SoftmaxCrossEntropy( const int count, const T* prob, const T* target, T* loss) { CUDA_1D_KERNEL_LOOP(idx, count) { loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN)); } } template <> void SoftmaxCrossEntropy<float, CUDAContext>( const int count, const float* prob, const float* target, float* loss, CUDAContext* ctx) { _SoftmaxCrossEntropy<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, prob, target, loss); } /******************** loss.softmax_focal_loss ********************/ template <typename T> __global__ void _SoftmaxFocalLoss( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* prob, const T* labels, const int* ignores, const int num_ignores, T* losses, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) { if (label == ignores[k]) { losses[idx] = flags[idx] = 0; break; } } if (k == num_ignores) { const int t = (oix * axis_dim + label) * inner_dim + iix; T scale = pow(1.f - prob[t], gamma); scale = label > neg_id ? pos_alpha * scale : neg_alpha * scale; losses[idx] = -scale * log(max(prob[t], FLT_MIN)); flags[idx] = label > neg_id ? 1 : 0; } } } template <> void SoftmaxFocalLoss<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* losses, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SoftmaxFocalLoss<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, prob, labels, ignores, num_ignores, losses, flags); } template <typename T> __global__ void _SoftmaxFocalLossGrad( const int count, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const T* prob, const T* labels, const int* ignores, const int num_ignores, T* dx, T* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) if (label == ignores[k]) break; if (k != num_ignores) { for (int c = 0; c < axis_dim; c++) dx[(oix * axis_dim + c) * inner_dim + iix] = 0; flags[idx] = 0; } else { const int t = (oix * axis_dim + label) * inner_dim + iix; T onemp = 1. - prob[t]; // unstable if gamma is 0 T grad = -gamma * pow(onemp, gamma - 1) * log(max(prob[t], FLT_MIN)) * prob[t] + pow(onemp, gamma); grad = label > neg_id ? pos_alpha * grad : neg_alpha * grad; for (int c = 0; c < axis_dim; c++) { const int i = (oix * axis_dim + c) * inner_dim + iix; if (c == label) { dx[i] = grad * (prob[t] - 1); } else { dx[i] = grad * prob[i]; } } flags[idx] = label > neg_id ? 
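        // For the focal term L = -(1 - p_t)^gamma * log(p_t), `grad` above equals
        // -p_t * dL/dp_t (with the pos/neg alpha factor already folded in), so
        // back-propagating through the softmax gives
        //   dL/dz_c = grad * (p_c - 1[c == label]),
        // i.e. grad * (p_t - 1) for the labeled class and grad * p_c otherwise,
        // which is what the loop wrote into dx. `flags` appears to mark the
        // positive samples, presumably so the host can normalize by their count.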
1 : 0; } } } template<> void SoftmaxFocalLossGrad<float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* dx, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SoftmaxFocalLossGrad<float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, pos_alpha, neg_alpha, gamma, neg_id, prob, labels, ignores, num_ignores, dx, flags); } /******************** loss.sparse_softmax_cross_entropy ********************/ template <typename Tx, typename Ty> __global__ void _SparseSoftmaxCrossEntropy( const int count, const int axis_dim, const int inner_dim, const Tx* prob, const Ty* labels, const int* ignores, const int num_ignores, Tx* losses, Tx* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) { if (label == ignores[k]) { losses[idx] = flags[idx] = 0; break; } } if (k == num_ignores) { losses[idx] = -log( max(prob[(oix * axis_dim + label) * inner_dim + iix], FLT_MIN) ); flags[idx] = 1; } } } template <> void SparseSoftmaxCrossEntropy<float, float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* losses, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float, float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, losses, flags); } template <> void SparseSoftmaxCrossEntropy<float, int64_t, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const int64_t* labels, const int* ignores, const int num_ignores, float* losses, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float, int64_t> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, losses, flags); } template <typename Tx, typename Ty> __global__ void _SparseSoftmaxCrossEntropyGrad( const int count, const int axis_dim, const int inner_dim, const Tx* prob, const Ty* labels, const int* ignores, const int num_ignores, Tx* dx, Tx* flags) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; const int label = labels[oix * inner_dim + iix]; int k; for (k = 0; k < num_ignores; k++) if (label == ignores[k]) break; if (k != num_ignores) { for (int c = 0; c < axis_dim; c++) dx[(oix * axis_dim + c) * inner_dim + iix] = 0; flags[idx] = 0; } else { dx[(oix * axis_dim + label) * inner_dim + iix] -= 1; flags[idx] = 1; } } } template<> void SparseSoftmaxCrossEntropyGrad<float, float, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const float* labels, const int* ignores, const int num_ignores, float* dx, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float, float> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, dx, flags); } template<> void 
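// NOTE: _SparseSoftmaxCrossEntropyGrad only subtracts 1 at the labeled entry
// (and zeroes the classes of ignored positions), so `dx` is presumably expected
// to arrive pre-filled with the softmax probabilities, yielding the usual
// prob - one_hot(label) gradient.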
SparseSoftmaxCrossEntropyGrad<float, int64_t, CUDAContext>( const int outer_dim, const int axis_dim, const int inner_dim, const float* prob, const int64_t* labels, const int* ignores, const int num_ignores, float* dx, float* flags, CUDAContext* ctx) { const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float, int64_t> << < CUDA_BLOCKS(num_preds), CUDA_THREADS, 0, ctx->cuda_stream() >> >( num_preds, axis_dim, inner_dim, prob, labels, ignores, num_ignores, dx, flags); } /******************** misc.astype ********************/ template <typename Ta, typename Tb> __global__ void _TypeA2B( const int count, const Ta* a, Tb* b) { CUDA_1D_KERNEL_LOOP(idx, count) { b[idx] = a[idx]; } } #define DEFINE_TYPE_A2B(type_a, type_b) \ template <> void TypeA2B<type_a, type_b, CUDAContext>( \ const int count, \ const type_a* a, \ type_b* b, \ CUDAContext* ctx) { \ _TypeA2B<type_a, type_b> \ << < CUDA_BLOCKS(count), CUDA_THREADS, \ 0, ctx->cuda_stream() >> >(count, a, b); \ } #define DEFINE_TYPE_A2ALL(type_a) \ DEFINE_TYPE_A2B(type_a, float); \ DEFINE_TYPE_A2B(type_a, double); \ DEFINE_TYPE_A2B(type_a, int); \ DEFINE_TYPE_A2B(type_a, int64_t); \ DEFINE_TYPE_A2B(type_a, uint8_t); DEFINE_TYPE_A2ALL(float); DEFINE_TYPE_A2ALL(double); DEFINE_TYPE_A2ALL(int); DEFINE_TYPE_A2ALL(int64_t); DEFINE_TYPE_A2ALL(uint8_t); /******************** misc.image_data ********************/ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <> void ImageData<float, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <> void ImageData<uint8_t, float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << < 
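    // ImageData converts a raw NHWC image buffer (uint8_t here, float above)
    // into a float tensor, optionally subtracting the per-channel mean_values
    // and dividing by the per-channel std_values; the "NCHW" variant also
    // transposes the layout while copying.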
CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** ndarray.arange ********************/ template <typename T> __global__ void _Arange( const int count, const int start, const int step, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] = start + idx * step; } } template<> void Arange<float, CUDAContext>( const int count, const int start, const int step, float* y, CUDAContext* ctx) { _Arange<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, start, step, y); } template<> void Arange<int, CUDAContext>( const int count, const int start, const int step, int* y, CUDAContext* ctx) { _Arange<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, start, step, y); } /******************** ndarray.argreduce ********************/ template <typename T> __global__ void _Argmax( const int count, const int axis_dim, const int inner_dim, const T neg_bound, const T* x, int64_t* indices) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int max_idx = -1; T max_val = neg_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val > max_val) { max_val = val; max_idx = j; } } indices[idx] = max_idx; } } template <typename T> __global__ void _Argmax_v2( const int count, const int axis_dim, const int inner_dim, const T neg_bound, const T* x, int64_t* indices, T* values) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int max_idx = -1; T max_val = neg_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val > max_val) { max_val = val; max_idx = j; } } indices[idx] = max_idx; values[idx] = max_val; } } template<> void Argmax<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, int64_t* indices, float* values, CUDAContext* ctx) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; if (values == nullptr) { _Argmax<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, -FLT_MAX, x, indices); } else { _Argmax_v2<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, -FLT_MAX, x, indices, values); } } template <typename T> __global__ void _Argmin( const int count, const int axis_dim, const int inner_dim, const T pos_bound, const T* x, int64_t* indices) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int min_idx = -1; T min_val = pos_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val < min_val) { min_val = val; min_idx = j; } } indices[idx] = min_idx; } } template <typename T> __global__ void _Argmin_v2( const int count, const int axis_dim, const int inner_dim, const T pos_bound, const T* x, int64_t* indices, T* values) { CUDA_1D_KERNEL_LOOP(idx, count) { const int oix = idx / inner_dim; const int iix = idx % inner_dim; int min_idx = -1; T min_val = pos_bound; for (int j = 0; j < axis_dim; ++j) { const T val = x[(oix * axis_dim + j) * inner_dim + iix]; if (val < min_val) { min_val = val; min_idx = j; } } indices[idx] = min_idx; values[idx] = min_val; } } template<> void Argmin<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, 
const int top_k, const float* x, int64_t* indices, float* values, CUDAContext* ctx) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; if (values == nullptr) { _Argmin<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, FLT_MAX, x, indices); } else { _Argmin_v2<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, FLT_MAX, x, indices, values); } } /******************** ndarray.gather ********************/ template <typename T> __global__ void _CanonicalAxis( const int count, const int dim, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<int, CUDAContext>( const int count, const int dim, int* y, CUDAContext* ctx) { _CanonicalAxis<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, y); } template <typename T> __global__ void _Gather( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Gather<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* x, float* y, CUDAContext* ctx) { _Gather<float> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } template <> void Gather<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* x, int* y, CUDAContext* ctx) { _Gather<int> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); } template <typename T> __global__ void _GatherGrad( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } template <> void GatherGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* dy, float* dx, CUDAContext* ctx) { _GatherGrad<float> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } template <> void GatherGrad<int, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* dy, int* dx, CUDAContext* ctx) { _GatherGrad<int> << <CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); } /******************** ndarray.concat ********************/ template <typename T> __global__ 
void _Concat( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; y[y_idx] = x[idx]; } } template <> void Concat<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* x, float* y, CUDAContext* ctx) { _Concat<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, x, y); } template <typename T> __global__ void _ConcatGrad( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; dx[idx] = dy[y_idx]; } } template <> void ConcatGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* dy, float* dx, CUDAContext* ctx) { _ConcatGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, dy, dx); } /******************** ndarray.crop ********************/ template<typename T> __global__ void _Crop1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; y[idx] = x[(o * dim + ex_d + start) * inner_dim + i]; } } template<> void Crop1D<int, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int* x, int* y, CUDAContext* ctx) { _Crop1D<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, x, y); } template<> void Crop1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const float* x, float* y, CUDAContext* ctx) { _Crop1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, x, y); } template<typename T> __global__ void _Crop1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int d = (idx / inner_dim) % dim; const int o = idx / inner_dim / dim; dx[idx] = (d < start || d >= end) ? 
0 : dy[(o * ex_dim + d - start) * inner_dim + i]; } } template<> void Crop1DGrad<int, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const int* dy, int* dx, CUDAContext* ctx) { _Crop1DGrad<int> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); } template<> void Crop1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const float* dy, float* dx, CUDAContext* ctx) { _Crop1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); } /******************** ndarray.pad ********************/ template <typename T> __global__ void _ConstPad1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T value, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = ex_d - pad_l; y[idx] = (d < 0 || d >= dim) ? value : x[(o * dim + d) * inner_dim + i]; } } template <> void ConstPad1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float value, const float* x, float* y, CUDAContext* ctx) { _ConstPad1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, value, x, y); } template <typename T> __global__ void _ReflectPad1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void ReflectPad1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* ctx) { _ReflectPad1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _EdgePad1D( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void EdgePad1D<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* ctx) { _EdgePad1D<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _ConstPad1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % dim + pad_l; const int o = idx / inner_dim / dim; dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i]; } } template <> void ConstPad1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, 
CUDAContext* ctx) { _ConstPad1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _ReflectPad1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void ReflectPad1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* ctx) { _ReflectPad1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _EdgePad1DGrad( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void EdgePad1DGrad<float, CUDAContext>( const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* ctx) { _EdgePad1DGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } /******************** ndarray.one_hot ********************/ template <typename T> __global__ void _OneHot( const int count, const int depth, const int on_value, const float* x, float* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int val = x[idx]; y[idx * depth + val] = on_value; } } template <> void OneHot<float, CUDAContext>( const int count, const int depth, const int on_value, const float* x, float* y, CUDAContext* ctx) { _OneHot<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, depth, on_value, x, y); } /******************** ndarray.reduce ********************/ template <typename T> __global__ void _Sum( const int count, const int axis_dim, const int inner_dim, const T* x, float* y) { CUDA_1D_KERNEL_LOOP(idx, count) { T sum_val = 0.0; const int offset = (idx / inner_dim * axis_dim) * inner_dim + idx % inner_dim; for (int j = 0; j < axis_dim; j++) sum_val += x[offset + j * inner_dim]; y[idx] = sum_val; } } template<> void Sum<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, const float* x, float* y, CUDAContext* ctx) { _Sum<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, x, y); } template <typename T> __global__ void _SumGrad( const int count, const int axis_dim, const int inner_dim, const T coeff, const T* dy, float* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int offset = (idx / inner_dim * axis_dim) * inner_dim + idx % inner_dim; for (int j = 0; j < axis_dim; j++) dx[offset + j * inner_dim] = dy[idx] * coeff; } } template<> void SumGrad<float, CUDAContext>( const int count, const int axis_dim, const int inner_dim, const float coeff, const float* dy, float* dx, CUDAContext* ctx) { _SumGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, axis_dim, inner_dim, coeff, dy, dx); } 
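// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original kernels): a plain host-side
// reference of the (outer_dim, axis_dim, inner_dim) reduction that _Sum and
// _SumGrad implement above, useful as a mental model for the indexing scheme
// used throughout this file. The function name is hypothetical and nothing in
// this file calls it.
// ---------------------------------------------------------------------------
static void SumReferenceCPU_Sketch(
    const int               outer_dim,
    const int               axis_dim,
    const int               inner_dim,
    const float*            x,
    float*                  y) {
    // y holds outer_dim * inner_dim entries; each accumulates the axis_dim
    // values of x that share the same (outer, inner) coordinates.
    for (int o = 0; o < outer_dim; ++o) {
        for (int i = 0; i < inner_dim; ++i) {
            float acc = 0.f;
            for (int j = 0; j < axis_dim; ++j)
                acc += x[(o * axis_dim + j) * inner_dim + i];
            y[o * inner_dim + i] = acc;
        }
    }
}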
/******************** ndarray.repeat ********************/ template <typename T> __global__ void _Repeat( const int count, const int inner_dim, const int repeats, const int dim, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim / repeats) % dim; const int n = idx / inner_dim / repeats / dim; const int x_idx = (n * dim + b) * inner_dim + d; y[idx] = x[x_idx]; } } template <> void Repeat<float, CUDAContext>( const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* x, float* y, CUDAContext* ctx) { _Repeat<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, inner_dim, repeats, dim, x, y); } template <typename T> __global__ void _RepeatGrad( const int count, const int inner_dim, const int repeats, const int dim, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim) % dim; const int n = idx / inner_dim / dim; T gradient = 0; for (int t = 0; t < repeats; t++) gradient += dy[ (((n * dim + b) * repeats) + t) * inner_dim + d]; dx[idx] = gradient; } } template <> void RepeatGrad<float, CUDAContext>( const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* dy, float* dx, CUDAContext* ctx) { _RepeatGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, inner_dim, repeats, dim, dy, dx); } /******************** ndarray.slice ********************/ template <typename T> __global__ void _Slice( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Slice<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* x, float* y, CUDAContext* ctx) { _Slice<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, x, y); } template <typename T> __global__ void _SliceGrad( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; dx[x_idx] = dy[idx]; } } template <> void SliceGrad<float, CUDAContext>( const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* dy, float* dx, CUDAContext* ctx) { _SliceGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, dy, dx); } /******************** ndarray.tile ********************/ template <typename T> __global__ void _Tile( const int count, const int ex_inner_dim, const int multiple, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim / multiple; const int x_idx = n * ex_inner_dim + d; y[idx] = x[x_idx]; 
} } template <> void Tile<float, CUDAContext>( const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* x, float* y, CUDAContext* ctx) { _Tile<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, ex_inner_dim, multiple, x, y); } template <typename T> __global__ void _TileGrad( const int count, const int ex_inner_dim, const int multiple, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { T gradient = 0; const int offset = (idx / ex_inner_dim * multiple) * ex_inner_dim + idx % ex_inner_dim; for (int t = 0; t < multiple; t++) gradient += dy[offset + t * ex_inner_dim]; dx[idx] = gradient; } } template <> void TileGrad<float, CUDAContext>( const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* dy, float* dx, CUDAContext* ctx) { _TileGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, ex_inner_dim, multiple, dy, dx); } /******************** ndarray.transpose ********************/ template <typename T> __global__ void _Transpose( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } y[idx] = x[x_idx]; } } template <> void Transpose<float, CUDAContext>( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* x, float* y, CUDAContext* ctx) { _Transpose<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, ndim, order, old_steps, new_steps, x, y); } template <typename T> __global__ void _TransposeGrad( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } dx[x_idx] = dy[idx]; } } template <> void TransposeGrad<float, CUDAContext>( const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* dy, float* dx, CUDAContext* ctx) { _TransposeGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, ndim, order, old_steps, new_steps, dy, dx); } /******************** recurrent.lstm_cell ********************/ template <typename T> __global__ void _LSTMCellAct( const int count, const int c_offset, const int x_offset, T* xact) { CUDA_1D_KERNEL_LOOP(idx, count) { const int offset = idx % x_offset; xact[idx] = offset < c_offset ? 
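        // xact packs the four pre-activation gates of each sample as
        // [i | f | o | g], each of width hidden_size (so x_offset = 4 * hidden_size
        // and c_offset = 3 * hidden_size). The first three chunks get a sigmoid,
        // the cell candidate g gets a tanh, as selected below.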
_SigmoidUnit<float>(xact[idx]) : tanh(xact[idx]); } } template <typename T> __global__ void _LSTMCellGate( const int count, const int hidden_size, const int o_offset, // 2 * hidden_size const int c_offset, // 3 * hidden_size const int x_offset, // 4 * hidden_size const T* cx, const T* xact, T* c, T* h) { CUDA_1D_KERNEL_LOOP(idx, count) { const int n = idx / hidden_size; const int offset = idx % hidden_size; const T* x = xact + n * x_offset; const T i = x[offset]; const T f = x[offset + hidden_size]; const T o = x[offset + o_offset]; T c_ = x[offset + c_offset]; c_ = c[idx] = f * cx[idx] + i * c_; h[idx] = o * tanh(c_); } } template <> void LSTMCell<float, CUDAContext>( const int count, const int N, const int C, const float* cx, float* xact, float* c, float* h, CUDAContext* ctx) { const int o_offset = 2 * C, c_offset = 3 * C, x_offset = 4 * C; _LSTMCellAct<float> << < CUDA_BLOCKS(count * 4), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count * 4, c_offset, x_offset, xact); _LSTMCellGate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, o_offset, c_offset, x_offset, cx, xact, c, h); } template <typename T> __global__ void _LSTMCellGateGrad( const int count, const int hidden_size, const int o_offset, const int c_offset, const int x_offset, const T* cx, const T* xact, const T* c, const T* dc, const T* dh, T* dcx, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int n = idx / hidden_size; const int offset = idx % hidden_size; const T* xact_ = xact + n * x_offset; T* dx_ = dx + n * x_offset; const T i = xact_[offset]; const T f = xact_[offset + hidden_size]; const T o = xact_[offset + o_offset]; const T g = xact_[offset + c_offset]; const T tanh_c = tanh(c[idx]); const T dcx_sum_term = dh[idx] * o * (1 - tanh_c * tanh_c) + dc[idx]; dcx[idx] = dcx_sum_term * f; dx_[offset] = dcx_sum_term * g; dx_[offset + hidden_size] = dcx_sum_term * cx[idx]; dx_[offset + o_offset] = dh[idx] * tanh_c; dx_[offset + c_offset] = dcx_sum_term * i; } } template <typename T> __global__ void _LSTMCellActGrad( const int count, const int c_offset, const int x_offset, const T* xact, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int offset = idx % x_offset; const T val = xact[idx]; if (offset < c_offset) dx[idx] = dx[idx] * val * (T(1) - val); else dx[idx] = dx[idx] * (T(1) - val * val); } } template <> void LSTMCellGrad<float, CUDAContext>( const int count, const int N, const int C, const float* cx, const float* xact, const float* c, const float* dc, const float* dh, float* dcx, float* dx, CUDAContext* ctx) { const int o_offset = 2 * C, c_offset = 3 * C, x_offset = 4 * C; _LSTMCellGateGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, o_offset, c_offset, x_offset, cx, xact, c, dc, dh, dcx, dx); _LSTMCellActGrad<float> << < CUDA_BLOCKS(count * 4), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count * 4, c_offset, x_offset, xact, dx); } /******************** update.adam_update ********************/ template <typename T> __global__ void _AdamUpdate( const int count, const T lr, const T beta1, const T beta2, const T eps, T* g, T* m, T* v) { CUDA_1D_KERNEL_LOOP(i, count) { T gi = g[i]; T mi = m[i] = m[i] * beta1 + gi * (1 - beta1); T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); g[i] = lr * mi / (sqrt(vi) + eps); } } template <> void AdamUpdate<float, CUDAContext>( const int count, const float lr, const float beta1, const float beta2, const float eps, float* g, float* m, float* v, CUDAContext* ctx) { _AdamUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 
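    // Adam step as implemented by _AdamUpdate above (g is overwritten with the
    // update the caller should apply):
    //   m <- beta1 * m + (1 - beta1) * g
    //   v <- beta2 * v + (1 - beta2) * g^2
    //   g <- lr * m / (sqrt(v) + eps)
    // No bias-correction terms appear here, so `lr` is presumably expected to
    // arrive already corrected by the host-side updater.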
0, ctx->cuda_stream() >> > (count, lr, beta1, beta2, eps, g, m, v); } /******************** update.nesterov_update ********************/ template <typename T> __global__ void _NesterovUpdate( const int count, const T lr, const T momentum, T* g, T* h) { CUDA_1D_KERNEL_LOOP(i, count) { T hi = h[i]; T hi_new = h[i] = momentum * hi + lr * g[i]; g[i] = (1 + momentum) * hi_new - momentum * hi; } } template <> void NesterovUpdate<float, CUDAContext>( const int count, const float lr, const float momentum, float* g, float* h, CUDAContext* ctx) { _NesterovUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, lr, momentum, g, h); } /******************** update.rmsprop_update ********************/ template <typename T> __global__ void _RMSPropUpdate( const int count, const T lr, const T decay, const T eps, T* g, T* h) { CUDA_1D_KERNEL_LOOP(i, count) { T gi = g[i]; T hi = h[i] = decay * h[i] + (1 - decay) * gi * gi; g[i] = lr * g[i] / (sqrt(hi) + eps); } } template <> void RMSPropUpdate<float, CUDAContext>( const int count, const float lr, const float decay, const float eps, float* g, float* h, CUDAContext* ctx) { _RMSPropUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, lr, decay, eps, g, h); } /******************** update.sgd_update ********************/ template <typename T> __global__ void _SGDUpdate( const int count, const T lr, const T momentum, T* g, T* h) { CUDA_1D_KERNEL_LOOP(i, count) { T hi = h[i]; g[i] = h[i] = momentum * hi + lr * g[i]; } } template <> void SGDUpdate<float, CUDAContext>( const int count, const float lr, const float momentum, float* g, float* h, CUDAContext* ctx) { _SGDUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, lr, momentum, g, h); } /******************** vision.bias_add ********************/ template <typename T> __global__ void _BiasAdd_NCHW( const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int bias_idx = (idx / inner_dim) % dim; y[idx] += bias[bias_idx]; } } template <typename T> __global__ void _BiasAdd_NHWC( const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { y[idx] += bias[idx % dim]; } } template<> void BiasAdd<float, CUDAContext>( const int count, const int outer_dim, const int dim, const int inner_dim, const string& data_format, const float* bias, const float* bias_multiplier, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _BiasAdd_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, dim, inner_dim, bias, y); } else if (data_format == "NHWC") { _BiasAdd_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, dim, inner_dim, bias, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.bilinear_resize ********************/ template <typename T> __global__ void _BilinearResize_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ?
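        // Bilinear sampling: (h_in, w_in) are the fractional source coordinates;
        // the four surrounding pixels (top/bottom rows x left/right columns) are
        // blended with weights y_lerp and x_lerp below. Clamping to H - 1 / W - 1
        // keeps the bottom/right neighbors inside the image.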
ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float top_left(x[NCHT * W + left_x_idx]); const float top_right(x[NCHT * W + right_x_idx]); const float bottom_left(x[NCHB * W + left_x_idx]); const float bottom_right(x[NCHB * W + right_x_idx]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <typename T> __global__ void _BilinearResize_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float top_left(x[(NHT * W + left_x_idx) * C + c]); const float top_right(x[(NHT * W + right_x_idx) * C + c]); const float bottom_left(x[(NHB * W + left_x_idx) * C + c]); const float bottom_right(x[(NHB * W + right_x_idx) * C + c]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <> void BilinearResize<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _BilinearResize_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _BilinearResize_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <typename T> __global__ void _BilinearResizeGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ?
ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[NCHT * W + left_x_idx], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[NCHT * W + right_x_idx], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[NCHB * W + left_x_idx], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[NCHB * W + right_x_idx], static_cast<T>(x_lerp * dbottom)); } } template <typename T> __global__ void _BilinearResizeGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[(NHT * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[(NHT * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[(NHB * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[(NHB * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dbottom)); } } template <> void BilinearResizeGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _BilinearResizeGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _BilinearResizeGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.conv ********************/ template<typename T> __global__ void _Im2Col2d_NCHW( const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % col_w; const int h_idx = idx / col_w; const int h = h_idx % col_h; const int im_c = h_idx / col_h; const int c = im_c * kernel_h * kernel_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; T* col_ptr = col; col_ptr += ((c * col_h + h) * col_w + w); const T* im_ptr = im; im_ptr += ((im_c * H + im_h_off) * W + im_w_off); for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; *col_ptr = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? 
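            // Each column entry copies one pixel of the dilated receptive field
            // (or 0 where the window falls into the padding). The NCHW col buffer
            // is laid out as (C * kernel_h * kernel_w, col_h, col_w), which is why
            // col_ptr advances by col_h * col_w per kernel tap.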
im_ptr[kh * dilation_h * W + kw * dilation_w] : 0; col_ptr += (col_h * col_w); } } } } template<typename T> __global__ void _Im2Col2d_NHWC( const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % col_w; const int h = idx / C / col_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; const int base_col_idx = (h * col_w) + w; for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; const int col_idx = ( ((base_col_idx * kernel_h + kh) * kernel_w + kw) * C + c ); col[col_idx] = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? im[(im_h * W + im_w) * C + c] : 0; } } } } template <> void Im2Col2d<float, CUDAContext>( const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* im, float* col, CUDAContext* ctx) { if (data_format == "NCHW") { const int count = (C * col_h * col_w); _Im2Col2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else if (data_format == "NHWC") { const int count = (col_h * col_w * C); _Im2Col2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _Col2Im2d_NCHW( const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_1D_KERNEL_LOOP(idx, count) { T val = 0; const int im_w = idx % W + pad_w; const int im_h = (idx / W) % H + pad_h; const int im_c = idx / W / H; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 
0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (( (im_c * kernel_h + kh_off) * kernel_w + kw_off) * col_h + h ) * col_w + w; val += col[col_idx]; } } } im[idx] = val; } } template<typename T> __global__ void _Col2Im2d_NHWC( const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_1D_KERNEL_LOOP(idx, count) { T val = 0; const int im_c = idx % C; const int im_w = (idx / C) % W + pad_w; const int im_h = (idx / C / W) + pad_h; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = ( ((h * col_w + w) * kernel_h + kh_off) * kernel_w + kw_off ) * C + im_c; val += col[col_idx]; } } } im[idx] = val; } } template <> void Col2Im2d<float, CUDAContext>( const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* col, float* im, CUDAContext* ctx) { if (data_format == "NCHW") { const int count = (C * H * W); _Col2Im2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else if (data_format == "NHWC") { const int count = (H * W * C); _Col2Im2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.nn_resize ********************/ template <typename T> __global__ void _NNResize_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * C + c) * 
H + h_in) * W + w_in]; } } template <typename T> __global__ void _NNResize_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * H + h_in) * W + w_in) * C + c]; } } template <> void NNResize<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _NNResize_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _NNResize_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template <typename T> __global__ void _NNResizeGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], dy[idx]); } } template <typename T> __global__ void _NNResizeGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], dy[idx]); } } template <> void NNResizeGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx, CUDAContext* ctx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _NNResizeGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _NNResizeGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >( count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.pooling ********************/ template<typename T> __global__ void _MAXPooling2d_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, 
T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; const T* x_ptr = x + (pn * C + pc) * H * W; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { if (x_ptr[h * W + w] > max_val) { max_idx = h * W + w; max_val = x_ptr[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<typename T> __global__ void _MAXPooling2d_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { const int x_idx = ((pn * H + h) * W + w) * C + pc; if (x[x_idx] > max_val) { max_idx = x_idx; max_val = x[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void MAXPooling2d<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, int* mask, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _MAXPooling2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else if (data_format == "NHWC") { _MAXPooling2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _AVGPooling2d_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const T* x_ptr = x + (pn * C + pc) * H * W; const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { avg_val += x_ptr[h * W + w]; 
} } y[idx] = avg_val / pool_area; } } template<typename T> __global__ void _AVGPooling2d_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) for (int w = start_w; w < end_w; ++w) avg_val += x[((pn * H + h) * W + w) * C + pc]; y[idx] = avg_val / pool_area; } } template<> void AVGPooling2d<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, float* y, CUDAContext* ctx) { if (data_format == "NCHW") { _AVGPooling2d_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else if (data_format == "NHWC") { _AVGPooling2d_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _MAXPooling2dGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; const int offset = (n * C + c) * pool_h * pool_w; const T* dy_ptr = dy + offset; const int* mask_ptr = mask + offset; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { if (mask_ptr[ph * pool_w + pw] == (h * W + w)) { grad += dy_ptr[ph * pool_w + pw]; } } } dx[idx] = grad; } } template<typename T> __global__ void _MAXPooling2dGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 
0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { const int x_idx = ((n * H + h) * W + w) * C + c; const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; if (mask[y_idx] == x_idx) grad += dy[y_idx]; } } dx[idx] = grad; } } template<> void MAXPooling2dGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, const int* mask, float* dx, CUDAContext* ctx) { if (data_format == "NCHW") { _MAXPooling2dGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy,mask, dx); } else if (data_format == "NHWC") { _MAXPooling2dGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } template<typename T> __global__ void _AVGPooling2dGrad_NCHW( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; const T* dy_ptr = dy + (n * C + c) * pool_h * pool_w; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); grad += (dy_ptr[ph * pool_w + pw] / pool_area); } } dx[idx] = grad; } } template<typename T> __global__ void _AVGPooling2dGrad_NHWC( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; grad += (dy[y_idx] / pool_area); } } dx[idx] = grad; } } template<> void AVGPooling2dGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, float* dx, CUDAContext* ctx) { if (data_format == "NCHW") { _AVGPooling2dGrad_NCHW<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else if (data_format == "NHWC") { _AVGPooling2dGrad_NHWC<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** vision.roi_pooling ********************/ template <typename T> __global__ void _ROIPooling( const int count, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* x, const T* rois, int* mask, T* y) { CUDA_1D_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) { y[idx] = 0; mask[idx] = -1; continue; } int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); const T bin_size_h = (T)roi_height / (T)pool_h; const T bin_size_w = (T)roi_width / (T)pool_w; int hstart = floor(bin_size_h * ph); int wstart = floor(bin_size_w * pw); int hend = ceil(bin_size_h * (ph + 1)); int wend = ceil(bin_size_w * (pw + 1)); hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); float max_val = is_empty ? 
0 : -FLT_MAX; int max_idx = -1; x += ((roi_batch_ind * channels + c) * height * width); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int x_idx = h * width + w; if (x[x_idx] > max_val) { max_val = x[x_idx]; max_idx = x_idx; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void ROIPooling<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const float* x, const float* rois, int* mask, float* y, CUDAContext* ctx) { _ROIPooling<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, spatial_scale, C, H, W, pool_h, pool_w, x, rois, mask, y); } template <typename T> __global__ void _ROIPoolingGrad( const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* dy, const T* rois, const int* mask, T* dx) { CUDA_1D_KERNEL_LOOP(idx, count) { int w = idx % width; int h = (idx / width) % height; int c = (idx / width / height) % channels; int n = idx / width / height / channels; T gradient = 0; for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const T* offset_rois = rois + roi_n * 5; int roi_batch_ind = offset_rois[0]; if (n != roi_batch_ind) continue; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) continue; int y_offset = (roi_n * channels + c) * pool_h * pool_w; const T* offset_dy = dy + y_offset; const int* offset_mask = mask + y_offset; int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); const T bin_size_h = (T)roi_height / (T)pool_h; const T bin_size_w = (T)roi_width / (T)pool_w; int phstart = floor(static_cast<T>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<T>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<T>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<T>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pool_h); phend = min(max(phend, 0), pool_h); pwstart = min(max(pwstart, 0), pool_w); pwend = min(max(pwend, 0), pool_w); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int pool_idx = ph * pool_w + pw; if (offset_mask[pool_idx] == (h * width + w)) { gradient += offset_dy[pool_idx]; } } } } dx[idx] = gradient; } } template<> void ROIPoolingGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const float* dy, const float* rois, const int* mask, float* dx, CUDAContext* ctx) { _ROIPoolingGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, num_rois, spatial_scale, C, H, W, pool_h, pool_w, dy, rois, mask, dx); } /******************** vision.roi_align ********************/ template <typename T> __device__ T _ROIAlignInterpolate( const T* Xdata, const int height, const int width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) return 0; if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } 
else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; T v1 = Xdata[y_low * width + x_low]; T v2 = Xdata[y_low * width + x_high]; T v3 = Xdata[y_high * width + x_low]; T v4 = Xdata[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void _ROIAlign( const int count, const float spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const int sampling_ratio, const T* Xdata, const T* rois, T* Ydata) { CUDA_1D_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) { Ydata[idx] = 0; continue; } T roi_start_w = offset_rois[1] * spatial_scale; T roi_start_h = offset_rois[2] * spatial_scale; T roi_end_w = offset_rois[3] * spatial_scale; T roi_end_h = offset_rois[4] * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); const T* offset_Xdata = Xdata +(roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pool_h); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pool_w); const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w; T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = _ROIAlignInterpolate(offset_Xdata, height, width, y, x); output_val += val; } } output_val /= num_bin_grids; Ydata[idx] = output_val; } } template<> void ROIAlign<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const int sampling_ratio, const float* x, const float* rois, float* y, CUDAContext* ctx) { _ROIAlign<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, spatial_scale, C, H, W, pool_h, pool_w, sampling_ratio, x, rois, y); } template <typename T> __device__ void _ROIAlignInterpolateGrad( const int height, const int width, T y, T x, T& w1, T& w2, T& w3, T& w4, int& x_low, int& x_high, int& y_low, int& y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. 
- lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void _ROIAlignGrad( const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const int sampling_ratio, const T* dYdata, const T* rois, T* dXdata) { CUDA_1D_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) continue; T roi_start_w = offset_rois[1] * spatial_scale; T roi_start_h = offset_rois[2] * spatial_scale; T roi_end_w = offset_rois[3] * spatial_scale; T roi_end_h = offset_rois[4] * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); T* offset_dXdata = dXdata + (roi_batch_ind * channels + c) * height * width; int y_offset = (n * channels + c) * pool_h * pool_w; const T* offset_dYdata = dYdata + y_offset; const T dYdata_this_bin = offset_dYdata[ph * pool_w + pw]; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pool_h); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pool_w); const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; _ROIAlignInterpolateGrad( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); T g1 = dYdata_this_bin * w1 / num_bin_grids; T g2 = dYdata_this_bin * w2 / num_bin_grids; T g3 = dYdata_this_bin * w3 / num_bin_grids; T g4 = dYdata_this_bin * w4 / num_bin_grids; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd( offset_dXdata + y_low * width + x_low, static_cast<T>(g1)); atomicAdd( offset_dXdata + y_low * width + x_high, static_cast<T>(g2)); atomicAdd( offset_dXdata + y_high * width + x_low, static_cast<T>(g3)); atomicAdd( offset_dXdata + y_high * width + x_high, static_cast<T>(g4)); } } } } } template<> void ROIAlignGrad<float, CUDAContext>( const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int num_rois, const float spatial_scale, const int sampling_ratio, const float* dy, const float* rois, float* dx, CUDAContext* ctx) { _ROIAlignGrad<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> >(count, num_rois, spatial_scale, C, H, W, pool_h, pool_w, sampling_ratio, dy, rois, dx); } } // namespace kernel } // namespace dragon #endif // WITH_CUDA
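The bilinear kernels in the file above all share one coordinate mapping: scale = input_extent / output_extent, then a clamped floor/ceil neighbour pair blended by the fractional parts. Below is a host-side C++ restatement of that mapping for a single NCHW output pixel, intended only as a reading aid; the helper name bilinear_at is mine and not part of the file.

// Host-side restatement of the coordinate mapping used by _BilinearResize_NCHW
// above (helper name is hypothetical, not part of the original source).
#include <cmath>

float bilinear_at(const float* x, int C, int H, int W,
                  int out_h, int out_w, int n, int c, int h, int w) {
    const float scale_h = (float)H / out_h;           // same scales the kernels use
    const float scale_w = (float)W / out_w;
    const float h_in = h * scale_h;
    const int top    = (int)std::floor(h_in);
    const int bottom = (h_in < H - 1) ? (int)std::ceil(h_in) : H - 1;
    const float y_lerp = h_in - top;
    const float w_in = w * scale_w;
    const int left  = (int)std::floor(w_in);
    const int right = (w_in < W - 1) ? (int)std::ceil(w_in) : W - 1;
    const float x_lerp = w_in - left;
    auto at = [&](int hh, int ww) { return x[((n * C + c) * H + hh) * W + ww]; };
    const float top_row    = at(top, left)    + (at(top, right)    - at(top, left))    * x_lerp;
    const float bottom_row = at(bottom, left) + (at(bottom, right) - at(bottom, left)) * x_lerp;
    return top_row + (bottom_row - top_row) * y_lerp;  // same lerp order as the kernel
}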
bc3d4c93310ff2447c6fd8ba4758c2f2dca46e80.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <sys/time.h> #include <hip/hip_runtime.h> #include "MatUtil.h" #include "debug.h" #if (APSP_VER == 1) #include "Floyd.h" #elif (APSP_VER == 2) #include "Floyd_coa.h" #elif (APSP_VER == 3) #include "Floyd_sm.h" #else #include "Floyd_blk.h" #endif using namespace std; int main(int argc, char **argv) { struct timeval tv1, tv2; if (argc != 2) { cout << "Usage: test {N}" << endl; exit(-1); } //generate a random matrix. size_t N = atoi(argv[1]); /*if (N % TILE_WIDTH != 0)*/ /*{*/ /*cout << "The problem size must be divisible by " << TILE_WIDTH << endl;*/ /*exit(-1);*/ /*}*/ int *mat = (int*)malloc(sizeof(int)*N*N); GenMatrix(mat, N); //compute the reference result. #ifndef PROFILING int *ref = (int*)malloc(sizeof(int)*N*N); memcpy(ref, mat, sizeof(int)*N*N); gettimeofday(&tv1, NULL); ST_APSP(ref, N); gettimeofday(&tv2, NULL); long sequentialtime = (tv2.tv_sec - tv1.tv_sec)*1000000 + tv2.tv_usec - tv1.tv_usec; cout << "Elapsed time (sequential) = " << sequentialtime << " usecs" << endl; #endif //compute your results int *result = (int*)malloc(sizeof(int)*N*N); memcpy(result, mat, sizeof(int)*N*N); //replace by parallel algorithm gettimeofday(&tv1, NULL); Floyd_Warshall(result, N); gettimeofday(&tv2, NULL); long paralleltime = (tv2.tv_sec - tv1.tv_sec)*1000000 + tv2.tv_usec - tv1.tv_usec; cout << "Elapsed time (parallel) = " << paralleltime << " usecs" << endl; #ifndef PROFILING cout << "Speed up = " << (double)sequentialtime/paralleltime << endl; //compare your result with reference result if(CmpArray(result, ref, N*N)) cout << "Your result is correct." << endl; else cout << "Your result is wrong." << endl; #endif }
bc3d4c93310ff2447c6fd8ba4758c2f2dca46e80.cu
#include <iostream> #include <cstdlib> #include <sys/time.h> #include <cuda_runtime.h> #include "MatUtil.h" #include "debug.h" #if (APSP_VER == 1) #include "Floyd.h" #elif (APSP_VER == 2) #include "Floyd_coa.h" #elif (APSP_VER == 3) #include "Floyd_sm.h" #else #include "Floyd_blk.h" #endif using namespace std; int main(int argc, char **argv) { struct timeval tv1, tv2; if (argc != 2) { cout << "Usage: test {N}" << endl; exit(-1); } //generate a random matrix. size_t N = atoi(argv[1]); /*if (N % TILE_WIDTH != 0)*/ /*{*/ /*cout << "The problem size must be divisible by " << TILE_WIDTH << endl;*/ /*exit(-1);*/ /*}*/ int *mat = (int*)malloc(sizeof(int)*N*N); GenMatrix(mat, N); //compute the reference result. #ifndef PROFILING int *ref = (int*)malloc(sizeof(int)*N*N); memcpy(ref, mat, sizeof(int)*N*N); gettimeofday(&tv1, NULL); ST_APSP(ref, N); gettimeofday(&tv2, NULL); long sequentialtime = (tv2.tv_sec - tv1.tv_sec)*1000000 + tv2.tv_usec - tv1.tv_usec; cout << "Elapsed time (sequential) = " << sequentialtime << " usecs" << endl; #endif //compute your results int *result = (int*)malloc(sizeof(int)*N*N); memcpy(result, mat, sizeof(int)*N*N); //replace by parallel algorithm gettimeofday(&tv1, NULL); Floyd_Warshall(result, N); gettimeofday(&tv2, NULL); long paralleltime = (tv2.tv_sec - tv1.tv_sec)*1000000 + tv2.tv_usec - tv1.tv_usec; cout << "Elapsed time (parallel) = " << paralleltime << " usecs" << endl; #ifndef PROFILING cout << "Speed up = " << (double)sequentialtime/paralleltime << endl; //compare your result with reference result if(CmpArray(result, ref, N*N)) cout << "Your result is correct." << endl; else cout << "Your result is wrong." << endl; #endif }
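Neither ST_APSP nor Floyd_Warshall is defined in this translation unit; they come from MatUtil.h and the selected Floyd*.h header. For orientation only, here is a plain CPU Floyd-Warshall of the kind ST_APSP presumably implements. The assumption that every entry already holds a finite, non-negative distance is mine; the real GenMatrix/ST_APSP conventions are not visible in this file.

// Plain CPU Floyd-Warshall sketch (illustrative; not the actual ST_APSP from MatUtil).
// Assumes mat is a row-major N*N matrix of finite, non-negative distances.
void cpu_apsp_reference(int* mat, int N) {
    for (int k = 0; k < N; ++k)
        for (int i = 0; i < N; ++i)
            for (int j = 0; j < N; ++j) {
                const int via_k = mat[i * N + k] + mat[k * N + j];
                if (via_k < mat[i * N + j]) mat[i * N + j] = via_k;  // relax i -> k -> j
            }
}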
33b105c630bb37129a6556f3b755f26d1b4a939b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void MyKernel(int *a, int *b, int *c, int N) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } }
33b105c630bb37129a6556f3b755f26d1b4a939b.cu
#include "includes.h" __global__ void MyKernel(int *a, int *b, int *c, int N) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } }
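MyKernel above is a plain element-wise vector add. A minimal, illustrative host-side launch is sketched below; the buffer setup and the 256-thread block size are choices made for the sketch, not taken from the file.

// Minimal host-side launch sketch for MyKernel (illustrative only).
#include <cuda_runtime.h>

__global__ void MyKernel(int* a, int* b, int* c, int N);   // defined above

void add_on_gpu(const int* host_a, const int* host_b, int* host_c, int N) {
    int *a, *b, *c;
    const size_t bytes = N * sizeof(int);
    cudaMalloc((void**)&a, bytes);  cudaMalloc((void**)&b, bytes);  cudaMalloc((void**)&c, bytes);
    cudaMemcpy(a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b, host_b, bytes, cudaMemcpyHostToDevice);
    const int threads = 256;                               // arbitrary block size
    const int blocks  = (N + threads - 1) / threads;
    MyKernel<<<blocks, threads>>>(a, b, c, N);
    cudaMemcpy(host_c, c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(a);  cudaFree(b);  cudaFree(c);
}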
f09889a37f4aebcec1627137efa3320088d93f16.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialUpSamplingNearest.cu" #else #include "../common.h" static inline void THNN_(SpatialUpSamplingNearest_shapeCheck) (THCState *state,THCTensor *input, THCTensor *gradOutput, int scale_factor) { THArgCheck(input != NULL, 2, "4D input tensor expected but got NULL"); THArgCheck(scale_factor > 1, 4, "scale_factor must be greater than 1, but got: %d", scale_factor); THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D input tensor expected but got: %s"); if (input->dim() == 3) { int nChannels = THCTensor_(size)(state, input, 0); int inputHeight = THCTensor_(size)(state, input, 1); int inputWidth = THCTensor_(size)(state, input, 2); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 3, 0, nChannels); THCUNN_check_dim_size(state, gradOutput, 3, 1, outputHeight); THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth); } } else { int nBatch = THCTensor_(size)(state, input, 0); int nChannels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } } void THNN_(SpatialUpSamplingNearest_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int scale_factor) { THCTensor_(zero)(state, output); THCUNN_assertSameGPU(state, 2, input, output); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, NULL, scale_factor); int inputHeight = THCTensor_(size)(state, input, input->dim()-2); int inputWidth = THCTensor_(size)(state, input, input->dim()-1); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (input->dim() == 3) { THCTensor_(resize3d)(state, output, THCTensor_(size)(state, input, 0), outputHeight, outputWidth); } else { THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); } input = THCTensor_(newContiguous)(state, input); // This is for allocating output Tensor int64_t no_elements = 1; for(int i = 0; i < input->dim(); i++){ no_elements *= input->size[i]; } no_elements *= scale_factor * scale_factor; int d1; int d2; int d3; if (input->dim() == 3) { d1 = output->size[0]; d2 = output->size[1]; d3 = output->size[2]; } else { d1 = output->size[1]; d2 = output->size[2]; d3 = output->size[3]; } real *input_data = THCTensor_(data)(state, input); real *output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int64_t nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this int64_t n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); int64_t n_yblocks = (int64_t)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! 
aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: hipLaunchKernelGGL(( upscale), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(hipGetLastError()); // final cut: THCTensor_(free)(state, input); } void THNN_(SpatialUpSamplingNearest_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int scale_factor) { THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, gradOutput, scale_factor); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); real *gradInput_data = THCTensor_(data)(state, gradInput); real *gradOutput_data = THCTensor_(data)(state, gradOutput); int64_t no_elements = 1; for(int i = 0; i < gradInput->dim(); i++){ no_elements *= gradInput->size[i]; } int d1; int d2; int d3; if (gradInput->dim() == 3) { d1 = gradInput->size[0]; d2 = gradInput->size[1]; d3 = gradInput->size[2]; } else { d1 = gradInput->size[1]; d2 = gradInput->size[2]; d3 = gradInput->size[3]; } // cuda blocks & threads: int64_t nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this int64_t n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); int64_t n_yblocks = (int64_t)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: hipLaunchKernelGGL(( downscale<real ,accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
f09889a37f4aebcec1627137efa3320088d93f16.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialUpSamplingNearest.cu" #else #include "../common.h" static inline void THNN_(SpatialUpSamplingNearest_shapeCheck) (THCState *state,THCTensor *input, THCTensor *gradOutput, int scale_factor) { THArgCheck(input != NULL, 2, "4D input tensor expected but got NULL"); THArgCheck(scale_factor > 1, 4, "scale_factor must be greater than 1, but got: %d", scale_factor); THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 3 || input->dim() == 4), 2, input, "non-empty 3D or 4D input tensor expected but got: %s"); if (input->dim() == 3) { int nChannels = THCTensor_(size)(state, input, 0); int inputHeight = THCTensor_(size)(state, input, 1); int inputWidth = THCTensor_(size)(state, input, 2); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 3, 0, nChannels); THCUNN_check_dim_size(state, gradOutput, 3, 1, outputHeight); THCUNN_check_dim_size(state, gradOutput, 3, 2, outputWidth); } } else { int nBatch = THCTensor_(size)(state, input, 0); int nChannels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } } void THNN_(SpatialUpSamplingNearest_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int scale_factor) { THCTensor_(zero)(state, output); THCUNN_assertSameGPU(state, 2, input, output); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, NULL, scale_factor); int inputHeight = THCTensor_(size)(state, input, input->dim()-2); int inputWidth = THCTensor_(size)(state, input, input->dim()-1); int outputHeight = inputHeight * scale_factor; int outputWidth = inputWidth * scale_factor; if (input->dim() == 3) { THCTensor_(resize3d)(state, output, THCTensor_(size)(state, input, 0), outputHeight, outputWidth); } else { THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); } input = THCTensor_(newContiguous)(state, input); // This is for allocating output Tensor int64_t no_elements = 1; for(int i = 0; i < input->dim(); i++){ no_elements *= input->size[i]; } no_elements *= scale_factor * scale_factor; int d1; int d2; int d3; if (input->dim() == 3) { d1 = output->size[0]; d2 = output->size[1]; d3 = output->size[2]; } else { d1 = output->size[1]; d2 = output->size[2]; d3 = output->size[3]; } real *input_data = THCTensor_(data)(state, input); real *output_data = THCTensor_(data)(state, output); // cuda blocks & threads: int64_t nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this int64_t n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); int64_t n_yblocks = (int64_t)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! 
aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: upscale<<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (input_data, output_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(cudaGetLastError()); // final cut: THCTensor_(free)(state, input); } void THNN_(SpatialUpSamplingNearest_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int scale_factor) { THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THNN_(SpatialUpSamplingNearest_shapeCheck)(state, input, gradOutput, scale_factor); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); real *gradInput_data = THCTensor_(data)(state, gradInput); real *gradOutput_data = THCTensor_(data)(state, gradOutput); int64_t no_elements = 1; for(int i = 0; i < gradInput->dim(); i++){ no_elements *= gradInput->size[i]; } int d1; int d2; int d3; if (gradInput->dim() == 3) { d1 = gradInput->size[0]; d2 = gradInput->size[1]; d3 = gradInput->size[2]; } else { d1 = gradInput->size[1]; d2 = gradInput->size[2]; d3 = gradInput->size[3]; } // cuda blocks & threads: int64_t nthreads = 256; // Max number of blocks: http://en.wikipedia.org/wiki/CUDA // 65535 for SM 2.x, 2^32 -1 for >= 3.0 // TODO: When we move to SM 3.5 we should update this int64_t n_xblocks = min(max((int)ceil((float)no_elements / nthreads), 1), 65535); int64_t n_yblocks = (int64_t)ceil((float)no_elements / (float)(n_xblocks * nthreads)); if (n_yblocks > 65535) { THError("Input size is too large! aborting"); } dim3 blocks(n_xblocks, n_yblocks); dim3 threads(nthreads); // kernel: downscale<real ,accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (gradInput_data, gradOutput_data, no_elements, scale_factor, d1, d2, d3); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
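Both update paths above size their launch the same way: the element count is folded into a 2-D grid because a single grid dimension is limited to 65535 blocks on SM 2.x. The helper below restates that arithmetic on the host; the function name is hypothetical and not part of THCUNN.

// Host-side restatement of the 2-D launch sizing used above (hypothetical helper).
#include <cuda_runtime.h>
#include <cstdint>
#include <cmath>
#include <algorithm>

dim3 blocks_for(int64_t no_elements, int64_t nthreads = 256) {
    int64_t n_xblocks = std::min<int64_t>(
        std::max<int64_t>((int64_t)std::ceil((float)no_elements / nthreads), 1), 65535);
    int64_t n_yblocks = (int64_t)std::ceil(
        (float)no_elements / (float)(n_xblocks * nthreads));
    // The original code raises THError("Input size is too large! aborting")
    // when n_yblocks still exceeds 65535.
    return dim3((unsigned)n_xblocks, (unsigned)n_yblocks, 1);
}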
8dbda6230a8bc64fe2f8935291a02304b6b4dedf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cmath> #include <assert.h> #include <time.h> #include "init.h" /// Compute squared Euclidean distance transform for each column. /** Two scans are performed to find squared distance to the closest pixel in the column, which is stored in shared memoty. \param[in] src Source array with 8bit binary image. \param[out] out Output int array with squared Euclidean distance transform in columns. \param[in] sizeRow Image widht \param[in] sizeCol Image height \return void */ __global__ void computeCol(BYTE* src, int* out, int sizeRow, int sizeCol) { extern __shared__ int imgCol []; // allocates shared memory int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y; int untilPixel = min(x + sizeRow/blockDim.x, sizeCol); int row, rowi; int d; int value; for (row = threadIdx.x; row < sizeCol; row += blockDim.x) { imgCol[row] = src[row*sizeRow+y]; // copy column to shared memory } __syncthreads(); for(row = x; row < untilPixel; row += blockDim.x) { value = imgCol[row]; if(value != 0) { value = sizeRow*sizeRow + sizeCol*sizeCol; d = 1; for(rowi = 1; rowi < sizeCol - row; rowi++) { // scan 1 if(imgCol[row + rowi] == 0) value = min(value, d); d += 1 + 2 * rowi; if(d > value) break; } d = 1; for(rowi = 1; rowi <= row; rowi++) { // scan 2 if(imgCol[row - rowi] == 0) value = min(value, d); d += 1 + 2 * rowi; if(d > value) break; } } out[row * sizeRow + y] = value; } } /// Compute squared Euclidean distance transform for each row. /** Two scans are performed to find squared distance to the closest pixel in the row, which is stored in shared memoty. \param[in] out Output int array with squared Euclidean distance transform in columns. \param[out] res Output float array with exact Euclidean distance transform. \param[in] sizeRow Image widht \param[in] sizeCol Image height \return void */ __global__ void computeRow(int* out, float* res, int sizeRow, int sizeCol) { extern __shared__ int imgRow[]; // allocates shared memory int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * sizeRow; int untilPixel = min(x + sizeRow/blockDim.x, sizeRow); int col, coli; int value; int d; for(col = threadIdx.x; col < sizeRow; col += blockDim.x) { imgRow[col] = out[y + col]; // copy rows to shared memory } __syncthreads(); for(col = x; col < untilPixel; col += blockDim.x) { value = imgRow[col]; if(value != 0) { d = 1; for(coli = 1; coli < sizeRow - col; coli++) { // scan 1 value = min(value, imgRow[col + coli] + d); d += 1 + 2 * coli; if(d > value) break; } d = 1; for(coli = 1; coli <= col; coli++) { // scan 2 value = min(value, imgRow[col - coli] + d); d += 1 + 2 * coli; if(d > value) break; } } res[y + col] = sqrt((double)value); } } /// Compute exact Euclidean distance transform on GPU. /** The computation is split into two phases -- for rows and columns. In each phase, two scans are performed to find squared distance to the closest pixel in the row / column. Then, the squared root is taken, \param[in] diffData Source array with 8bit binary image. \param[out] dtData Output float array with Euclidean distance transform values. 
\param[in] w Image widht \param[in] h Image height \return void */ void gpuDTfast(const BYTE *diffData, float *dtData, int w, int h) { /// Maximal number of threads per row/column (can be changed if w/h is lower) int MAXTH = 1024; int ARRAY_SIZE = w*h; BYTE *devSrc; int *devTemp; float *devOut; hipMalloc((void **) &devSrc, ARRAY_SIZE * sizeof(BYTE)); hipMalloc((void **) &devTemp, ARRAY_SIZE * sizeof(int)); hipMalloc((void **) &devOut, ARRAY_SIZE * sizeof(float)); hipMemcpy(devSrc, diffData, ARRAY_SIZE*sizeof(BYTE), hipMemcpyHostToDevice); int TH = MAXTH; if(h < TH) TH = h; int DH = (int) ceil(h/(float)TH); dim3 dimGrid(DH, w, 1); hipLaunchKernelGGL(( computeCol), dim3(dimGrid), dim3(TH), h*sizeof(int), 0, devSrc, devTemp, w, h); hipDeviceSynchronize(); CHECK_ERROR(hipGetLastError()); int TW = MAXTH; if(w < TW) TW = w; int DW = (int) ceil(w/(float)TW); dim3 dimGridr(DW, h, 1); hipLaunchKernelGGL(( computeRow), dim3(dimGridr), dim3(TW), w*sizeof(int), 0, devTemp, devOut, w, h); hipDeviceSynchronize(); CHECK_ERROR(hipGetLastError()); hipMemcpy(dtData, devOut, ARRAY_SIZE * sizeof(float), hipMemcpyDeviceToHost ); CHECK_ERROR(hipGetLastError()); hipFree(devSrc); hipFree(devTemp); hipFree(devOut); }
8dbda6230a8bc64fe2f8935291a02304b6b4dedf.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cmath> #include <assert.h> #include <time.h> #include "init.h" /// Compute squared Euclidean distance transform for each column. /** Two scans are performed to find squared distance to the closest pixel in the column, which is stored in shared memoty. \param[in] src Source array with 8bit binary image. \param[out] out Output int array with squared Euclidean distance transform in columns. \param[in] sizeRow Image widht \param[in] sizeCol Image height \return void */ __global__ void computeCol(BYTE* src, int* out, int sizeRow, int sizeCol) { extern __shared__ int imgCol []; // allocates shared memory int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y; int untilPixel = min(x + sizeRow/blockDim.x, sizeCol); int row, rowi; int d; int value; for (row = threadIdx.x; row < sizeCol; row += blockDim.x) { imgCol[row] = src[row*sizeRow+y]; // copy column to shared memory } __syncthreads(); for(row = x; row < untilPixel; row += blockDim.x) { value = imgCol[row]; if(value != 0) { value = sizeRow*sizeRow + sizeCol*sizeCol; d = 1; for(rowi = 1; rowi < sizeCol - row; rowi++) { // scan 1 if(imgCol[row + rowi] == 0) value = min(value, d); d += 1 + 2 * rowi; if(d > value) break; } d = 1; for(rowi = 1; rowi <= row; rowi++) { // scan 2 if(imgCol[row - rowi] == 0) value = min(value, d); d += 1 + 2 * rowi; if(d > value) break; } } out[row * sizeRow + y] = value; } } /// Compute squared Euclidean distance transform for each row. /** Two scans are performed to find squared distance to the closest pixel in the row, which is stored in shared memoty. \param[in] out Output int array with squared Euclidean distance transform in columns. \param[out] res Output float array with exact Euclidean distance transform. \param[in] sizeRow Image widht \param[in] sizeCol Image height \return void */ __global__ void computeRow(int* out, float* res, int sizeRow, int sizeCol) { extern __shared__ int imgRow[]; // allocates shared memory int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * sizeRow; int untilPixel = min(x + sizeRow/blockDim.x, sizeRow); int col, coli; int value; int d; for(col = threadIdx.x; col < sizeRow; col += blockDim.x) { imgRow[col] = out[y + col]; // copy rows to shared memory } __syncthreads(); for(col = x; col < untilPixel; col += blockDim.x) { value = imgRow[col]; if(value != 0) { d = 1; for(coli = 1; coli < sizeRow - col; coli++) { // scan 1 value = min(value, imgRow[col + coli] + d); d += 1 + 2 * coli; if(d > value) break; } d = 1; for(coli = 1; coli <= col; coli++) { // scan 2 value = min(value, imgRow[col - coli] + d); d += 1 + 2 * coli; if(d > value) break; } } res[y + col] = sqrt((double)value); } } /// Compute exact Euclidean distance transform on GPU. /** The computation is split into two phases -- for rows and columns. In each phase, two scans are performed to find squared distance to the closest pixel in the row / column. Then, the squared root is taken, \param[in] diffData Source array with 8bit binary image. \param[out] dtData Output float array with Euclidean distance transform values. 
\param[in] w Image widht \param[in] h Image height \return void */ void gpuDTfast(const BYTE *diffData, float *dtData, int w, int h) { /// Maximal number of threads per row/column (can be changed if w/h is lower) int MAXTH = 1024; int ARRAY_SIZE = w*h; BYTE *devSrc; int *devTemp; float *devOut; cudaMalloc((void **) &devSrc, ARRAY_SIZE * sizeof(BYTE)); cudaMalloc((void **) &devTemp, ARRAY_SIZE * sizeof(int)); cudaMalloc((void **) &devOut, ARRAY_SIZE * sizeof(float)); cudaMemcpy(devSrc, diffData, ARRAY_SIZE*sizeof(BYTE), cudaMemcpyHostToDevice); int TH = MAXTH; if(h < TH) TH = h; int DH = (int) ceil(h/(float)TH); dim3 dimGrid(DH, w, 1); computeCol<<<dimGrid, TH, h*sizeof(int)>>>(devSrc, devTemp, w, h); cudaDeviceSynchronize(); CHECK_ERROR(cudaGetLastError()); int TW = MAXTH; if(w < TW) TW = w; int DW = (int) ceil(w/(float)TW); dim3 dimGridr(DW, h, 1); computeRow<<<dimGridr, TW, w*sizeof(int)>>>(devTemp, devOut, w, h); cudaDeviceSynchronize(); CHECK_ERROR(cudaGetLastError()); cudaMemcpy(dtData, devOut, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost ); CHECK_ERROR(cudaGetLastError()); cudaFree(devSrc); cudaFree(devTemp); cudaFree(devOut); }
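A minimal, illustrative caller for gpuDTfast follows. It assumes BYTE from init.h is an 8-bit unsigned type and that zero-valued pixels are the seeds, which is how the column pass above treats them (a source value of 0 yields distance 0).

// Illustrative caller for gpuDTfast (not part of the original source).
#include <vector>
using BYTE = unsigned char;                          // assumed to match the typedef in init.h
void gpuDTfast(const BYTE* diffData, float* dtData, int w, int h);   // defined above

void demo_distance_transform(int w, int h) {
    std::vector<BYTE> img(w * h, 1);                 // all background ...
    img[(h / 2) * w + (w / 2)] = 0;                  // ... plus one seed pixel in the centre
    std::vector<float> dt(w * h);
    gpuDTfast(img.data(), dt.data(), w, h);
    // dt[y * w + x] now holds the Euclidean distance from (x, y) to the seed.
}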
797310871c130b5d21839afb208435e3b82562d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(Permute, LAYER_PERMUTE); __global__ void permute_kernel(int n, const float *srcData, int num_axes, int *permute_order, int *old_steps, int *new_steps, float *dstData) { CUDA_KERNEL_LOOP(index, n) { int old_idx = 0; int idx = index; for (int j = 0; j < num_axes; ++j) { int order = permute_order[j]; old_idx += (idx / new_steps[j]) * old_steps[order]; idx %= new_steps[j]; } dstData[index] = srcData[old_idx]; } } Status CudaPermuteLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Status ret = CudaLayerAcc::Init(context, param, resource, inputs, outputs); if (ret != TNN_OK) { return ret; } auto params = dynamic_cast<PermuteLayerParam *>(param); if (!params) { return Status(TNNERR_MODEL_ERR, "Error: PermuteLayerParam is empyt"); } Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto input_dims = input_blob->GetBlobDesc().dims; auto output_dims = output_blob->GetBlobDesc().dims; ASSERT(input_dims.size() == output_dims.size()); CreateTempBuf(input_dims.size() * sizeof(int)); CreateTempBuf(input_dims.size() * sizeof(int)); CreateTempBuf(input_dims.size() * sizeof(int)); hipMemcpyAsync(tempbufs_[0].ptr, &(params->orders[0]), input_dims.size() * sizeof(int), hipMemcpyHostToDevice, context_->GetStream()); return TNN_OK; } Status CudaPermuteLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto input_dims = input_blob->GetBlobDesc().dims; auto output_dims = output_blob->GetBlobDesc().dims; std::vector<int> input_step; std::vector<int> output_step; for (int i = 0; i < input_dims.size(); i++) { input_step.push_back(DimsVectorUtils::Count(input_dims, i + 1)); output_step.push_back(DimsVectorUtils::Count(output_dims, i + 1)); } hipMemcpyAsync(tempbufs_[1].ptr, &(input_step[0]), input_dims.size() * sizeof(int), hipMemcpyHostToDevice, context_->GetStream()); hipMemcpyAsync(tempbufs_[2].ptr, &(output_step[0]), input_dims.size() * sizeof(int), hipMemcpyHostToDevice, context_->GetStream()); return TNN_OK; } Status CudaPermuteLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto dims = output_blob->GetBlobDesc().dims; int count = DimsVectorUtils::Count(dims); float* input_data = static_cast<float*>(input_blob->GetHandle().base); float* output_data = static_cast<float*>(output_blob->GetHandle().base); hipLaunchKernelGGL(( permute_kernel), dim3(TNN_CUDA_GET_BLOCKS(count)), dim3(TNN_CUDA_NUM_THREADS), 
0, context_->GetStream(), count, input_data, dims.size(), (int*)tempbufs_[0].ptr, (int*)tempbufs_[1].ptr, (int*)tempbufs_[2].ptr, output_data); return TNN_OK; } REGISTER_CUDA_ACC(Permute, LAYER_PERMUTE); } // namespace TNN_NS
797310871c130b5d21839afb208435e3b82562d8.cu
// Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { DECLARE_CUDA_ACC(Permute, LAYER_PERMUTE); __global__ void permute_kernel(int n, const float *srcData, int num_axes, int *permute_order, int *old_steps, int *new_steps, float *dstData) { CUDA_KERNEL_LOOP(index, n) { int old_idx = 0; int idx = index; for (int j = 0; j < num_axes; ++j) { int order = permute_order[j]; old_idx += (idx / new_steps[j]) * old_steps[order]; idx %= new_steps[j]; } dstData[index] = srcData[old_idx]; } } Status CudaPermuteLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Status ret = CudaLayerAcc::Init(context, param, resource, inputs, outputs); if (ret != TNN_OK) { return ret; } auto params = dynamic_cast<PermuteLayerParam *>(param); if (!params) { return Status(TNNERR_MODEL_ERR, "Error: PermuteLayerParam is empyt"); } Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto input_dims = input_blob->GetBlobDesc().dims; auto output_dims = output_blob->GetBlobDesc().dims; ASSERT(input_dims.size() == output_dims.size()); CreateTempBuf(input_dims.size() * sizeof(int)); CreateTempBuf(input_dims.size() * sizeof(int)); CreateTempBuf(input_dims.size() * sizeof(int)); cudaMemcpyAsync(tempbufs_[0].ptr, &(params->orders[0]), input_dims.size() * sizeof(int), cudaMemcpyHostToDevice, context_->GetStream()); return TNN_OK; } Status CudaPermuteLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto input_dims = input_blob->GetBlobDesc().dims; auto output_dims = output_blob->GetBlobDesc().dims; std::vector<int> input_step; std::vector<int> output_step; for (int i = 0; i < input_dims.size(); i++) { input_step.push_back(DimsVectorUtils::Count(input_dims, i + 1)); output_step.push_back(DimsVectorUtils::Count(output_dims, i + 1)); } cudaMemcpyAsync(tempbufs_[1].ptr, &(input_step[0]), input_dims.size() * sizeof(int), cudaMemcpyHostToDevice, context_->GetStream()); cudaMemcpyAsync(tempbufs_[2].ptr, &(output_step[0]), input_dims.size() * sizeof(int), cudaMemcpyHostToDevice, context_->GetStream()); return TNN_OK; } Status CudaPermuteLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; auto dims = output_blob->GetBlobDesc().dims; int count = DimsVectorUtils::Count(dims); float* input_data = static_cast<float*>(input_blob->GetHandle().base); float* output_data = static_cast<float*>(output_blob->GetHandle().base); permute_kernel<<<TNN_CUDA_GET_BLOCKS(count), TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( count, input_data, dims.size(), (int*)tempbufs_[0].ptr, (int*)tempbufs_[1].ptr, 
(int*)tempbufs_[2].ptr, output_data); return TNN_OK; } REGISTER_CUDA_ACC(Permute, LAYER_PERMUTE); } // namespace TNN_NS
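The permute kernel above is pure index arithmetic: old_steps/new_steps are the row-major strides of the input and output shapes, and each flat output index is decomposed with new_steps and recomposed with old_steps through the axis order. The standalone host sketch below is not part of TNN; it assumes DimsVectorUtils::Count(dims, i + 1) is the product of the trailing dimensions, and reproduces the same mapping for a small NCHW example.

#include <cstdio>
#include <vector>

// steps[i] = product of dims[i+1 .. end], i.e. the row-major stride of axis i.
static std::vector<int> MakeSteps(const std::vector<int>& dims) {
    std::vector<int> steps(dims.size());
    int acc = 1;
    for (int i = (int)dims.size() - 1; i >= 0; --i) {
        steps[i] = acc;
        acc *= dims[i];
    }
    return steps;
}

// Same formula as permute_kernel: decompose with new_steps, recompose with old_steps[order[j]].
static int OldIndex(int new_index, const std::vector<int>& order,
                    const std::vector<int>& old_steps, const std::vector<int>& new_steps) {
    int old_idx = 0, idx = new_index;
    for (size_t j = 0; j < order.size(); ++j) {
        old_idx += (idx / new_steps[j]) * old_steps[order[j]];
        idx %= new_steps[j];
    }
    return old_idx;
}

int main() {
    // Permute NCHW (2,3,4,5) with order {0,2,3,1} -> output shape (2,4,5,3).
    std::vector<int> in_dims = {2, 3, 4, 5}, order = {0, 2, 3, 1};
    std::vector<int> out_dims = {2, 4, 5, 3};              // out_dims[j] == in_dims[order[j]]
    std::vector<int> old_steps = MakeSteps(in_dims), new_steps = MakeSteps(out_dims);
    printf("output index 7 reads input index %d\n",        // prints 22 for this example
           OldIndex(7, order, old_steps, new_steps));
    return 0;
}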
a22f2d52e87d81674fdea8498801186c524e6040.hip
// !!! This is a file automatically generated by hipify!!!
#include "che.cuh"

__host__ __device__
index_t cu_trig(index_t he)
{
    if(he == NIL) return NIL;
    return he / che::P;
}

__host__ __device__
index_t cu_next(index_t he)
{
    if(he == NIL) return NIL;
    return che::P * cu_trig(he) + (he + 1) % che::P;
}

__host__ __device__
index_t cu_prev(index_t he)
{
    if(he == NIL) return NIL;
    return che::P * cu_trig(he) + (he + che::P - 1) % che::P;
}

void cuda_create_CHE(CHE * h_che, CHE *& dd_che, CHE *& d_che)
{
    dd_che = (CHE *) malloc(sizeof(CHE));
    memcpy(dd_che, h_che, sizeof(CHE));

    hipMalloc(&dd_che->GT, sizeof(vertex_cu) * h_che->n_vertices);
    hipMemcpy(dd_che->GT, h_che->GT, sizeof(vertex_cu) * h_che->n_vertices, hipMemcpyHostToDevice);

    hipMalloc(&dd_che->VT, sizeof(index_t) * h_che->n_half_edges);
    hipMemcpy(dd_che->VT, h_che->VT, sizeof(index_t) * h_che->n_half_edges, hipMemcpyHostToDevice);

    hipMalloc(&dd_che->OT, sizeof(index_t) * h_che->n_half_edges);
    hipMemcpy(dd_che->OT, h_che->OT, sizeof(index_t) * h_che->n_half_edges, hipMemcpyHostToDevice);

    hipMalloc(&dd_che->EVT, sizeof(index_t) * h_che->n_vertices);
    hipMemcpy(dd_che->EVT, h_che->EVT, sizeof(index_t) * h_che->n_vertices, hipMemcpyHostToDevice);

    hipMalloc(&d_che, sizeof(CHE));
    hipMemcpy(d_che, dd_che, sizeof(CHE), hipMemcpyHostToDevice);
}

void cuda_free_CHE(CHE *& dd_che, CHE *& d_che)
{
    if(dd_che->GT) hipFree(dd_che->GT);
    if(dd_che->VT) hipFree(dd_che->VT);
    if(dd_che->OT) hipFree(dd_che->OT);
    if(dd_che->EVT) hipFree(dd_che->EVT);
    free(dd_che);

    hipFree(d_che);
}
a22f2d52e87d81674fdea8498801186c524e6040.cu
#include "che.cuh" __host__ __device__ index_t cu_trig(index_t he) { if(he == NIL) return NIL; return he / che::P; } __host__ __device__ index_t cu_next(index_t he) { if(he == NIL) return NIL; return che::P * cu_trig(he) + (he + 1) % che::P; } __host__ __device__ index_t cu_prev(index_t he) { if(he == NIL) return NIL; return che::P * cu_trig(he) + (he + che::P - 1) % che::P; } void cuda_create_CHE(CHE * h_che, CHE *& dd_che, CHE *& d_che) { dd_che = (CHE *) malloc(sizeof(CHE)); memcpy(dd_che, h_che, sizeof(CHE)); cudaMalloc(&dd_che->GT, sizeof(vertex_cu) * h_che->n_vertices); cudaMemcpy(dd_che->GT, h_che->GT, sizeof(vertex_cu) * h_che->n_vertices, cudaMemcpyHostToDevice); cudaMalloc(&dd_che->VT, sizeof(index_t) * h_che->n_half_edges); cudaMemcpy(dd_che->VT, h_che->VT, sizeof(index_t) * h_che->n_half_edges, cudaMemcpyHostToDevice); cudaMalloc(&dd_che->OT, sizeof(index_t) * h_che->n_half_edges); cudaMemcpy(dd_che->OT, h_che->OT, sizeof(index_t) * h_che->n_half_edges, cudaMemcpyHostToDevice); cudaMalloc(&dd_che->EVT, sizeof(index_t) * h_che->n_vertices); cudaMemcpy(dd_che->EVT, h_che->EVT, sizeof(index_t) * h_che->n_vertices, cudaMemcpyHostToDevice); cudaMalloc(&d_che, sizeof(CHE)); cudaMemcpy(d_che, dd_che, sizeof(CHE), cudaMemcpyHostToDevice); } void cuda_free_CHE(CHE *& dd_che, CHE *& d_che) { if(dd_che->GT) cudaFree(dd_che->GT); if(dd_che->VT) cudaFree(dd_che->VT); if(dd_che->OT) cudaFree(dd_che->OT); if(dd_che->EVT) cudaFree(dd_che->EVT); free(dd_che); cudaFree(d_che); }
06dec2bd4a9a88be09cb985d1fcf8acba72fd24c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2016, National University of Defense Technology // Author: Xuhao Chen <[email protected]> #include <stdio.h> #define SYMGS_VARIANT "base" #include "symgs.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include "timer.h" __global__ void gs_kernel(int num_rows, int * Ap, int * Aj, int* indices, ValueType * Ax, ValueType * x, ValueType * b) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_rows) { int inew = indices[id]; int row_begin = Ap[inew]; int row_end = Ap[inew+1]; ValueType rsum = 0; ValueType diag = 0; for (int jj = row_begin; jj < row_end; jj++) { const int j = Aj[jj]; //column index if (inew == j) diag = Ax[jj]; else rsum += x[j] * Ax[jj]; } if (diag != 0) x[inew] = (b[inew] - rsum) / diag; } } void gauss_seidel(int *d_Ap, int *d_Aj, int *d_indices, ValueType *d_Ax, ValueType *d_x, ValueType *d_b, int row_start, int row_stop, int row_step) { int num_rows = row_stop - row_start; const size_t NUM_BLOCKS = (num_rows - 1) / BLOCK_SIZE + 1; //printf("num_rows=%d, nblocks=%ld, nthreads=%ld\n", num_rows, NUM_BLOCKS, BLOCK_SIZE); hipLaunchKernelGGL(( gs_kernel), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, num_rows, d_Ap, d_Aj, d_indices+row_start, d_Ax, d_x, d_b); } void SymGSSolver(int num_rows, int nnz, int *h_Ap, int *h_Aj, int *h_indices, ValueType *h_Ax, ValueType *h_x, ValueType *h_b, std::vector<int> color_offsets) { //print_device_info(0); int *d_Ap, *d_Aj, *d_indices; CUDA_SAFE_CALL(hipMalloc((void **)&d_Ap, (num_rows + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_Aj, nnz * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_indices, num_rows * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(d_Ap, h_Ap, (num_rows + 1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_Aj, h_Aj, nnz * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_indices, h_indices, num_rows * sizeof(int), hipMemcpyHostToDevice)); ValueType *d_Ax, *d_x, *d_b; CUDA_SAFE_CALL(hipMalloc((void **)&d_Ax, sizeof(ValueType) * nnz)); CUDA_SAFE_CALL(hipMalloc((void **)&d_x, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(hipMalloc((void **)&d_b, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(hipMemcpy(d_Ax, h_Ax, nnz * sizeof(ValueType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_x, h_x, num_rows * sizeof(ValueType), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_b, h_b, num_rows * sizeof(ValueType), hipMemcpyHostToDevice)); printf("Launching CUDA SymGS solver (%d threads/CTA) ...\n", BLOCK_SIZE); Timer t; t.Start(); //printf("Forward\n"); for(size_t i = 0; i < color_offsets.size()-1; i++) gauss_seidel(d_Ap, d_Aj, d_indices, d_Ax, d_x, d_b, color_offsets[i], color_offsets[i+1], 1); //printf("Backward\n"); for(size_t i = color_offsets.size()-1; i > 0; i--) gauss_seidel(d_Ap, d_Aj, d_indices, d_Ax, d_x, d_b, color_offsets[i-1], color_offsets[i], 1); CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Stop(); printf("\truntime [%s] = %f ms.\n", SYMGS_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(hipMemcpy(h_x, d_x, sizeof(ValueType) * num_rows, hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipFree(d_Ap)); CUDA_SAFE_CALL(hipFree(d_Aj)); CUDA_SAFE_CALL(hipFree(d_indices)); CUDA_SAFE_CALL(hipFree(d_Ax)); CUDA_SAFE_CALL(hipFree(d_x)); CUDA_SAFE_CALL(hipFree(d_b)); }
06dec2bd4a9a88be09cb985d1fcf8acba72fd24c.cu
// Copyright 2016, National University of Defense Technology // Author: Xuhao Chen <[email protected]> #include <stdio.h> #define SYMGS_VARIANT "base" #include "symgs.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include "timer.h" __global__ void gs_kernel(int num_rows, int * Ap, int * Aj, int* indices, ValueType * Ax, ValueType * x, ValueType * b) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_rows) { int inew = indices[id]; int row_begin = Ap[inew]; int row_end = Ap[inew+1]; ValueType rsum = 0; ValueType diag = 0; for (int jj = row_begin; jj < row_end; jj++) { const int j = Aj[jj]; //column index if (inew == j) diag = Ax[jj]; else rsum += x[j] * Ax[jj]; } if (diag != 0) x[inew] = (b[inew] - rsum) / diag; } } void gauss_seidel(int *d_Ap, int *d_Aj, int *d_indices, ValueType *d_Ax, ValueType *d_x, ValueType *d_b, int row_start, int row_stop, int row_step) { int num_rows = row_stop - row_start; const size_t NUM_BLOCKS = (num_rows - 1) / BLOCK_SIZE + 1; //printf("num_rows=%d, nblocks=%ld, nthreads=%ld\n", num_rows, NUM_BLOCKS, BLOCK_SIZE); gs_kernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(num_rows, d_Ap, d_Aj, d_indices+row_start, d_Ax, d_x, d_b); } void SymGSSolver(int num_rows, int nnz, int *h_Ap, int *h_Aj, int *h_indices, ValueType *h_Ax, ValueType *h_x, ValueType *h_b, std::vector<int> color_offsets) { //print_device_info(0); int *d_Ap, *d_Aj, *d_indices; CUDA_SAFE_CALL(cudaMalloc((void **)&d_Ap, (num_rows + 1) * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_Aj, nnz * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_indices, num_rows * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(d_Ap, h_Ap, (num_rows + 1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_Aj, h_Aj, nnz * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_indices, h_indices, num_rows * sizeof(int), cudaMemcpyHostToDevice)); ValueType *d_Ax, *d_x, *d_b; CUDA_SAFE_CALL(cudaMalloc((void **)&d_Ax, sizeof(ValueType) * nnz)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_x, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(cudaMalloc((void **)&d_b, sizeof(ValueType) * num_rows)); CUDA_SAFE_CALL(cudaMemcpy(d_Ax, h_Ax, nnz * sizeof(ValueType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_x, h_x, num_rows * sizeof(ValueType), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_b, h_b, num_rows * sizeof(ValueType), cudaMemcpyHostToDevice)); printf("Launching CUDA SymGS solver (%d threads/CTA) ...\n", BLOCK_SIZE); Timer t; t.Start(); //printf("Forward\n"); for(size_t i = 0; i < color_offsets.size()-1; i++) gauss_seidel(d_Ap, d_Aj, d_indices, d_Ax, d_x, d_b, color_offsets[i], color_offsets[i+1], 1); //printf("Backward\n"); for(size_t i = color_offsets.size()-1; i > 0; i--) gauss_seidel(d_Ap, d_Aj, d_indices, d_Ax, d_x, d_b, color_offsets[i-1], color_offsets[i], 1); CUDA_SAFE_CALL(cudaDeviceSynchronize()); t.Stop(); printf("\truntime [%s] = %f ms.\n", SYMGS_VARIANT, t.Millisecs()); CUDA_SAFE_CALL(cudaMemcpy(h_x, d_x, sizeof(ValueType) * num_rows, cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaFree(d_Ap)); CUDA_SAFE_CALL(cudaFree(d_Aj)); CUDA_SAFE_CALL(cudaFree(d_indices)); CUDA_SAFE_CALL(cudaFree(d_Ax)); CUDA_SAFE_CALL(cudaFree(d_x)); CUDA_SAFE_CALL(cudaFree(d_b)); }
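Each call to gauss_seidel above updates every row of one color in parallel; rows within a color share no unknowns, so the parallel update matches a sequential sweep over that color. The plain CPU sketch below is illustrative only (it reuses the solver's CSR layout and assumes ValueType is float, as defined in symgs.h).

#include <vector>

typedef float ValueType;   // assumed; the real typedef lives in symgs.h

// One forward sweep over all colors, equivalent to the forward loop in SymGSSolver.
void gauss_seidel_cpu(const std::vector<int>& Ap, const std::vector<int>& Aj,
                      const std::vector<int>& indices, const std::vector<ValueType>& Ax,
                      std::vector<ValueType>& x, const std::vector<ValueType>& b,
                      const std::vector<int>& color_offsets) {
    for (size_t c = 0; c + 1 < color_offsets.size(); ++c) {
        for (int id = color_offsets[c]; id < color_offsets[c + 1]; ++id) {
            int row = indices[id];                  // same indirection as gs_kernel
            ValueType rsum = 0, diag = 0;
            for (int jj = Ap[row]; jj < Ap[row + 1]; ++jj) {
                if (Aj[jj] == row) diag = Ax[jj];
                else rsum += x[Aj[jj]] * Ax[jj];
            }
            if (diag != 0) x[row] = (b[row] - rsum) / diag;
        }
    }
}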
b547ed57aa7b7b72428ea885803c4f359f70f792.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Modification of Ingemar Ragnemalm "Real Hello World!" program
// To compile execute below:
// nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world

#include <stdio.h>
#include <stdlib.h>

#define N 16
#define BLOCK_SIZE 16
#define NUM_BLOCKS N/BLOCK_SIZE
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))

/* Declare statically one array of ARRAY_SIZE elements */
unsigned int cpu_block[ARRAY_SIZE];

__global__ void hello(unsigned int * block)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    block[thread_idx] = threadIdx.x;
}

void main_sub()
{
    /* Declare pointers for GPU based params */
    unsigned int *gpu_block;

    hipMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES);
    /* Initialise the device array from the (zero-initialised) host array */
    hipMemcpy( gpu_block, cpu_block, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );

    /* Execute our kernel */
    hipLaunchKernelGGL(hello, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, gpu_block);

    /* Copy the results back, then free the GPU array as we're done with it */
    hipMemcpy( cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
    hipFree(gpu_block);

    /* Iterate through the array and print the thread index written into each slot */
    for(unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        printf("Calculated Thread: %2u\n", cpu_block[i]);
    }
}

int main()
{
    main_sub();
    return EXIT_SUCCESS;
}
b547ed57aa7b7b72428ea885803c4f359f70f792.cu
// Modification of Ingemar Ragnemalm "Real Hello World!" program
// To compile execute below:
// nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world

#include <stdio.h>
#include <stdlib.h>

#define N 16
#define BLOCK_SIZE 16
#define NUM_BLOCKS N/BLOCK_SIZE
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))

/* Declare statically one array of ARRAY_SIZE elements */
unsigned int cpu_block[ARRAY_SIZE];

__global__ void hello(unsigned int * block)
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    block[thread_idx] = threadIdx.x;
}

void main_sub()
{
    /* Declare pointers for GPU based params */
    unsigned int *gpu_block;

    cudaMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES);
    /* Initialise the device array from the (zero-initialised) host array */
    cudaMemcpy( gpu_block, cpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );

    /* Execute our kernel */
    hello<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block);

    /* Copy the results back, then free the GPU array as we're done with it */
    cudaMemcpy( cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
    cudaFree(gpu_block);

    /* Iterate through the array and print the thread index written into each slot */
    for(unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        printf("Calculated Thread: %2u\n", cpu_block[i]);
    }
}

int main()
{
    main_sub();
    return EXIT_SUCCESS;
}
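Both versions of the hello-world example call the runtime API without checking return codes. A minimal error-checking macro in the spirit of the book-style HANDLE_ERROR could be added as sketched below; the CHECK name is ours, not from the original sources, and the HIP variant is analogous with hipError_t / hipGetErrorString.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with file/line context if any runtime call returns an error.
#define CHECK(call)                                                         \
    do {                                                                    \
        cudaError_t err__ = (call);                                         \
        if (err__ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err__), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Example usage:
//   CHECK(cudaMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost));
//   hello<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block);
//   CHECK(cudaGetLastError());   // catches launch-configuration errors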
73af28a96a996f725cf156992210952f13e5ef20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! \file tfising.cu \brief Functions to generate a Hamiltonian for the Transverse Field Ising model */ #include "hamiltonian.h" __device__ float HOffBondXTFI(const int si, const int bra, const float JJ) { float valH; //int S0, S1; //int T0, T1; valH = JJ; //contribution from the J part of the Hamiltonian return valH; } __device__ float HDiagPartTFI1D(const int bra, int latticeSize, int3* d_Bond, const float JJ) { int S0b,S1b ; //spins (bra int T0,T1; //site //int P0, P1, P2, P3; //sites for plaquette (Q) //int s0p, s1p, s2p, s3p; float valH = 0.f; for (int Ti=0; Ti<latticeSize; Ti++) { T0 = (d_Bond[Ti]).x; //lower left spin S0b = (bra>>T0)&1; //if (T0 != Ti) cout<<"Square error 3\n"; T1 = (d_Bond[Ti]).y; //first bond S1b = (bra>>T1)&1; //unpack bra valH += JJ*(S0b-0.5)*(S1b-0.5); }//T0 return valH; }//HdiagPart __device__ float HDiagPartTFI2D(const int bra, int latticeSize, int3* d_Bond, const float JJ) { int S0b,S1b ; //spins (bra int T0,T1; //site //int P0, P1, P2, P3; //sites for plaquette (Q) //int s0p, s1p, s2p, s3p; float valH = 0.f; for (int Ti=0; Ti<latticeSize; Ti++) { T0 = (d_Bond[Ti]).x; //lower left spin S0b = (bra>>T0)&1; //if (T0 != Ti) cout<<"Square error 3\n"; T1 = (d_Bond[Ti]).y; //first bond S1b = (bra>>T1)&1; //unpack bra valH += JJ*(S0b-0.5)*(S1b-0.5); T1 = (d_Bond[Ti]).z; //second bond S1b = (bra>>T1)&1; //unpack bra valH += JJ*(S0b-0.5)*(S1b-0.5); }//T0 return valH; }//HdiagPart __global__ void FillDiagonalsTFI(int* d_basis, f_hamiltonian H, int* d_Bond, parameters data) { int latticeSize = data.nsite; int row = blockIdx.x*blockDim.x + threadIdx.x; int site = threadIdx.x%(latticeSize); unsigned int tempi; __shared__ int3 tempBond[18]; //int3 tempBond[16]; if (row < H.sectorDim) { tempi = d_basis[row]; (tempBond[site]).x = d_Bond[site]; (tempBond[site]).y = d_Bond[latticeSize + site]; switch( data.dimension ) { case 1 : H.vals[row] = HDiagPartTFI1D(tempi, latticeSize, tempBond, data.J1); break; case 2 : (tempBond[site].z) = d_Bond[2*latticeSize + site]; H.vals[row] = HDiagPartTFI2D(tempi, latticeSize, tempBond, data.J1); break; } H.rows[row] = row; H.cols[row] = row; H.set[row] = 1; } else { H.rows[row] = H.sectorDim + 1; H.cols[row] = H.sectorDim + 1; H.set[row] = 0; } } /* Function FillSparse: this function takes the empty Hamiltonian arrays and fills them up. 
Each thread in x handles one ket |i>, and each thread in y handles one site T0 Inputs: d_basisPosition - position information about the basis d_basis - other basis infos d_dim - the number of kets H_sort - an array that will store the Hamiltonian d_Bond - the bond information d_latticeSize - the number of lattice sites JJ - the coupling parameter */ __global__ void FillSparseTFI(int* d_basisPosition, int* d_basis, f_hamiltonian H, int* d_Bond, parameters data, int offset) { int latticeSize = data.nsite; int dim = H.sectorDim; int ii = ( blockDim.x / ( 2 * latticeSize ) ) * blockIdx.x + threadIdx.x / ( 2 * latticeSize ) + offset;// + blockIdx.y* gridDim.x * blockDim.x / (2 * latticeSize); int T0 = threadIdx.x % ( 2 * latticeSize ); #if __CUDA_ARCH__ < 200 const int array_size = 512; #elif __CUDA_ARCH__ >= 200 const int array_size = 1024; #else #error Could not detect GPU architecture #endif //__shared__ int2 tempBond[20]; __shared__ int tempPos[ array_size ]; __shared__ float tempVal[ array_size ]; int stride = 2 * latticeSize; int site = T0 % ( latticeSize ); int rowTemp; __shared__ unsigned int tempi[array_size]; __shared__ unsigned int tempod[array_size]; //int start = (bool)(dim%array_size) ? (dim/array_size + 1)*array_size : dim/array_size; int start = ( bool )( dim % 512 ) ? ( dim / 512 + 1 ) * 512 : dim ; bool compare; if( ii < dim ) { tempi[ threadIdx.x ] = d_basis[ ii ]; if ( T0 < 2 * latticeSize ) { //Putting bond info in shared memory // (tempBond[site]).x = d_Bond[site]; // (tempBond[site]).y = d_Bond[latticeSize + site]; // __syncthreads(); tempod[ threadIdx.x ] = tempi[ threadIdx.x ]; //-----------------Horizontal bond --------------- tempPos[ threadIdx.x ] = ( tempod[ threadIdx.x ] ^ ( 1 << site ) ); //flip the site-th bit of row - applying the sigma_x operator compare = ( tempPos[ threadIdx.x ] > ii ) && ( tempPos[ threadIdx.x ] < dim ); tempPos[ threadIdx.x ] = compare ? tempPos[ threadIdx.x ] : dim + 1; tempVal[ threadIdx.x ] = HOffBondXTFI(site, tempi[ threadIdx.x ], data.J2); rowTemp = ( T0 / latticeSize ) ? ii : tempPos[ threadIdx.x ]; rowTemp = compare ? rowTemp : dim + 1; tempPos[ threadIdx.x ] = ( T0 / latticeSize) ? tempPos[ threadIdx.x ] : ii; tempPos[ threadIdx.x ] = compare ? tempPos[threadIdx.x] : dim + 1; //----Putting everything back into GPU main memory----------- H.vals[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = tempVal[ threadIdx.x ]; H.cols[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = tempPos[ threadIdx.x ]; H.rows[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = rowTemp; H.set[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = (int)compare; } }//end of ii }//end of FillSparse
73af28a96a996f725cf156992210952f13e5ef20.cu
/*! \file tfising.cu \brief Functions to generate a Hamiltonian for the Transverse Field Ising model */ #include "hamiltonian.h" __device__ float HOffBondXTFI(const int si, const int bra, const float JJ) { float valH; //int S0, S1; //int T0, T1; valH = JJ; //contribution from the J part of the Hamiltonian return valH; } __device__ float HDiagPartTFI1D(const int bra, int latticeSize, int3* d_Bond, const float JJ) { int S0b,S1b ; //spins (bra int T0,T1; //site //int P0, P1, P2, P3; //sites for plaquette (Q) //int s0p, s1p, s2p, s3p; float valH = 0.f; for (int Ti=0; Ti<latticeSize; Ti++) { T0 = (d_Bond[Ti]).x; //lower left spin S0b = (bra>>T0)&1; //if (T0 != Ti) cout<<"Square error 3\n"; T1 = (d_Bond[Ti]).y; //first bond S1b = (bra>>T1)&1; //unpack bra valH += JJ*(S0b-0.5)*(S1b-0.5); }//T0 return valH; }//HdiagPart __device__ float HDiagPartTFI2D(const int bra, int latticeSize, int3* d_Bond, const float JJ) { int S0b,S1b ; //spins (bra int T0,T1; //site //int P0, P1, P2, P3; //sites for plaquette (Q) //int s0p, s1p, s2p, s3p; float valH = 0.f; for (int Ti=0; Ti<latticeSize; Ti++) { T0 = (d_Bond[Ti]).x; //lower left spin S0b = (bra>>T0)&1; //if (T0 != Ti) cout<<"Square error 3\n"; T1 = (d_Bond[Ti]).y; //first bond S1b = (bra>>T1)&1; //unpack bra valH += JJ*(S0b-0.5)*(S1b-0.5); T1 = (d_Bond[Ti]).z; //second bond S1b = (bra>>T1)&1; //unpack bra valH += JJ*(S0b-0.5)*(S1b-0.5); }//T0 return valH; }//HdiagPart __global__ void FillDiagonalsTFI(int* d_basis, f_hamiltonian H, int* d_Bond, parameters data) { int latticeSize = data.nsite; int row = blockIdx.x*blockDim.x + threadIdx.x; int site = threadIdx.x%(latticeSize); unsigned int tempi; __shared__ int3 tempBond[18]; //int3 tempBond[16]; if (row < H.sectorDim) { tempi = d_basis[row]; (tempBond[site]).x = d_Bond[site]; (tempBond[site]).y = d_Bond[latticeSize + site]; switch( data.dimension ) { case 1 : H.vals[row] = HDiagPartTFI1D(tempi, latticeSize, tempBond, data.J1); break; case 2 : (tempBond[site].z) = d_Bond[2*latticeSize + site]; H.vals[row] = HDiagPartTFI2D(tempi, latticeSize, tempBond, data.J1); break; } H.rows[row] = row; H.cols[row] = row; H.set[row] = 1; } else { H.rows[row] = H.sectorDim + 1; H.cols[row] = H.sectorDim + 1; H.set[row] = 0; } } /* Function FillSparse: this function takes the empty Hamiltonian arrays and fills them up. Each thread in x handles one ket |i>, and each thread in y handles one site T0 Inputs: d_basisPosition - position information about the basis d_basis - other basis infos d_dim - the number of kets H_sort - an array that will store the Hamiltonian d_Bond - the bond information d_latticeSize - the number of lattice sites JJ - the coupling parameter */ __global__ void FillSparseTFI(int* d_basisPosition, int* d_basis, f_hamiltonian H, int* d_Bond, parameters data, int offset) { int latticeSize = data.nsite; int dim = H.sectorDim; int ii = ( blockDim.x / ( 2 * latticeSize ) ) * blockIdx.x + threadIdx.x / ( 2 * latticeSize ) + offset;// + blockIdx.y* gridDim.x * blockDim.x / (2 * latticeSize); int T0 = threadIdx.x % ( 2 * latticeSize ); #if __CUDA_ARCH__ < 200 const int array_size = 512; #elif __CUDA_ARCH__ >= 200 const int array_size = 1024; #else #error Could not detect GPU architecture #endif //__shared__ int2 tempBond[20]; __shared__ int tempPos[ array_size ]; __shared__ float tempVal[ array_size ]; int stride = 2 * latticeSize; int site = T0 % ( latticeSize ); int rowTemp; __shared__ unsigned int tempi[array_size]; __shared__ unsigned int tempod[array_size]; //int start = (bool)(dim%array_size) ? 
(dim/array_size + 1)*array_size : dim/array_size; int start = ( bool )( dim % 512 ) ? ( dim / 512 + 1 ) * 512 : dim ; bool compare; if( ii < dim ) { tempi[ threadIdx.x ] = d_basis[ ii ]; if ( T0 < 2 * latticeSize ) { //Putting bond info in shared memory // (tempBond[site]).x = d_Bond[site]; // (tempBond[site]).y = d_Bond[latticeSize + site]; // __syncthreads(); tempod[ threadIdx.x ] = tempi[ threadIdx.x ]; //-----------------Horizontal bond --------------- tempPos[ threadIdx.x ] = ( tempod[ threadIdx.x ] ^ ( 1 << site ) ); //flip the site-th bit of row - applying the sigma_x operator compare = ( tempPos[ threadIdx.x ] > ii ) && ( tempPos[ threadIdx.x ] < dim ); tempPos[ threadIdx.x ] = compare ? tempPos[ threadIdx.x ] : dim + 1; tempVal[ threadIdx.x ] = HOffBondXTFI(site, tempi[ threadIdx.x ], data.J2); rowTemp = ( T0 / latticeSize ) ? ii : tempPos[ threadIdx.x ]; rowTemp = compare ? rowTemp : dim + 1; tempPos[ threadIdx.x ] = ( T0 / latticeSize) ? tempPos[ threadIdx.x ] : ii; tempPos[ threadIdx.x ] = compare ? tempPos[threadIdx.x] : dim + 1; //----Putting everything back into GPU main memory----------- H.vals[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = tempVal[ threadIdx.x ]; H.cols[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = tempPos[ threadIdx.x ]; H.rows[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = rowTemp; H.set[ ii*stride + 2 * site + ( T0 / latticeSize ) + start ] = (int)compare; } }//end of ii }//end of FillSparse
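FillSparseTFI generates the off-diagonal sigma_x matrix element by flipping one bit of the basis integer, and HDiagPartTFI* reads spins with shift-and-mask. The tiny host program below is illustrative only; it spells out those two bit tricks for a single 4-site state and one bond.

#include <cstdio>

int main() {
    unsigned int bra = 6;                    // binary 0110: spins up on sites 1 and 2
    int site = 0;

    int s = (bra >> site) & 1;               // spin occupation at `site`, as in (bra>>T0)&1
    unsigned int ket = bra ^ (1u << site);   // sigma_x flip at `site` -> binary 0111 = 7

    // Diagonal Ising energy of one bond (i,j), matching JJ*(S0b-0.5)*(S1b-0.5)
    float J = 1.0f;
    int i = 1, j = 2;
    float e = J * (((bra >> i) & 1) - 0.5f) * (((bra >> j) & 1) - 0.5f);

    printf("s_%d = %d, flipped state = %u, bond (%d,%d) energy = %+.2f\n",
           site, s, ket, i, j, e);
    return 0;
}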
283573dc2a8f9b7e790abdf2a26f7edf50a751fa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Example 3.2.2
#include <stdio.h>
#include "../common/book.h"

__global__ void multiply(int a, int b, int *c)
{
    *c = a * b;
}

int main(void)
{
    int c;
    int *dev_c;

    HANDLE_ERROR(hipMalloc((void**)&dev_c, sizeof(int)));

    hipLaunchKernelGGL(multiply, dim3(1), dim3(1), 0, 0, 4, 11, dev_c);

    HANDLE_ERROR(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));

    printf("4 * 11 = %d\n", c);

    hipFree(dev_c);
    return 0;
}
283573dc2a8f9b7e790abdf2a26f7edf50a751fa.cu
//Example 3.2.2
#include <stdio.h>
#include "../common/book.h"

__global__ void multiply(int a, int b, int *c)
{
    *c = a * b;
}

int main(void)
{
    int c;
    int *dev_c;

    HANDLE_ERROR(cudaMalloc((void**)&dev_c, sizeof(int)));

    multiply<<<1, 1>>>(4, 11, dev_c);

    HANDLE_ERROR(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));

    printf("4 * 11 = %d\n", c);

    cudaFree(dev_c);
    return 0;
}
97672e6410d3dd269a65eadccb87f7b5fa623492.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sys/time.h> #define SAMPLE_TEST_LEN 2048 #define SCALER 4096 #define LUT_SIZE 1024 __device__ short lut_sigmoid[LUT_SIZE] = { 2048,2056,2064,2072,2080,2088,2096,2104, 2112,2120,2128,2136,2144,2152,2160,2168, 2176,2184,2192,2200,2208,2216,2224,2232, 2239,2247,2255,2263,2271,2279,2287,2295, 2303,2311,2318,2326,2334,2342,2350,2358, 2365,2373,2381,2389,2397,2404,2412,2420, 2428,2435,2443,2451,2458,2466,2474,2481, 2489,2497,2504,2512,2519,2527,2535,2542, 2550,2557,2565,2572,2580,2587,2594,2602, 2609,2617,2624,2631,2639,2646,2653,2661, 2668,2675,2682,2690,2697,2704,2711,2718, 2726,2733,2740,2747,2754,2761,2768,2775, 2782,2789,2796,2803,2810,2817,2823,2830, 2837,2844,2851,2857,2864,2871,2878,2884, 2891,2898,2904,2911,2917,2924,2930,2937, 2943,2950,2956,2963,2969,2975,2982,2988, 2994,3001,3007,3013,3019,3026,3032,3038, 3044,3050,3056,3062,3068,3074,3080,3086, 3092,3098,3104,3110,3116,3121,3127,3133, 3139,3144,3150,3156,3161,3167,3173,3178, 3184,3189,3195,3200,3206,3211,3217,3222, 3227,3233,3238,3243,3249,3254,3259,3264, 3269,3275,3280,3285,3290,3295,3300,3305, 3310,3315,3320,3325,3330,3334,3339,3344, 3349,3354,3358,3363,3368,3372,3377,3382, 3386,3391,3395,3400,3404,3409,3413,3418, 3422,3427,3431,3435,3440,3444,3448,3452, 3457,3461,3465,3469,3473,3477,3481,3486, 3490,3494,3498,3502,3506,3510,3513,3517, 3521,3525,3529,3533,3536,3540,3544,3548, 3551,3555,3559,3562,3566,3570,3573,3577, 3580,3584,3587,3591,3594,3598,3601,3604, 3608,3611,3614,3618,3621,3624,3628,3631, 3634,3637,3640,3644,3647,3650,3653,3656, 3659,3662,3665,3668,3671,3674,3677,3680, 3683,3686,3689,3691,3694,3697,3700,3703, 3705,3708,3711,3714,3716,3719,3722,3724, 3727,3730,3732,3735,3737,3740,3742,3745, 3747,3750,3752,3755,3757,3760,3762,3764, 3767,3769,3772,3774,3776,3778,3781,3783, 3785,3788,3790,3792,3794,3796,3798,3801, 3803,3805,3807,3809,3811,3813,3815,3817, 3819,3821,3823,3825,3827,3829,3831,3833, 3835,3837,3839,3841,3843,3844,3846,3848, 3850,3852,3853,3855,3857,3859,3861,3862, 3864,3866,3867,3869,3871,3872,3874,3876, 3877,3879,3880,3882,3884,3885,3887,3888, 3890,3891,3893,3894,3896,3897,3899,3900, 3902,3903,3905,3906,3907,3909,3910,3912, 3913,3914,3916,3917,3918,3920,3921,3922, 3924,3925,3926,3927,3929,3930,3931,3932, 3934,3935,3936,3937,3938,3940,3941,3942, 3943,3944,3945,3947,3948,3949,3950,3951, 3952,3953,3954,3955,3956,3957,3958,3959, 3960,3961,3963,3964,3965,3966,3966,3967, 3968,3969,3970,3971,3972,3973,3974,3975, 3976,3977,3978,3979,3980,3980,3981,3982, 3983,3984,3985,3986,3986,3987,3988,3989, 3990,3990,3991,3992,3993,3994,3994,3995, 3996,3997,3997,3998,3999,4000,4000,4001, 4002,4003,4003,4004,4005,4005,4006,4007, 4007,4008,4009,4009,4010,4011,4011,4012, 4013,4013,4014,4015,4015,4016,4016,4017, 4018,4018,4019,4019,4020,4021,4021,4022, 4022,4023,4023,4024,4025,4025,4026,4026, 4027,4027,4028,4028,4029,4029,4030,4030, 4031,4031,4032,4032,4033,4033,4034,4034, 4035,4035,4036,4036,4037,4037,4038,4038, 4038,4039,4039,4040,4040,4041,4041,4041, 4042,4042,4043,4043,4043,4044,4044,4045, 4045,4045,4046,4046,4047,4047,4047,4048, 4048,4048,4049,4049,4050,4050,4050,4051, 4051,4051,4052,4052,4052,4053,4053,4053, 4054,4054,4054,4055,4055,4055,4056,4056, 4056,4057,4057,4057,4057,4058,4058,4058, 4059,4059,4059,4059,4060,4060,4060,4061, 4061,4061,4061,4062,4062,4062,4062,4063, 4063,4063,4063,4064,4064,4064,4064,4065, 4065,4065,4065,4066,4066,4066,4066,4067, 4067,4067,4067,4068,4068,4068,4068,4068, 
4069,4069,4069,4069,4069,4070,4070,4070, 4070,4070,4071,4071,4071,4071,4071,4072, 4072,4072,4072,4072,4073,4073,4073,4073, 4073,4073,4074,4074,4074,4074,4074,4074, 4075,4075,4075,4075,4075,4075,4076,4076, 4076,4076,4076,4076,4077,4077,4077,4077, 4077,4077,4077,4078,4078,4078,4078,4078, 4078,4078,4079,4079,4079,4079,4079,4079, 4079,4079,4080,4080,4080,4080,4080,4080, 4080,4080,4081,4081,4081,4081,4081,4081, 4081,4081,4082,4082,4082,4082,4082,4082, 4082,4082,4082,4082,4083,4083,4083,4083, 4083,4083,4083,4083,4083,4084,4084,4084, 4084,4084,4084,4084,4084,4084,4084,4084, 4085,4085,4085,4085,4085,4085,4085,4085, 4085,4085,4085,4085,4086,4086,4086,4086, 4086,4086,4086,4086,4086,4086,4086,4086, 4086,4087,4087,4087,4087,4087,4087,4087, 4087,4087,4087,4087,4087,4087,4087,4088, 4088,4088,4088,4088,4088,4088,4088,4088, 4088,4088,4088,4088,4088,4088,4088,4089, 4089,4089,4089,4089,4089,4089,4089,4089, 4089,4089,4089,4089,4089,4089,4089,4089, 4089,4090,4090,4090,4090,4090,4090,4090, 4090,4090,4090,4090,4090,4090,4090,4090, 4090,4090,4090,4090,4090,4090,4090,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095}; __device__ short lut_tanh[LUT_SIZE] = { 0,32,64,96,128,160,192,224, 256,288,319,351,383,415,446,478, 509,541,572,604,635,666,697,728, 759,790,821,851,882,912,943,973, 1003,1033,1063,1093,1123,1152,1181,1211, 1240,1269,1298,1326,1355,1383,1412,1440, 1468,1496,1523,1551,1578,1605,1632,1659, 1686,1712,1739,1765,1791,1817,1842,1868, 1893,1918,1943,1968,1992,2016,2041,2064, 2088,2112,2135,2158,2181,2204,2227,2249, 2272,2294,2316,2337,2359,2380,2401,2422, 2443,2463,2484,2504,2524,2543,2563,2582, 2602,2621,2639,2658,2676,2695,2713,2731, 2748,2766,2783,2800,2817,2834,2851,2867, 2883,2899,2915,2931,2946,2962,2977,2992, 3007,3021,3036,3050,3064,3078,3092,3106, 3119,3133,3146,3159,3172,3185,3197,3210, 3222,3234,3246,3258,3270,3281,3293,3304, 3315,3326,3337,3347,3358,3368,3379,3389, 3399,3409,3419,3428,3438,3447,3456,3466, 3475,3483,3492,3501,3510,3518,3526,3535, 3543,3551,3559,3566,3574,3582,3589,3596, 3604,3611,3618,3625,3632,3639,3645,3652, 3659,3665,3671,3678,3684,3690,3696,3702, 3707,3713,3719,3724,3730,3735,3741,3746, 3751,3756,3761,3766,3771,3776,3781,3786, 3790,3795,3799,3804,3808,3812,3817,3821, 3825,3829,3833,3837,3841,3845,3848,3852, 3856,3859,3863,3867,3870,3873,3877,3880, 3883,3887,3890,3893,3896,3899,3902,3905, 3908,3911,3913,3916,3919,3922,3924,3927, 3929,3932,3934,3937,3939,3942,3944,3946, 3949,3951,3953,3955,3957,3960,3962,3964, 3966,3968,3970,3972,3973,3975,3977,3979, 3981,3983,3984,3986,3988,3989,3991,3993, 3994,3996,3997,3999,4000,4002,4003,4005, 4006,4007,4009,4010,4011,4013,4014,4015, 4016,4018,4019,4020,4021,4022,4024,4025, 
4026,4027,4028,4029,4030,4031,4032,4033, 4034,4035,4036,4037,4038,4039,4039,4040, 4041,4042,4043,4044,4044,4045,4046,4047, 4048,4048,4049,4050,4050,4051,4052,4053, 4053,4054,4055,4055,4056,4056,4057,4058, 4058,4059,4059,4060,4061,4061,4062,4062, 4063,4063,4064,4064,4065,4065,4066,4066, 4067,4067,4067,4068,4068,4069,4069,4070, 4070,4070,4071,4071,4072,4072,4072,4073, 4073,4073,4074,4074,4074,4075,4075,4075, 4076,4076,4076,4077,4077,4077,4078,4078, 4078,4078,4079,4079,4079,4079,4080,4080, 4080,4080,4081,4081,4081,4081,4082,4082, 4082,4082,4082,4083,4083,4083,4083,4084, 4084,4084,4084,4084,4084,4085,4085,4085, 4085,4085,4085,4086,4086,4086,4086,4086, 4086,4087,4087,4087,4087,4087,4087,4087, 4088,4088,4088,4088,4088,4088,4088,4088, 4089,4089,4089,4089,4089,4089,4089,4089, 4089,4090,4090,4090,4090,4090,4090,4090, 4090,4090,4090,4090,4091,4091,4091,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 
4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096}; __device__ void lstm_n5_o1(int input[SAMPLE_TEST_LEN], short output[SAMPLE_TEST_LEN]) { int i, j, t; float inW[4][5] = { -0.00902497, 0.0130347, -0.305604, 0.0103134, -0.00143173, -0.00892103, -0.00877193, -0.0158959, -0.00261989, 0.00238156, -1.17159, -1.05888, -0.0252563, 1.32337, 0.896013, 0.112793, 0.107382, 0.459561, 0.112837, -0.0858938}; float intW[4][5][5] = { {{0.01465, 0.017885, 0.00462623, -0.00366126, 0.00414583}, {0.00709106, 0.00612325, 0.00509018, 0.00629193, -0.00820282}, {0.0594903, 0.0594652, 0.0879106, -0.202968, 0.146663}, {0.0173266, -0.00258213, -0.00156304, -0.0161799, 0.0206139}, {0.00378391, 0.0190192, 0.0140174, 0.0183843, -0.00042357}}, {{-0.007224, -2.52633e-05, -0.00375626, 0.0171819, -0.0146835}, {0.0095475, 0.0111485, 0.00723207, -0.00279432, -0.00130744}, {-0.00358937, -0.0211212, -0.0445563, -0.0203464, 0.0123881}, {-0.00648264, -0.00841806, 0.00112013, 0.00435087, -0.0138258}, {0.00533612, -0.00909088, 0.00789575, 0.00117046, 0.00834566}}, {{0.74772, 0.635634, 0.730541, -1.11435, 0.814002}, {0.623608, 0.53032, 0.652992, -1.01461, 0.768323}, {0.120079, 0.113368, 0.0824013, -0.000308211, -0.0182162}, {-1.28265, -1.18123, -0.480213, 0.984297, -0.576107}, {-1.00799, -0.944089, -0.355751, 0.536079, -0.27723}}, {{0.0134795, 0.0447042, 0.015088, 0.0920375, -0.0777375}, {0.0384587, 0.0330071, 0.0205698, 0.0858556, -0.0671409}, {-0.63912, -0.570696, -0.0891825, 0.706698, -0.5}, {0.0172945, 0.0240723, 0.00149645, 0.0341813, -0.0418003}, {-0.0122831, -0.0280598, -0.00341253, -0.0265756, 0.0246845}} }; float intB[4][5] = { 0.016, 0.0139732, -0.183891, 0.0139634, 0.00864378, 5.00094, 5.00059, 4.97023, 5.0002, 5.00032, 0.0676543, -0.0445895, 0.248995, -0.978814, -1.0258, 0.204404, 0.190113, -0.156202, 0.219446, -0.179526}; float outW[5] = {-0.4272, -0.33769, 0.167592, 0.50495, -0.502329}; float outB = -0.0394433; short inWF[4][5] = {0}; short intWF[4][5][5] = {0}; short intBF[4][5] = {0}; short outWF[5] = {0}; short outBF = 0; for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { inWF[i][j] = (short) (inW[i][j] * SCALER); } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { for (t = 0; t < 5; ++t) { intWF[i][j][t] = (short) (intW[i][j][t] * SCALER); } } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { intBF[i][j] = (short) (intB[i][j] * SCALER); } } for (i = 0; i < 5; ++i) { outWF[i] = (short) (outW[i] * SCALER); } outBF = (short) (outB * SCALER); short h_stateF[5] = {0}; short c_stateF[5] = {0}; short i_stateF[5] = {0}; short f_stateF[5] = {0}; short o_stateF[5] = {0}; short g_stateF[5] = {0}; short sampleinput_16b; for (t = 0; t < SAMPLE_TEST_LEN; ++t) { sampleinput_16b = (short) (input[t] + 120000) * 256 / 1875; for (j = 0; j < 5; ++j) { i_stateF[j] = (inWF[0][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) i_stateF[j] += ((h_stateF[i] * intWF[0][j][i]) >> 12); i_stateF[j] += intBF[0][j]; i_stateF[j] = i_stateF[j] >> 5; if (i_stateF[j] >= LUT_SIZE) i_stateF[j] = 4095; else if (i_stateF[j] >= 0) i_stateF[j] = lut_sigmoid[i_stateF[j]]; else if (i_stateF[j] > -LUT_SIZE) i_stateF[j] = 4096 - lut_sigmoid[-i_stateF[j]]; else i_stateF[j] = 1; } for (j = 0; j < 5; ++j) { f_stateF[j] = (inWF[1][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) f_stateF[j] += ((h_stateF[i] * intWF[1][j][i]) >> 12); f_stateF[j] += intBF[1][j]; f_stateF[j] = f_stateF[j] >> 5; if (f_stateF[j] >= LUT_SIZE) f_stateF[j] = 
4095; else if (f_stateF[j] >= 0) f_stateF[j] = lut_sigmoid[f_stateF[j]]; else if (f_stateF[j] > -LUT_SIZE) f_stateF[j] = 4096 - lut_sigmoid[-f_stateF[j]]; else f_stateF[j] = 1; } for (j = 0; j < 5; ++j) { o_stateF[j] = (inWF[2][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) o_stateF[j] += ((h_stateF[i] * intWF[2][j][i]) >> 12); o_stateF[j] += intBF[2][j]; o_stateF[j] = o_stateF[j] >> 5; if (o_stateF[j] >= LUT_SIZE) o_stateF[j] = 4095; else if (o_stateF[j] >= 0) o_stateF[j] = lut_sigmoid[o_stateF[j]]; else if (o_stateF[j] > -LUT_SIZE) o_stateF[j] = 4096 - lut_sigmoid[-o_stateF[j]]; else o_stateF[j] = 1; } for (j = 0; j < 5; ++j) { g_stateF[j] = (inWF[3][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) g_stateF[j] += ((h_stateF[i] * intWF[3][j][i]) >> 12); g_stateF[j] += intBF[3][j]; g_stateF[j] = g_stateF[j] >> 5; if (g_stateF[j] >= LUT_SIZE) g_stateF[j] = 4096; else if (g_stateF[j] >= 0) g_stateF[j] = lut_tanh[g_stateF[j]]; else if (g_stateF[j] > -LUT_SIZE) g_stateF[j] = -lut_tanh[-g_stateF[j]]; else g_stateF[j] = -4096; } for (j = 0; j < 5; ++j) { c_stateF[j] = (((c_stateF[j] * f_stateF[j]) >> 8) + ((g_stateF[j] * i_stateF[j]) >> 12)) >> 4; h_stateF[j] = c_stateF[j] >> 1; if (h_stateF[j] >= LUT_SIZE) h_stateF[j] = 4096; else if (h_stateF[j] >= 0) h_stateF[j] = lut_tanh[h_stateF[j]]; else if (h_stateF[j] > -LUT_SIZE) h_stateF[j] = -lut_tanh[-h_stateF[j]]; else h_stateF[j] = -4096; h_stateF[j] = (h_stateF[j] * o_stateF[j]) >> 12; } output[t] = outBF; for (j = 0; j < 5; ++j) output[t] += ((h_stateF[j] * outWF[j]) >> 12); } } __device__ void lstm_n5_o2(int input[SAMPLE_TEST_LEN], short output[SAMPLE_TEST_LEN]) { int i, j, t; float inW[4][5] = { -0.133907, 0.0967799, -0.0249856, -0.0482016, 0.000138663, -0.0025821, -0.0107074, -0.0135626, -0.0265616, -0.00990482, 0.0279149, 0.29944, 0.00367669, -0.0406378, -0.122106, 0.305937, -1.54966, 0.108542, -0.086096, -0.278674}; float intW[4][5][5] = { {{0.0465599, -0.0784586, 0.0703757, -0.0961503, 0.103885}, {0.137839, 0.0785531, 0.172321, 0.00198996, 0.115174}, {0.0896546, -0.00207286, 0.0280649, 0.0300854, 0.0549556}, {0.0952124, 0.011873, 0.0253059, -0.00619738, 0.10025}, {-0.0796523, -0.0310471, 0.0336561, -0.0999846, -0.00944991}}, {{-0.00558139, -0.0249531, -0.0196812, -0.0283953, -0.00538974}, {0.0124158, 0.00739093, 0.00918819, -0.00951965, 0.00634635}, {-0.008908, 0.0113348, -0.00387874, 0.00339979, -0.000628876}, {-0.00832763, 0.0040069, 0.00346749, -0.0256792, 0.00539768}, {0.00337389, -0.0148225, -0.0283464, 0.00277652, 0.000571859}}, {{-0.00736941, 0.0578041, 0.141176, 0.00565979, -0.079775}, {-0.140356, 0.0521767, 0.0813636, -0.0342324, -0.0847605}, {0.0534741, 0.0335436, 0.0464466, 0.0670157, 0.0266309}, {0.0142565, -0.0397183, -0.0116136, -0.0507669, 0.0575363}, {-0.0518841, 0.0358612, 0.0333015, -0.119254, 0.0368938}}, {{-0.40111, 1.17447, 0.172804, 0.197255, 0.0786499}, {-0.307048, -0.923395, -0.362905, 0.194527, -0.438387}, {-0.671133, 0.728081, -0.520196, 0.0108215, -0.139992}, {-0.600645, 0.151967, 0.0101909, -0.235608, -0.367466}, {0.262652, 0.84919, -0.131239, 0.0756875, -0.261777}} }; float intB[4][5] = { -0.0421559, 0.246112, 0.0348797, -0.0619016, 0.0988568, 4.98184, 4.97131, 4.98673, 4.97446, 4.96925, 0.255813, 0.527195, 0.120779, -0.0979445, 0.02733, 0.0091722, 0.551458, -0.0521645, 0.0113755, 0.2287}; float outW[5] = {-0.592906, 0.576557, -0.38704, 0.0146919, -0.35076}; float outB = -0.0191289; short inWF[4][5] = {0}; short intWF[4][5][5] = {0}; short intBF[4][5] = {0}; short outWF[5] = {0}; short outBF = 0; for 
(i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { inWF[i][j] = (short) (inW[i][j] * SCALER); } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { for (t = 0; t < 5; ++t) { intWF[i][j][t] = (short) (intW[i][j][t] * SCALER); } } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { intBF[i][j] = (short) (intB[i][j] * SCALER); } } for (i = 0; i < 5; ++i) { outWF[i] = (short) (outW[i] * SCALER); } outBF = (short) (outB * SCALER); short h_stateF[5] = {0}; short c_stateF[5] = {0}; short i_stateF[5] = {0}; short f_stateF[5] = {0}; short o_stateF[5] = {0}; short g_stateF[5] = {0}; short sampleinput_16b; for (t = 0; t < SAMPLE_TEST_LEN; ++t) { sampleinput_16b = (short) (input[t] + 120000) * 256 / 1875; for (j = 0; j < 5; ++j) { i_stateF[j] = (inWF[0][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) i_stateF[j] += ((h_stateF[i] * intWF[0][j][i]) >> 12); i_stateF[j] += intBF[0][j]; i_stateF[j] = i_stateF[j] >> 5; if (i_stateF[j] >= LUT_SIZE) i_stateF[j] = 4095; else if (i_stateF[j] >= 0) i_stateF[j] = lut_sigmoid[i_stateF[j]]; else if (i_stateF[j] > -LUT_SIZE) i_stateF[j] = 4096 - lut_sigmoid[-i_stateF[j]]; else i_stateF[j] = 1; } for (j = 0; j < 5; ++j) { f_stateF[j] = (inWF[1][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) f_stateF[j] += ((h_stateF[i] * intWF[1][j][i]) >> 12); f_stateF[j] += intBF[1][j]; f_stateF[j] = f_stateF[j] >> 5; if (f_stateF[j] >= LUT_SIZE) f_stateF[j] = 4095; else if (f_stateF[j] >= 0) f_stateF[j] = lut_sigmoid[f_stateF[j]]; else if (f_stateF[j] > -LUT_SIZE) f_stateF[j] = 4096 - lut_sigmoid[-f_stateF[j]]; else f_stateF[j] = 1; } for (j = 0; j < 5; ++j) { o_stateF[j] = (inWF[2][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) o_stateF[j] += ((h_stateF[i] * intWF[2][j][i]) >> 12); o_stateF[j] += intBF[2][j]; o_stateF[j] = o_stateF[j] >> 5; if (o_stateF[j] >= LUT_SIZE) o_stateF[j] = 4095; else if (o_stateF[j] >= 0) o_stateF[j] = lut_sigmoid[o_stateF[j]]; else if (o_stateF[j] > -LUT_SIZE) o_stateF[j] = 4096 - lut_sigmoid[-o_stateF[j]]; else o_stateF[j] = 1; } for (j = 0; j < 5; ++j) { g_stateF[j] = (inWF[3][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) g_stateF[j] += ((h_stateF[i] * intWF[3][j][i]) >> 12); g_stateF[j] += intBF[3][j]; g_stateF[j] = g_stateF[j] >> 5; if (g_stateF[j] >= LUT_SIZE) g_stateF[j] = 4096; else if (g_stateF[j] >= 0) g_stateF[j] = lut_tanh[g_stateF[j]]; else if (g_stateF[j] > -LUT_SIZE) g_stateF[j] = -lut_tanh[-g_stateF[j]]; else g_stateF[j] = -4096; } for (j = 0; j < 5; ++j) { c_stateF[j] = (((c_stateF[j] * f_stateF[j]) >> 8) + ((g_stateF[j] * i_stateF[j]) >> 12)) >> 4; h_stateF[j] = c_stateF[j] >> 1; if (h_stateF[j] >= LUT_SIZE) h_stateF[j] = 4096; else if (h_stateF[j] >= 0) h_stateF[j] = lut_tanh[h_stateF[j]]; else if (h_stateF[j] > -LUT_SIZE) h_stateF[j] = -lut_tanh[-h_stateF[j]]; else h_stateF[j] = -4096; h_stateF[j] = (h_stateF[j] * o_stateF[j]) >> 12; } output[t] = outBF; for (j = 0; j < 5; ++j) output[t] += ((h_stateF[j] * outWF[j]) >> 12); } } __global__ void lstm_task(int n, int *x, short *y1, short *y2) { int idx = blockIdx.x*blockDim.x + threadIdx.x; //if (i < n) y[i] = a*x[i] + y[i]; int i; int sampleinput[SAMPLE_TEST_LEN]; short test_out1[SAMPLE_TEST_LEN]; short test_out2[SAMPLE_TEST_LEN]; if (idx < n) { for (i = 0; i < SAMPLE_TEST_LEN; ++i) { sampleinput[i] = x[idx * SAMPLE_TEST_LEN + i]; } lstm_n5_o1(sampleinput, test_out1); lstm_n5_o2(sampleinput, test_out2); for (i = 0; i < SAMPLE_TEST_LEN; ++i) { y1[idx * SAMPLE_TEST_LEN + i] = test_out1[i]; y2[idx * SAMPLE_TEST_LEN + i] = test_out2[i]; } } } int main(void) { int N 
= 1<<19; int *x, *d_x; short *y1, *y2, *d_y1, *d_y2; FILE *ifp, *ofp; struct timeval t1, t2, tr; int i, j; int sampleinput[SAMPLE_TEST_LEN]; short test_out1[SAMPLE_TEST_LEN]; short test_out2[SAMPLE_TEST_LEN]; // Read in sample input from "converted-lstm-in.txt" file if (!(ifp = fopen("converted-lstm-in.txt", "r"))) { printf("File converted-lstm-in.txt cannot be opened for read.\n"); return -1; } for (i = 0; i < SAMPLE_TEST_LEN; ++i) { fscanf(ifp, "%d", &sampleinput[i]); } fclose(ifp); // Open output.txt for output data write back. if (!(ofp = fopen("output.txt", "w"))) { printf("File output.txt cannot be opened for write.\n"); return -1; } x = (int*)malloc(N * SAMPLE_TEST_LEN * sizeof(int)); y1 = (short*)malloc(N * SAMPLE_TEST_LEN * sizeof(short)); y2 = (short*)malloc(N * SAMPLE_TEST_LEN * sizeof(short)); hipMalloc(&d_x, N * SAMPLE_TEST_LEN * sizeof(int)); hipMalloc(&d_y1, N * SAMPLE_TEST_LEN * sizeof(short)); hipMalloc(&d_y2, N * SAMPLE_TEST_LEN * sizeof(short)); for (i = 0; i < N; i++) { for (j = 0; j < SAMPLE_TEST_LEN; ++j) { x[i * SAMPLE_TEST_LEN + j] = sampleinput[j]; } } for (int k = 9; k < 10; ++k) { gettimeofday(&t1, NULL); for (j = 0; j < k; ++j) { hipMemcpy(d_x, x, N * SAMPLE_TEST_LEN * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_y1, y1, N * SAMPLE_TEST_LEN * sizeof(short), hipMemcpyHostToDevice); hipMemcpy(d_y2, y2, N * SAMPLE_TEST_LEN * sizeof(short), hipMemcpyHostToDevice); // Perform SAXPY on 1M elements hipLaunchKernelGGL(( lstm_task), dim3((N+63)/64), dim3(64), 0, 0, N, d_x, d_y1, d_y2); hipMemcpy(y1, d_y1, N * SAMPLE_TEST_LEN * sizeof(short), hipMemcpyDeviceToHost); hipMemcpy(y2, d_y2, N * SAMPLE_TEST_LEN * sizeof(short), hipMemcpyDeviceToHost); } gettimeofday(&t2, NULL); timersub(&t1, &t2, &tr); printf("Round %d Excute time: %.2f sec\n", k, -tr.tv_sec-(double)tr.tv_usec/1000000.0); } for (i = 0; i < SAMPLE_TEST_LEN; ++i) { test_out1[i] = y1[10000 * SAMPLE_TEST_LEN + i]; test_out2[i] = y2[10000 * SAMPLE_TEST_LEN + i]; } for (i = 0; i < SAMPLE_TEST_LEN; ++i) fprintf(ofp, "%d,%d\n", test_out1[i], test_out2[i]); fclose(ofp); hipFree(d_x); hipFree(d_y1); hipFree(d_y2); free(x); free(y1); free(y2); }
97672e6410d3dd269a65eadccb87f7b5fa623492.cu
#include <stdio.h> #include <sys/time.h> #define SAMPLE_TEST_LEN 2048 #define SCALER 4096 #define LUT_SIZE 1024 __device__ short lut_sigmoid[LUT_SIZE] = { 2048,2056,2064,2072,2080,2088,2096,2104, 2112,2120,2128,2136,2144,2152,2160,2168, 2176,2184,2192,2200,2208,2216,2224,2232, 2239,2247,2255,2263,2271,2279,2287,2295, 2303,2311,2318,2326,2334,2342,2350,2358, 2365,2373,2381,2389,2397,2404,2412,2420, 2428,2435,2443,2451,2458,2466,2474,2481, 2489,2497,2504,2512,2519,2527,2535,2542, 2550,2557,2565,2572,2580,2587,2594,2602, 2609,2617,2624,2631,2639,2646,2653,2661, 2668,2675,2682,2690,2697,2704,2711,2718, 2726,2733,2740,2747,2754,2761,2768,2775, 2782,2789,2796,2803,2810,2817,2823,2830, 2837,2844,2851,2857,2864,2871,2878,2884, 2891,2898,2904,2911,2917,2924,2930,2937, 2943,2950,2956,2963,2969,2975,2982,2988, 2994,3001,3007,3013,3019,3026,3032,3038, 3044,3050,3056,3062,3068,3074,3080,3086, 3092,3098,3104,3110,3116,3121,3127,3133, 3139,3144,3150,3156,3161,3167,3173,3178, 3184,3189,3195,3200,3206,3211,3217,3222, 3227,3233,3238,3243,3249,3254,3259,3264, 3269,3275,3280,3285,3290,3295,3300,3305, 3310,3315,3320,3325,3330,3334,3339,3344, 3349,3354,3358,3363,3368,3372,3377,3382, 3386,3391,3395,3400,3404,3409,3413,3418, 3422,3427,3431,3435,3440,3444,3448,3452, 3457,3461,3465,3469,3473,3477,3481,3486, 3490,3494,3498,3502,3506,3510,3513,3517, 3521,3525,3529,3533,3536,3540,3544,3548, 3551,3555,3559,3562,3566,3570,3573,3577, 3580,3584,3587,3591,3594,3598,3601,3604, 3608,3611,3614,3618,3621,3624,3628,3631, 3634,3637,3640,3644,3647,3650,3653,3656, 3659,3662,3665,3668,3671,3674,3677,3680, 3683,3686,3689,3691,3694,3697,3700,3703, 3705,3708,3711,3714,3716,3719,3722,3724, 3727,3730,3732,3735,3737,3740,3742,3745, 3747,3750,3752,3755,3757,3760,3762,3764, 3767,3769,3772,3774,3776,3778,3781,3783, 3785,3788,3790,3792,3794,3796,3798,3801, 3803,3805,3807,3809,3811,3813,3815,3817, 3819,3821,3823,3825,3827,3829,3831,3833, 3835,3837,3839,3841,3843,3844,3846,3848, 3850,3852,3853,3855,3857,3859,3861,3862, 3864,3866,3867,3869,3871,3872,3874,3876, 3877,3879,3880,3882,3884,3885,3887,3888, 3890,3891,3893,3894,3896,3897,3899,3900, 3902,3903,3905,3906,3907,3909,3910,3912, 3913,3914,3916,3917,3918,3920,3921,3922, 3924,3925,3926,3927,3929,3930,3931,3932, 3934,3935,3936,3937,3938,3940,3941,3942, 3943,3944,3945,3947,3948,3949,3950,3951, 3952,3953,3954,3955,3956,3957,3958,3959, 3960,3961,3963,3964,3965,3966,3966,3967, 3968,3969,3970,3971,3972,3973,3974,3975, 3976,3977,3978,3979,3980,3980,3981,3982, 3983,3984,3985,3986,3986,3987,3988,3989, 3990,3990,3991,3992,3993,3994,3994,3995, 3996,3997,3997,3998,3999,4000,4000,4001, 4002,4003,4003,4004,4005,4005,4006,4007, 4007,4008,4009,4009,4010,4011,4011,4012, 4013,4013,4014,4015,4015,4016,4016,4017, 4018,4018,4019,4019,4020,4021,4021,4022, 4022,4023,4023,4024,4025,4025,4026,4026, 4027,4027,4028,4028,4029,4029,4030,4030, 4031,4031,4032,4032,4033,4033,4034,4034, 4035,4035,4036,4036,4037,4037,4038,4038, 4038,4039,4039,4040,4040,4041,4041,4041, 4042,4042,4043,4043,4043,4044,4044,4045, 4045,4045,4046,4046,4047,4047,4047,4048, 4048,4048,4049,4049,4050,4050,4050,4051, 4051,4051,4052,4052,4052,4053,4053,4053, 4054,4054,4054,4055,4055,4055,4056,4056, 4056,4057,4057,4057,4057,4058,4058,4058, 4059,4059,4059,4059,4060,4060,4060,4061, 4061,4061,4061,4062,4062,4062,4062,4063, 4063,4063,4063,4064,4064,4064,4064,4065, 4065,4065,4065,4066,4066,4066,4066,4067, 4067,4067,4067,4068,4068,4068,4068,4068, 4069,4069,4069,4069,4069,4070,4070,4070, 4070,4070,4071,4071,4071,4071,4071,4072, 
4072,4072,4072,4072,4073,4073,4073,4073, 4073,4073,4074,4074,4074,4074,4074,4074, 4075,4075,4075,4075,4075,4075,4076,4076, 4076,4076,4076,4076,4077,4077,4077,4077, 4077,4077,4077,4078,4078,4078,4078,4078, 4078,4078,4079,4079,4079,4079,4079,4079, 4079,4079,4080,4080,4080,4080,4080,4080, 4080,4080,4081,4081,4081,4081,4081,4081, 4081,4081,4082,4082,4082,4082,4082,4082, 4082,4082,4082,4082,4083,4083,4083,4083, 4083,4083,4083,4083,4083,4084,4084,4084, 4084,4084,4084,4084,4084,4084,4084,4084, 4085,4085,4085,4085,4085,4085,4085,4085, 4085,4085,4085,4085,4086,4086,4086,4086, 4086,4086,4086,4086,4086,4086,4086,4086, 4086,4087,4087,4087,4087,4087,4087,4087, 4087,4087,4087,4087,4087,4087,4087,4088, 4088,4088,4088,4088,4088,4088,4088,4088, 4088,4088,4088,4088,4088,4088,4088,4089, 4089,4089,4089,4089,4089,4089,4089,4089, 4089,4089,4089,4089,4089,4089,4089,4089, 4089,4090,4090,4090,4090,4090,4090,4090, 4090,4090,4090,4090,4090,4090,4090,4090, 4090,4090,4090,4090,4090,4090,4090,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095}; __device__ short lut_tanh[LUT_SIZE] = { 0,32,64,96,128,160,192,224, 256,288,319,351,383,415,446,478, 509,541,572,604,635,666,697,728, 759,790,821,851,882,912,943,973, 1003,1033,1063,1093,1123,1152,1181,1211, 1240,1269,1298,1326,1355,1383,1412,1440, 1468,1496,1523,1551,1578,1605,1632,1659, 1686,1712,1739,1765,1791,1817,1842,1868, 1893,1918,1943,1968,1992,2016,2041,2064, 2088,2112,2135,2158,2181,2204,2227,2249, 2272,2294,2316,2337,2359,2380,2401,2422, 2443,2463,2484,2504,2524,2543,2563,2582, 2602,2621,2639,2658,2676,2695,2713,2731, 2748,2766,2783,2800,2817,2834,2851,2867, 2883,2899,2915,2931,2946,2962,2977,2992, 3007,3021,3036,3050,3064,3078,3092,3106, 3119,3133,3146,3159,3172,3185,3197,3210, 3222,3234,3246,3258,3270,3281,3293,3304, 3315,3326,3337,3347,3358,3368,3379,3389, 3399,3409,3419,3428,3438,3447,3456,3466, 3475,3483,3492,3501,3510,3518,3526,3535, 3543,3551,3559,3566,3574,3582,3589,3596, 3604,3611,3618,3625,3632,3639,3645,3652, 3659,3665,3671,3678,3684,3690,3696,3702, 3707,3713,3719,3724,3730,3735,3741,3746, 3751,3756,3761,3766,3771,3776,3781,3786, 3790,3795,3799,3804,3808,3812,3817,3821, 3825,3829,3833,3837,3841,3845,3848,3852, 3856,3859,3863,3867,3870,3873,3877,3880, 3883,3887,3890,3893,3896,3899,3902,3905, 3908,3911,3913,3916,3919,3922,3924,3927, 3929,3932,3934,3937,3939,3942,3944,3946, 3949,3951,3953,3955,3957,3960,3962,3964, 3966,3968,3970,3972,3973,3975,3977,3979, 3981,3983,3984,3986,3988,3989,3991,3993, 3994,3996,3997,3999,4000,4002,4003,4005, 4006,4007,4009,4010,4011,4013,4014,4015, 4016,4018,4019,4020,4021,4022,4024,4025, 4026,4027,4028,4029,4030,4031,4032,4033, 4034,4035,4036,4037,4038,4039,4039,4040, 
4041,4042,4043,4044,4044,4045,4046,4047, 4048,4048,4049,4050,4050,4051,4052,4053, 4053,4054,4055,4055,4056,4056,4057,4058, 4058,4059,4059,4060,4061,4061,4062,4062, 4063,4063,4064,4064,4065,4065,4066,4066, 4067,4067,4067,4068,4068,4069,4069,4070, 4070,4070,4071,4071,4072,4072,4072,4073, 4073,4073,4074,4074,4074,4075,4075,4075, 4076,4076,4076,4077,4077,4077,4078,4078, 4078,4078,4079,4079,4079,4079,4080,4080, 4080,4080,4081,4081,4081,4081,4082,4082, 4082,4082,4082,4083,4083,4083,4083,4084, 4084,4084,4084,4084,4084,4085,4085,4085, 4085,4085,4085,4086,4086,4086,4086,4086, 4086,4087,4087,4087,4087,4087,4087,4087, 4088,4088,4088,4088,4088,4088,4088,4088, 4089,4089,4089,4089,4089,4089,4089,4089, 4089,4090,4090,4090,4090,4090,4090,4090, 4090,4090,4090,4090,4091,4091,4091,4091, 4091,4091,4091,4091,4091,4091,4091,4091, 4091,4092,4092,4092,4092,4092,4092,4092, 4092,4092,4092,4092,4092,4092,4092,4092, 4092,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4093, 4093,4093,4093,4093,4093,4093,4093,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4094, 4094,4094,4094,4094,4094,4094,4094,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4095,4095, 4095,4095,4095,4095,4095,4095,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096, 
4096,4096,4096,4096,4096,4096,4096,4096, 4096,4096,4096,4096,4096,4096,4096,4096}; __device__ void lstm_n5_o1(int input[SAMPLE_TEST_LEN], short output[SAMPLE_TEST_LEN]) { int i, j, t; float inW[4][5] = { -0.00902497, 0.0130347, -0.305604, 0.0103134, -0.00143173, -0.00892103, -0.00877193, -0.0158959, -0.00261989, 0.00238156, -1.17159, -1.05888, -0.0252563, 1.32337, 0.896013, 0.112793, 0.107382, 0.459561, 0.112837, -0.0858938}; float intW[4][5][5] = { {{0.01465, 0.017885, 0.00462623, -0.00366126, 0.00414583}, {0.00709106, 0.00612325, 0.00509018, 0.00629193, -0.00820282}, {0.0594903, 0.0594652, 0.0879106, -0.202968, 0.146663}, {0.0173266, -0.00258213, -0.00156304, -0.0161799, 0.0206139}, {0.00378391, 0.0190192, 0.0140174, 0.0183843, -0.00042357}}, {{-0.007224, -2.52633e-05, -0.00375626, 0.0171819, -0.0146835}, {0.0095475, 0.0111485, 0.00723207, -0.00279432, -0.00130744}, {-0.00358937, -0.0211212, -0.0445563, -0.0203464, 0.0123881}, {-0.00648264, -0.00841806, 0.00112013, 0.00435087, -0.0138258}, {0.00533612, -0.00909088, 0.00789575, 0.00117046, 0.00834566}}, {{0.74772, 0.635634, 0.730541, -1.11435, 0.814002}, {0.623608, 0.53032, 0.652992, -1.01461, 0.768323}, {0.120079, 0.113368, 0.0824013, -0.000308211, -0.0182162}, {-1.28265, -1.18123, -0.480213, 0.984297, -0.576107}, {-1.00799, -0.944089, -0.355751, 0.536079, -0.27723}}, {{0.0134795, 0.0447042, 0.015088, 0.0920375, -0.0777375}, {0.0384587, 0.0330071, 0.0205698, 0.0858556, -0.0671409}, {-0.63912, -0.570696, -0.0891825, 0.706698, -0.5}, {0.0172945, 0.0240723, 0.00149645, 0.0341813, -0.0418003}, {-0.0122831, -0.0280598, -0.00341253, -0.0265756, 0.0246845}} }; float intB[4][5] = { 0.016, 0.0139732, -0.183891, 0.0139634, 0.00864378, 5.00094, 5.00059, 4.97023, 5.0002, 5.00032, 0.0676543, -0.0445895, 0.248995, -0.978814, -1.0258, 0.204404, 0.190113, -0.156202, 0.219446, -0.179526}; float outW[5] = {-0.4272, -0.33769, 0.167592, 0.50495, -0.502329}; float outB = -0.0394433; short inWF[4][5] = {0}; short intWF[4][5][5] = {0}; short intBF[4][5] = {0}; short outWF[5] = {0}; short outBF = 0; for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { inWF[i][j] = (short) (inW[i][j] * SCALER); } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { for (t = 0; t < 5; ++t) { intWF[i][j][t] = (short) (intW[i][j][t] * SCALER); } } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { intBF[i][j] = (short) (intB[i][j] * SCALER); } } for (i = 0; i < 5; ++i) { outWF[i] = (short) (outW[i] * SCALER); } outBF = (short) (outB * SCALER); short h_stateF[5] = {0}; short c_stateF[5] = {0}; short i_stateF[5] = {0}; short f_stateF[5] = {0}; short o_stateF[5] = {0}; short g_stateF[5] = {0}; short sampleinput_16b; for (t = 0; t < SAMPLE_TEST_LEN; ++t) { sampleinput_16b = (short) (input[t] + 120000) * 256 / 1875; for (j = 0; j < 5; ++j) { i_stateF[j] = (inWF[0][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) i_stateF[j] += ((h_stateF[i] * intWF[0][j][i]) >> 12); i_stateF[j] += intBF[0][j]; i_stateF[j] = i_stateF[j] >> 5; if (i_stateF[j] >= LUT_SIZE) i_stateF[j] = 4095; else if (i_stateF[j] >= 0) i_stateF[j] = lut_sigmoid[i_stateF[j]]; else if (i_stateF[j] > -LUT_SIZE) i_stateF[j] = 4096 - lut_sigmoid[-i_stateF[j]]; else i_stateF[j] = 1; } for (j = 0; j < 5; ++j) { f_stateF[j] = (inWF[1][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) f_stateF[j] += ((h_stateF[i] * intWF[1][j][i]) >> 12); f_stateF[j] += intBF[1][j]; f_stateF[j] = f_stateF[j] >> 5; if (f_stateF[j] >= LUT_SIZE) f_stateF[j] = 4095; else if (f_stateF[j] >= 0) f_stateF[j] = lut_sigmoid[f_stateF[j]]; else if 
(f_stateF[j] > -LUT_SIZE) f_stateF[j] = 4096 - lut_sigmoid[-f_stateF[j]]; else f_stateF[j] = 1; } for (j = 0; j < 5; ++j) { o_stateF[j] = (inWF[2][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) o_stateF[j] += ((h_stateF[i] * intWF[2][j][i]) >> 12); o_stateF[j] += intBF[2][j]; o_stateF[j] = o_stateF[j] >> 5; if (o_stateF[j] >= LUT_SIZE) o_stateF[j] = 4095; else if (o_stateF[j] >= 0) o_stateF[j] = lut_sigmoid[o_stateF[j]]; else if (o_stateF[j] > -LUT_SIZE) o_stateF[j] = 4096 - lut_sigmoid[-o_stateF[j]]; else o_stateF[j] = 1; } for (j = 0; j < 5; ++j) { g_stateF[j] = (inWF[3][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) g_stateF[j] += ((h_stateF[i] * intWF[3][j][i]) >> 12); g_stateF[j] += intBF[3][j]; g_stateF[j] = g_stateF[j] >> 5; if (g_stateF[j] >= LUT_SIZE) g_stateF[j] = 4096; else if (g_stateF[j] >= 0) g_stateF[j] = lut_tanh[g_stateF[j]]; else if (g_stateF[j] > -LUT_SIZE) g_stateF[j] = -lut_tanh[-g_stateF[j]]; else g_stateF[j] = -4096; } for (j = 0; j < 5; ++j) { c_stateF[j] = (((c_stateF[j] * f_stateF[j]) >> 8) + ((g_stateF[j] * i_stateF[j]) >> 12)) >> 4; h_stateF[j] = c_stateF[j] >> 1; if (h_stateF[j] >= LUT_SIZE) h_stateF[j] = 4096; else if (h_stateF[j] >= 0) h_stateF[j] = lut_tanh[h_stateF[j]]; else if (h_stateF[j] > -LUT_SIZE) h_stateF[j] = -lut_tanh[-h_stateF[j]]; else h_stateF[j] = -4096; h_stateF[j] = (h_stateF[j] * o_stateF[j]) >> 12; } output[t] = outBF; for (j = 0; j < 5; ++j) output[t] += ((h_stateF[j] * outWF[j]) >> 12); } } __device__ void lstm_n5_o2(int input[SAMPLE_TEST_LEN], short output[SAMPLE_TEST_LEN]) { int i, j, t; float inW[4][5] = { -0.133907, 0.0967799, -0.0249856, -0.0482016, 0.000138663, -0.0025821, -0.0107074, -0.0135626, -0.0265616, -0.00990482, 0.0279149, 0.29944, 0.00367669, -0.0406378, -0.122106, 0.305937, -1.54966, 0.108542, -0.086096, -0.278674}; float intW[4][5][5] = { {{0.0465599, -0.0784586, 0.0703757, -0.0961503, 0.103885}, {0.137839, 0.0785531, 0.172321, 0.00198996, 0.115174}, {0.0896546, -0.00207286, 0.0280649, 0.0300854, 0.0549556}, {0.0952124, 0.011873, 0.0253059, -0.00619738, 0.10025}, {-0.0796523, -0.0310471, 0.0336561, -0.0999846, -0.00944991}}, {{-0.00558139, -0.0249531, -0.0196812, -0.0283953, -0.00538974}, {0.0124158, 0.00739093, 0.00918819, -0.00951965, 0.00634635}, {-0.008908, 0.0113348, -0.00387874, 0.00339979, -0.000628876}, {-0.00832763, 0.0040069, 0.00346749, -0.0256792, 0.00539768}, {0.00337389, -0.0148225, -0.0283464, 0.00277652, 0.000571859}}, {{-0.00736941, 0.0578041, 0.141176, 0.00565979, -0.079775}, {-0.140356, 0.0521767, 0.0813636, -0.0342324, -0.0847605}, {0.0534741, 0.0335436, 0.0464466, 0.0670157, 0.0266309}, {0.0142565, -0.0397183, -0.0116136, -0.0507669, 0.0575363}, {-0.0518841, 0.0358612, 0.0333015, -0.119254, 0.0368938}}, {{-0.40111, 1.17447, 0.172804, 0.197255, 0.0786499}, {-0.307048, -0.923395, -0.362905, 0.194527, -0.438387}, {-0.671133, 0.728081, -0.520196, 0.0108215, -0.139992}, {-0.600645, 0.151967, 0.0101909, -0.235608, -0.367466}, {0.262652, 0.84919, -0.131239, 0.0756875, -0.261777}} }; float intB[4][5] = { -0.0421559, 0.246112, 0.0348797, -0.0619016, 0.0988568, 4.98184, 4.97131, 4.98673, 4.97446, 4.96925, 0.255813, 0.527195, 0.120779, -0.0979445, 0.02733, 0.0091722, 0.551458, -0.0521645, 0.0113755, 0.2287}; float outW[5] = {-0.592906, 0.576557, -0.38704, 0.0146919, -0.35076}; float outB = -0.0191289; short inWF[4][5] = {0}; short intWF[4][5][5] = {0}; short intBF[4][5] = {0}; short outWF[5] = {0}; short outBF = 0; for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { inWF[i][j] = (short) (inW[i][j] * 
SCALER); } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { for (t = 0; t < 5; ++t) { intWF[i][j][t] = (short) (intW[i][j][t] * SCALER); } } } for (i = 0; i < 4; ++i) { for (j = 0; j < 5; ++j) { intBF[i][j] = (short) (intB[i][j] * SCALER); } } for (i = 0; i < 5; ++i) { outWF[i] = (short) (outW[i] * SCALER); } outBF = (short) (outB * SCALER); short h_stateF[5] = {0}; short c_stateF[5] = {0}; short i_stateF[5] = {0}; short f_stateF[5] = {0}; short o_stateF[5] = {0}; short g_stateF[5] = {0}; short sampleinput_16b; for (t = 0; t < SAMPLE_TEST_LEN; ++t) { sampleinput_16b = (short) (input[t] + 120000) * 256 / 1875; for (j = 0; j < 5; ++j) { i_stateF[j] = (inWF[0][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) i_stateF[j] += ((h_stateF[i] * intWF[0][j][i]) >> 12); i_stateF[j] += intBF[0][j]; i_stateF[j] = i_stateF[j] >> 5; if (i_stateF[j] >= LUT_SIZE) i_stateF[j] = 4095; else if (i_stateF[j] >= 0) i_stateF[j] = lut_sigmoid[i_stateF[j]]; else if (i_stateF[j] > -LUT_SIZE) i_stateF[j] = 4096 - lut_sigmoid[-i_stateF[j]]; else i_stateF[j] = 1; } for (j = 0; j < 5; ++j) { f_stateF[j] = (inWF[1][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) f_stateF[j] += ((h_stateF[i] * intWF[1][j][i]) >> 12); f_stateF[j] += intBF[1][j]; f_stateF[j] = f_stateF[j] >> 5; if (f_stateF[j] >= LUT_SIZE) f_stateF[j] = 4095; else if (f_stateF[j] >= 0) f_stateF[j] = lut_sigmoid[f_stateF[j]]; else if (f_stateF[j] > -LUT_SIZE) f_stateF[j] = 4096 - lut_sigmoid[-f_stateF[j]]; else f_stateF[j] = 1; } for (j = 0; j < 5; ++j) { o_stateF[j] = (inWF[2][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) o_stateF[j] += ((h_stateF[i] * intWF[2][j][i]) >> 12); o_stateF[j] += intBF[2][j]; o_stateF[j] = o_stateF[j] >> 5; if (o_stateF[j] >= LUT_SIZE) o_stateF[j] = 4095; else if (o_stateF[j] >= 0) o_stateF[j] = lut_sigmoid[o_stateF[j]]; else if (o_stateF[j] > -LUT_SIZE) o_stateF[j] = 4096 - lut_sigmoid[-o_stateF[j]]; else o_stateF[j] = 1; } for (j = 0; j < 5; ++j) { g_stateF[j] = (inWF[3][j] * sampleinput_16b) >> 15; for (i = 0; i < 5; ++i) g_stateF[j] += ((h_stateF[i] * intWF[3][j][i]) >> 12); g_stateF[j] += intBF[3][j]; g_stateF[j] = g_stateF[j] >> 5; if (g_stateF[j] >= LUT_SIZE) g_stateF[j] = 4096; else if (g_stateF[j] >= 0) g_stateF[j] = lut_tanh[g_stateF[j]]; else if (g_stateF[j] > -LUT_SIZE) g_stateF[j] = -lut_tanh[-g_stateF[j]]; else g_stateF[j] = -4096; } for (j = 0; j < 5; ++j) { c_stateF[j] = (((c_stateF[j] * f_stateF[j]) >> 8) + ((g_stateF[j] * i_stateF[j]) >> 12)) >> 4; h_stateF[j] = c_stateF[j] >> 1; if (h_stateF[j] >= LUT_SIZE) h_stateF[j] = 4096; else if (h_stateF[j] >= 0) h_stateF[j] = lut_tanh[h_stateF[j]]; else if (h_stateF[j] > -LUT_SIZE) h_stateF[j] = -lut_tanh[-h_stateF[j]]; else h_stateF[j] = -4096; h_stateF[j] = (h_stateF[j] * o_stateF[j]) >> 12; } output[t] = outBF; for (j = 0; j < 5; ++j) output[t] += ((h_stateF[j] * outWF[j]) >> 12); } } __global__ void lstm_task(int n, int *x, short *y1, short *y2) { int idx = blockIdx.x*blockDim.x + threadIdx.x; //if (i < n) y[i] = a*x[i] + y[i]; int i; int sampleinput[SAMPLE_TEST_LEN]; short test_out1[SAMPLE_TEST_LEN]; short test_out2[SAMPLE_TEST_LEN]; if (idx < n) { for (i = 0; i < SAMPLE_TEST_LEN; ++i) { sampleinput[i] = x[idx * SAMPLE_TEST_LEN + i]; } lstm_n5_o1(sampleinput, test_out1); lstm_n5_o2(sampleinput, test_out2); for (i = 0; i < SAMPLE_TEST_LEN; ++i) { y1[idx * SAMPLE_TEST_LEN + i] = test_out1[i]; y2[idx * SAMPLE_TEST_LEN + i] = test_out2[i]; } } } int main(void) { int N = 1<<19; int *x, *d_x; short *y1, *y2, *d_y1, *d_y2; FILE *ifp, *ofp; struct 
timeval t1, t2, tr; int i, j; int sampleinput[SAMPLE_TEST_LEN]; short test_out1[SAMPLE_TEST_LEN]; short test_out2[SAMPLE_TEST_LEN]; // Read in sample input from "converted-lstm-in.txt" file if (!(ifp = fopen("converted-lstm-in.txt", "r"))) { printf("File converted-lstm-in.txt cannot be opened for read.\n"); return -1; } for (i = 0; i < SAMPLE_TEST_LEN; ++i) { fscanf(ifp, "%d", &sampleinput[i]); } fclose(ifp); // Open output.txt for output data write back. if (!(ofp = fopen("output.txt", "w"))) { printf("File output.txt cannot be opened for write.\n"); return -1; } x = (int*)malloc(N * SAMPLE_TEST_LEN * sizeof(int)); y1 = (short*)malloc(N * SAMPLE_TEST_LEN * sizeof(short)); y2 = (short*)malloc(N * SAMPLE_TEST_LEN * sizeof(short)); cudaMalloc(&d_x, N * SAMPLE_TEST_LEN * sizeof(int)); cudaMalloc(&d_y1, N * SAMPLE_TEST_LEN * sizeof(short)); cudaMalloc(&d_y2, N * SAMPLE_TEST_LEN * sizeof(short)); for (i = 0; i < N; i++) { for (j = 0; j < SAMPLE_TEST_LEN; ++j) { x[i * SAMPLE_TEST_LEN + j] = sampleinput[j]; } } for (int k = 9; k < 10; ++k) { gettimeofday(&t1, NULL); for (j = 0; j < k; ++j) { cudaMemcpy(d_x, x, N * SAMPLE_TEST_LEN * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_y1, y1, N * SAMPLE_TEST_LEN * sizeof(short), cudaMemcpyHostToDevice); cudaMemcpy(d_y2, y2, N * SAMPLE_TEST_LEN * sizeof(short), cudaMemcpyHostToDevice); // Perform SAXPY on 1M elements lstm_task<<<(N+63)/64, 64>>>(N, d_x, d_y1, d_y2); cudaMemcpy(y1, d_y1, N * SAMPLE_TEST_LEN * sizeof(short), cudaMemcpyDeviceToHost); cudaMemcpy(y2, d_y2, N * SAMPLE_TEST_LEN * sizeof(short), cudaMemcpyDeviceToHost); } gettimeofday(&t2, NULL); timersub(&t1, &t2, &tr); printf("Round %d Excute time: %.2f sec\n", k, -tr.tv_sec-(double)tr.tv_usec/1000000.0); } for (i = 0; i < SAMPLE_TEST_LEN; ++i) { test_out1[i] = y1[10000 * SAMPLE_TEST_LEN + i]; test_out2[i] = y2[10000 * SAMPLE_TEST_LEN + i]; } for (i = 0; i < SAMPLE_TEST_LEN; ++i) fprintf(ofp, "%d,%d\n", test_out1[i], test_out2[i]); fclose(ofp); cudaFree(d_x); cudaFree(d_y1); cudaFree(d_y2); free(x); free(y1); free(y2); }
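The two __device__ tables above look like Q12 fixed-point samples (SCALER = 4096) of the sigmoid and tanh functions taken at x = i / 128 for i in [0, LUT_SIZE); the kernels recover negative arguments by symmetry (4096 - lut_sigmoid[-i] and -lut_tanh[-i]). A minimal host-side sketch, under that assumption, that regenerates values consistent with the tabulated entries:

#include <cmath>
#include <cstdio>

int main() {
    const int lut_size = 1024;        // matches LUT_SIZE above
    const double scaler = 4096.0;     // matches SCALER above
    for (int i = 0; i < lut_size; ++i) {
        double x = i / 128.0;         // assumed input scale: 128 table steps per unit
        short sig = (short)std::lround(scaler / (1.0 + std::exp(-x)));
        short tnh = (short)std::lround(scaler * std::tanh(x));
        std::printf("%4d: sigmoid=%d tanh=%d\n", i, sig, tnh);
    }
    return 0;
}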
bd05a3b7a6c71bff278639d518c97f3a521c7a26.hip
// !!! This is a file automatically generated by hipify!!!
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>

using namespace std;
using namespace cv;

// Nearest-neighbour upscale by a factor of k: output pixel (row, col) copies
// input pixel (row / k, col / k). The image is grayscale after cvtColor, so
// the buffers are unsigned char rather than uchar4, and the 2D coordinates
// are flattened into a single row-major index (C++ has no two-argument
// operator[]). The kernel qualifier needs two underscores on each side.
__global__ void knn(const unsigned char *input_image, unsigned char *output_image,
                    int in_cols, int out_rows, int out_cols, int k) {
  int thread_id = threadIdx.x;
  int block_offset = blockIdx.x * blockDim.x;
  int col = block_offset + thread_id;
  int row = blockIdx.y;
  if (row >= out_rows || col >= out_cols) return;
  output_image[row * out_cols + col] = input_image[(row / k) * in_cols + (col / k)];
}

int main() {
  int k = 4;
  unsigned char *input_image = nullptr;
  unsigned char *output_image = nullptr;

  cv::Mat image = cv::imread("C:/2nd Sem/PA/CUDA/Assignment-3/image.jpg");
  if (image.empty()) {
    std::cerr << "Couldn't open file." << std::endl;
    exit(1);
  }

  cv::Mat imageInput;
  // cv::imread returns BGR data, so convert from BGR, not RGB.
  cvtColor(image, imageInput, cv::COLOR_BGR2GRAY);

  int num_Rows = imageInput.rows;
  int num_Cols = imageInput.cols;
  int out_Rows = k * num_Rows;
  int out_Cols = k * num_Cols;
  // The upscaled image has k*k times as many pixels as the input and must be
  // allocated before its data pointer is used.
  cv::Mat imageOutput(out_Rows, out_Cols, CV_8UC1);

  unsigned char *h_inputImage = imageInput.ptr<unsigned char>(0);
  unsigned char *h_outputImage = imageOutput.ptr<unsigned char>(0);

  const int numPixels = num_Rows * num_Cols;
  const int numOutPixels = out_Rows * out_Cols;

  // hipMalloc takes the address of the pointer, not its uninitialised value.
  hipMalloc((void **)&input_image, sizeof(unsigned char) * numPixels);
  hipMalloc((void **)&output_image, sizeof(unsigned char) * numOutPixels);
  hipMemcpy(input_image, h_inputImage, sizeof(unsigned char) * numPixels, hipMemcpyHostToDevice);

  // One output row per blockIdx.y, 32 output columns per block.
  dim3 block(32, 1, 1);
  dim3 grid((out_Cols + block.x - 1) / block.x, out_Rows, 1);
  knn<<<grid, block>>>(input_image, output_image, num_Cols, out_Rows, out_Cols, k);
  hipDeviceSynchronize();

  hipMemcpy(h_outputImage, output_image, sizeof(unsigned char) * numOutPixels, hipMemcpyDeviceToHost);

  hipFree(input_image);
  hipFree(output_image);
  hipDeviceReset();
}
bd05a3b7a6c71bff278639d518c97f3a521c7a26.cu
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>

using namespace std;
using namespace cv;

// Nearest-neighbour upscale by a factor of k: output pixel (row, col) copies
// input pixel (row / k, col / k). The image is grayscale after cvtColor, so
// the buffers are unsigned char rather than uchar4, and the 2D coordinates
// are flattened into a single row-major index (C++ has no two-argument
// operator[]). The kernel qualifier needs two underscores on each side.
__global__ void knn(const unsigned char *input_image, unsigned char *output_image,
                    int in_cols, int out_rows, int out_cols, int k) {
  int thread_id = threadIdx.x;
  int block_offset = blockIdx.x * blockDim.x;
  int col = block_offset + thread_id;
  int row = blockIdx.y;
  if (row >= out_rows || col >= out_cols) return;
  output_image[row * out_cols + col] = input_image[(row / k) * in_cols + (col / k)];
}

int main() {
  int k = 4;
  unsigned char *input_image = nullptr;
  unsigned char *output_image = nullptr;

  cv::Mat image = cv::imread("C:/2nd Sem/PA/CUDA/Assignment-3/image.jpg");
  if (image.empty()) {
    std::cerr << "Couldn't open file." << std::endl;
    exit(1);
  }

  cv::Mat imageInput;
  // cv::imread returns BGR data, so convert from BGR, not RGB.
  cvtColor(image, imageInput, cv::COLOR_BGR2GRAY);

  int num_Rows = imageInput.rows;
  int num_Cols = imageInput.cols;
  int out_Rows = k * num_Rows;
  int out_Cols = k * num_Cols;
  // The upscaled image has k*k times as many pixels as the input and must be
  // allocated before its data pointer is used.
  cv::Mat imageOutput(out_Rows, out_Cols, CV_8UC1);

  unsigned char *h_inputImage = imageInput.ptr<unsigned char>(0);
  unsigned char *h_outputImage = imageOutput.ptr<unsigned char>(0);

  const int numPixels = num_Rows * num_Cols;
  const int numOutPixels = out_Rows * out_Cols;

  // cudaMalloc takes the address of the pointer, not its uninitialised value.
  cudaMalloc((void **)&input_image, sizeof(unsigned char) * numPixels);
  cudaMalloc((void **)&output_image, sizeof(unsigned char) * numOutPixels);
  cudaMemcpy(input_image, h_inputImage, sizeof(unsigned char) * numPixels, cudaMemcpyHostToDevice);

  // One output row per blockIdx.y, 32 output columns per block.
  dim3 block(32, 1, 1);
  dim3 grid((out_Cols + block.x - 1) / block.x, out_Rows, 1);
  knn<<<grid, block>>>(input_image, output_image, num_Cols, out_Rows, out_Cols, k);
  cudaDeviceSynchronize();

  cudaMemcpy(h_outputImage, output_image, sizeof(unsigned char) * numOutPixels, cudaMemcpyDeviceToHost);

  cudaFree(input_image);
  cudaFree(output_image);
  cudaDeviceReset();
}
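For checking the kernel's result, a straightforward CPU reference of the assumed nearest-neighbour upscale (same row-major grayscale layout and scale factor k) can be useful:

// CPU reference: output pixel (r, c) copies input pixel (r / k, c / k);
// both buffers are row-major, single-channel, 8-bit.
void knn_reference(const unsigned char *in, unsigned char *out,
                   int in_rows, int in_cols, int k) {
    const int out_rows = k * in_rows;
    const int out_cols = k * in_cols;
    for (int r = 0; r < out_rows; ++r)
        for (int c = 0; c < out_cols; ++c)
            out[r * out_cols + c] = in[(r / k) * in_cols + (c / k)];
}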
6326365ee9ac6e3a9ec0377b6938fd2bd5510856.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <vector> #include "paddle/fluid/operators/math/depthwise_conv.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { // A Cuda kernel to compute the depthwise convolution forward pass // in NCHW format. template <typename T> __global__ void KernelDepthwiseConv( const int nthreads, const T* const input_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const output_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / output_channels / output_height / output_width; const int c_out = (index / output_height / output_width) % output_channels; const int h_out = (index / output_width) % output_height; const int w_out = index % output_width; const int c_in = c_out / filter_multiplier; const T* weight = filter_data + c_out * filter_height * filter_width; T value = 0; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = h_in_start + filter_height; const int w_in_end = w_in_start + filter_width; const int in_offset = ((batch * input_channels + c_in) * input_height) * input_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; value += weight[(h_in - h_in_start) * filter_width + (w_in - w_in_start)] * input_data[offset]; } } output_data[index] = value; } } // CUDA kernel to compute the depthwise convolution backprop w.r.t input. 
template <typename T> __global__ void KernelDepthwiseConvInputGrad( const int nthreads, const T* const output_grad_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const input_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / input_channels / input_height / input_width; const int c_in = (index / input_height / input_width) % input_channels; const int h_in = (index / input_width) % input_height; const int w_in = index % input_width; const int c_out_start = c_in * filter_multiplier; int h_out_start = (h_in - filter_height + padding_height + stride_height) / stride_height; h_out_start = 0 > h_out_start ? 0 : h_out_start; int h_out_end = (h_in + padding_height) / stride_height; h_out_end = output_height - 1 < h_out_end ? output_height - 1 : h_out_end; int w_out_start = (w_in - filter_width + padding_width + stride_width) / stride_width; w_out_start = 0 > w_out_start ? 0 : w_out_start; int w_out_end = (w_in + padding_width) / stride_width; w_out_end = output_width - 1 < w_out_end ? output_width - 1 : w_out_end; T value = 0; for (int c_out = c_out_start; c_out < c_out_start + filter_multiplier; c_out++) { for (int h_out = h_out_start; h_out <= h_out_end; ++h_out) { const int filter_h = h_in + padding_height - h_out * stride_height; for (int w_out = w_out_start; w_out <= w_out_end; ++w_out) { const int filter_w = w_in + padding_width - w_out * stride_width; const int filter_offset = c_out * filter_height * filter_width + filter_h * filter_width + filter_w; const int output_grad_offset = ((batch * output_channels + c_out) * output_height + h_out) * output_width + w_out; value += output_grad_data[output_grad_offset] * filter_data[filter_offset]; } } } input_grad_data[index] += value; } } // Cuda kernel to compute the depthwise convolution backprop w.r.t. filter. 
template <typename T> __global__ void KernelDepthwiseConvFilterGrad( const int nthreads, const T* const output_grad_data, const T* const input_data, const int num, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const filter_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int w_out = index % output_width; const int h_out = (index / output_width) % output_height; const int c_out = (index / output_width / output_height) % output_channels; const int batch = (index / output_width / output_height / output_channels); const int c_in = c_out / filter_multiplier; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = -padding_height + h_out * stride_height + filter_height; const int w_in_end = -padding_width + w_out * stride_width + filter_width; const int in_offset = (batch * input_channels + c_in) * input_height * input_width; T* addr_offset = filter_grad_data + c_out * filter_height * filter_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; const T diff_temp = output_grad_data[index] * input_data[offset]; T* addr = addr_offset + (h_in - h_in_start) * filter_width + (w_in - w_in_start); paddle::platform::CudaAtomicAdd(addr, diff_temp); } } } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <class T> class DepthwiseConvFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* filter_data = filter.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelDepthwiseConv<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data); } }; template <typename T> class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* filter_data = filter.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelDepthwiseConvInputGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template <typename T> class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* filter_grad) { const int batch_size = input.dims()[0]; const int 
input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter_grad->dims()[2]; const int ksize_width = filter_grad->dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* filter_grad_data = filter_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelDepthwiseConvFilterGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, input_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, filter_grad_data); } }; template class DepthwiseConvFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
6326365ee9ac6e3a9ec0377b6938fd2bd5510856.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <vector> #include "paddle/fluid/operators/math/depthwise_conv.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { namespace operators { namespace math { // A Cuda kernel to compute the depthwise convolution forward pass // in NCHW format. template <typename T> __global__ void KernelDepthwiseConv( const int nthreads, const T* const input_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const output_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / output_channels / output_height / output_width; const int c_out = (index / output_height / output_width) % output_channels; const int h_out = (index / output_width) % output_height; const int w_out = index % output_width; const int c_in = c_out / filter_multiplier; const T* weight = filter_data + c_out * filter_height * filter_width; T value = 0; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = h_in_start + filter_height; const int w_in_end = w_in_start + filter_width; const int in_offset = ((batch * input_channels + c_in) * input_height) * input_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; value += weight[(h_in - h_in_start) * filter_width + (w_in - w_in_start)] * input_data[offset]; } } output_data[index] = value; } } // CUDA kernel to compute the depthwise convolution backprop w.r.t input. 
template <typename T> __global__ void KernelDepthwiseConvInputGrad( const int nthreads, const T* const output_grad_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const input_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / input_channels / input_height / input_width; const int c_in = (index / input_height / input_width) % input_channels; const int h_in = (index / input_width) % input_height; const int w_in = index % input_width; const int c_out_start = c_in * filter_multiplier; int h_out_start = (h_in - filter_height + padding_height + stride_height) / stride_height; h_out_start = 0 > h_out_start ? 0 : h_out_start; int h_out_end = (h_in + padding_height) / stride_height; h_out_end = output_height - 1 < h_out_end ? output_height - 1 : h_out_end; int w_out_start = (w_in - filter_width + padding_width + stride_width) / stride_width; w_out_start = 0 > w_out_start ? 0 : w_out_start; int w_out_end = (w_in + padding_width) / stride_width; w_out_end = output_width - 1 < w_out_end ? output_width - 1 : w_out_end; T value = 0; for (int c_out = c_out_start; c_out < c_out_start + filter_multiplier; c_out++) { for (int h_out = h_out_start; h_out <= h_out_end; ++h_out) { const int filter_h = h_in + padding_height - h_out * stride_height; for (int w_out = w_out_start; w_out <= w_out_end; ++w_out) { const int filter_w = w_in + padding_width - w_out * stride_width; const int filter_offset = c_out * filter_height * filter_width + filter_h * filter_width + filter_w; const int output_grad_offset = ((batch * output_channels + c_out) * output_height + h_out) * output_width + w_out; value += output_grad_data[output_grad_offset] * filter_data[filter_offset]; } } } input_grad_data[index] += value; } } // Cuda kernel to compute the depthwise convolution backprop w.r.t. filter. 
template <typename T> __global__ void KernelDepthwiseConvFilterGrad( const int nthreads, const T* const output_grad_data, const T* const input_data, const int num, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const filter_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int w_out = index % output_width; const int h_out = (index / output_width) % output_height; const int c_out = (index / output_width / output_height) % output_channels; const int batch = (index / output_width / output_height / output_channels); const int c_in = c_out / filter_multiplier; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = -padding_height + h_out * stride_height + filter_height; const int w_in_end = -padding_width + w_out * stride_width + filter_width; const int in_offset = (batch * input_channels + c_in) * input_height * input_width; T* addr_offset = filter_grad_data + c_out * filter_height * filter_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; const T diff_temp = output_grad_data[index] * input_data[offset]; T* addr = addr_offset + (h_in - h_in_start) * filter_width + (w_in - w_in_start); paddle::platform::CudaAtomicAdd(addr, diff_temp); } } } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <class T> class DepthwiseConvFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* filter_data = filter.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelDepthwiseConv<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data); } }; template <typename T> class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* filter_data = filter.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelDepthwiseConvInputGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template <typename T> class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* filter_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = 
input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter_grad->dims()[2]; const int ksize_width = filter_grad->dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* filter_grad_data = filter_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelDepthwiseConvFilterGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, input_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, filter_grad_data); } }; template class DepthwiseConvFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
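The functors above read the output spatial size from the output tensor; when following the kernels' index arithmetic it helps to keep in mind that those sizes are expected to obey the usual (floor) convolution formula, and that the launch uses one thread per output element with 1024 threads per block. A small sketch, not part of the Paddle API:

// Expected output spatial extent for the stride/padding combinations the
// kernels assume, plus the block count used by the functors above.
inline int conv_out_size(int in_size, int ksize, int pad, int stride) {
    return (in_size + 2 * pad - ksize) / stride + 1;
}

inline int num_blocks(int nthreads) {
    return (nthreads + 1024 - 1) / 1024;   // 1024 threads per block
}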
205519b2a468f08f946378742a32a0187e8b4bb6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/affine_channel_op.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, StorageOrder kOrder> __global__ void AffineChannelScaleBiasBackwardCUDAKernel( const int N, const int C, const int HxW, const T* dY, const T* X, T* dscale, T* dbias) { const int outer_size = C; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_sum = 0; T db_sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = kOrder == StorageOrder::NCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; #if __CUDA_ARCH__ >= 350 ds_sum += __ldg(dY + index) * __ldg(X + index); db_sum += __ldg(dY + index); #else ds_sum += dY[index] * X[index]; db_sum += dY[index]; #endif } ds_sum = BlockReduce<T>(ds_storage).Reduce(ds_sum, hipcub::Sum()); db_sum = BlockReduce<T>(db_storage).Reduce(db_sum, hipcub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum; dbias[i] = db_sum; } __syncthreads(); } } } // namespace template <> bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { const auto& dY = Input(0); const auto& scale = is_learnable_ ? Input(2) : Input(1); auto* dX = Output(0); dX->ResizeLike(dY); const int N = dY.dim32(0); const int C = dY.dim32(1); const int HxW = dY.size() / (N * C); const float* dY_data = dY.data<float>(); const float* scale_data = scale.data<float>(); const std::array<int, 3> X_dims = {N, C, HxW}; const std::array<int, 3> scale_dims = {1, C, 1}; math::Mul<float, CUDAContext>( 3, X_dims.data(), 3, scale_dims.data(), dY_data, scale_data, dX->template mutable_data<float>(), &context_); if (is_learnable_) { const auto& X = Input(1); const float* X_data = X.data<float>(); auto* dscale = Output(1); auto* dbias = Output(2); dscale->ResizeLike(scale); dbias->ResizeLike(scale); const int outer_size = N * HxW; hipLaunchKernelGGL(( AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NCHW>) , dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dY_data, X_data, dscale->template mutable_data<float>(), dbias->template mutable_data<float>()); } return true; } template <> bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { const auto& dY = Input(0); const auto& scale = is_learnable_ ? 
Input(2) : Input(1); auto* dX = Output(0); dX->ResizeLike(dY); const int ndim = dY.ndim(); const int C = dY.dim32(ndim - 1); const int rows = dY.size() / C; const int cols = C; const float* dY_data = dY.data<float>(); const float* scale_data = scale.data<float>(); math::RowwiseMul<float, CUDAContext>( rows, cols, dY_data, scale_data, dX->template mutable_data<float>(), &context_); if (is_learnable_) { const auto& X = Input(1); const float* X_data = X.data<float>(); const int N = X.dim32(0); const int HxW = rows / N; auto* dscale = Output(1); auto* dbias = Output(2); dscale->ResizeLike(scale); dbias->ResizeLike(scale); hipLaunchKernelGGL(( AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NHWC>) , dim3(::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, C, HxW, dY_data, X_data, dscale->template mutable_data<float>(), dbias->template mutable_data<float>()); } return true; } REGISTER_CUDA_OPERATOR(AffineChannel, AffineChannelOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( AffineChannelGradient, AffineChannelGradientOp<float, CUDAContext>); } // namespace caffe2
205519b2a468f08f946378742a32a0187e8b4bb6.cu
#include "caffe2/operators/affine_channel_op.h" #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T, StorageOrder kOrder> __global__ void AffineChannelScaleBiasBackwardCUDAKernel( const int N, const int C, const int HxW, const T* dY, const T* X, T* dscale, T* dbias) { const int outer_size = C; const int inner_size = N * HxW; __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { T ds_sum = 0; T db_sum = 0; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = kOrder == StorageOrder::NCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; #if __CUDA_ARCH__ >= 350 ds_sum += __ldg(dY + index) * __ldg(X + index); db_sum += __ldg(dY + index); #else ds_sum += dY[index] * X[index]; db_sum += dY[index]; #endif } ds_sum = BlockReduce<T>(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce<T>(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum; dbias[i] = db_sum; } __syncthreads(); } } } // namespace template <> bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { const auto& dY = Input(0); const auto& scale = is_learnable_ ? Input(2) : Input(1); auto* dX = Output(0); dX->ResizeLike(dY); const int N = dY.dim32(0); const int C = dY.dim32(1); const int HxW = dY.size() / (N * C); const float* dY_data = dY.data<float>(); const float* scale_data = scale.data<float>(); const std::array<int, 3> X_dims = {N, C, HxW}; const std::array<int, 3> scale_dims = {1, C, 1}; math::Mul<float, CUDAContext>( 3, X_dims.data(), 3, scale_dims.data(), dY_data, scale_data, dX->template mutable_data<float>(), &context_); if (is_learnable_) { const auto& X = Input(1); const float* X_data = X.data<float>(); auto* dscale = Output(1); auto* dbias = Output(2); dscale->ResizeLike(scale); dbias->ResizeLike(scale); const int outer_size = N * HxW; AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NCHW> <<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, dY_data, X_data, dscale->template mutable_data<float>(), dbias->template mutable_data<float>()); } return true; } template <> bool AffineChannelGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { const auto& dY = Input(0); const auto& scale = is_learnable_ ? 
Input(2) : Input(1); auto* dX = Output(0); dX->ResizeLike(dY); const int ndim = dY.ndim(); const int C = dY.dim32(ndim - 1); const int rows = dY.size() / C; const int cols = C; const float* dY_data = dY.data<float>(); const float* scale_data = scale.data<float>(); math::RowwiseMul<float, CUDAContext>( rows, cols, dY_data, scale_data, dX->template mutable_data<float>(), &context_); if (is_learnable_) { const auto& X = Input(1); const float* X_data = X.data<float>(); const int N = X.dim32(0); const int HxW = rows / N; auto* dscale = Output(1); auto* dbias = Output(2); dscale->ResizeLike(scale); dbias->ResizeLike(scale); AffineChannelScaleBiasBackwardCUDAKernel<float, StorageOrder::NHWC> <<<std::min(rows, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, C, HxW, dY_data, X_data, dscale->template mutable_data<float>(), dbias->template mutable_data<float>()); } return true; } REGISTER_CUDA_OPERATOR(AffineChannel, AffineChannelOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( AffineChannelGradient, AffineChannelGradientOp<float, CUDAContext>); } // namespace caffe2
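A plain CPU sketch of the per-channel reductions that AffineChannelScaleBiasBackwardCUDAKernel computes (NCHW shown; the NHWC variant only changes the index formula) can make the kOrder index mapping easier to read:

// CPU sketch of the reductions: dscale[c] = sum over n,hw of dY * X,
// dbias[c] = sum over n,hw of dY, with the NCHW index (n * C + c) * HxW + hw.
void affine_channel_grad_ref(const float *dY, const float *X,
                             int N, int C, int HxW, float *dscale, float *dbias) {
    for (int c = 0; c < C; ++c) {
        float ds = 0.f, db = 0.f;
        for (int n = 0; n < N; ++n)
            for (int hw = 0; hw < HxW; ++hw) {
                const int idx = (n * C + c) * HxW + hw;
                ds += dY[idx] * X[idx];
                db += dY[idx];
            }
        dscale[c] = ds;
        dbias[c] = db;
    }
}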
786f624cdd69d6bc603c7fd1af1e7d54bf923a0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/operators/math.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" namespace paddle { namespace operators { namespace math { template <typename T, typename LabelT> __global__ void CrossEntropyKernel(T* Y, const T* X, const LabelT* label, const int N, const int D, const int ignore_index) { CUDA_KERNEL_LOOP(i, N) { auto lbl = static_cast<int64_t>(label[i]); PADDLE_ENFORCE(lbl >= 0 && lbl < D || lbl == ignore_index, "The value of label[%d] expected >= 0 and < %ld, or == %ld, " "but got %ld. Please check input value.", i, D, ignore_index, lbl); Y[i] = ignore_index == lbl ? static_cast<T>(0) : -math::TolerableValue<T>()(real_log(X[i * D + lbl])); } } template <typename T> __global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label, const int class_num) { int tid = threadIdx.x; T val(0); int idx = blockIdx.x * class_num + tid; int end = blockIdx.x * class_num + class_num; for (; idx < end; idx += blockDim.x) { val += math::TolerableValue<T>()(real_log(X[idx])) * label[idx]; } val = paddle::platform::reduceSum(val, tid, blockDim.x); if (threadIdx.x == 0) { Y[blockIdx.x] = -val; } } template <typename T> struct HardLabelCrossEntropyCUDAFunctorImpl { public: HardLabelCrossEntropyCUDAFunctorImpl(T* loss_data, const T* prob_data, const void* label_data, const int batch_size, const int class_num, const int ignore_index, const int block_size, gpuStream_t stream) : loss_data_(loss_data), prob_data_(prob_data), label_data_(label_data), batch_size_(batch_size), class_num_(class_num), ignore_index_(ignore_index), block_size_(block_size), stream_(stream) {} template <typename U> void apply() const { int grid_size = (batch_size_ + block_size_ - 1) / block_size_; hipLaunchKernelGGL(( CrossEntropyKernel<T, U>), dim3(grid_size), dim3(block_size_), 0, stream_, loss_data_, prob_data_, static_cast<const U*>(label_data_), batch_size_, class_num_, ignore_index_); } private: T* loss_data_; const T* prob_data_; const void* label_data_; const int batch_size_; const int class_num_; const int ignore_index_; const int block_size_; gpuStream_t stream_; }; template <typename DeviceContext, typename T> void CrossEntropyFunctor<DeviceContext, T>::operator()( const DeviceContext& ctx, framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel, const int ignore_index, const int axis_dim) { const T* prob_data = prob->data<T>(); T* loss_data = out->mutable_data<T>(ctx.GetPlace()); int batch_size = prob->dims()[0]; int class_num = prob->dims()[1]; #ifdef __HIPCC__ constexpr int kMaxBlockDim = 256; #else constexpr int kMaxBlockDim = 512; #endif if (softLabel) 
{ const T* label_data = labels->data<T>(); int block = class_num > kMaxBlockDim ? kMaxBlockDim : pow(2, static_cast<int>(std::log2(class_num))); hipLaunchKernelGGL(( SoftCrossEntropyKernel<T>), dim3(batch_size), dim3(block), 0, ctx.stream(), loss_data, prob_data, label_data, class_num); } else { HardLabelCrossEntropyCUDAFunctorImpl<T> functor(loss_data, prob_data, labels->data(), batch_size, class_num, ignore_index, kMaxBlockDim, ctx.stream()); framework::VisitDataType(framework::TransToProtoVarType(labels->dtype()), functor); } } template class CrossEntropyFunctor<phi::GPUContext, float>; template class CrossEntropyFunctor<phi::GPUContext, double>; template class CrossEntropyFunctor<phi::GPUContext, platform::float16>; } // namespace math } // namespace operators } // namespace paddle
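The CrossEntropyKernel above iterates with CUDA_KERNEL_LOOP(i, N), which in Paddle-style code is the usual grid-stride loop idiom. The macro below is only an illustrative sketch of that shape (the real definition lives in the framework's headers, not in this row), and SquareKernel is a made-up kernel used to show the control flow:

#include <cuda_runtime.h>

// Hypothetical stand-in for CUDA_KERNEL_LOOP: each thread starts at its global
// index and advances by the total number of launched threads, so any grid size
// covers all n elements.
#define GRID_STRIDE_LOOP(i, n)                                   \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);   \
       i += blockDim.x * gridDim.x)

__global__ void SquareKernel(float* y, const float* x, int n) {
  GRID_STRIDE_LOOP(i, n) { y[i] = x[i] * x[i]; }
}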
786f624cdd69d6bc603c7fd1af1e7d54bf923a0e.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/operators/math.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" namespace paddle { namespace operators { namespace math { template <typename T, typename LabelT> __global__ void CrossEntropyKernel(T* Y, const T* X, const LabelT* label, const int N, const int D, const int ignore_index) { CUDA_KERNEL_LOOP(i, N) { auto lbl = static_cast<int64_t>(label[i]); PADDLE_ENFORCE(lbl >= 0 && lbl < D || lbl == ignore_index, "The value of label[%d] expected >= 0 and < %ld, or == %ld, " "but got %ld. Please check input value.", i, D, ignore_index, lbl); Y[i] = ignore_index == lbl ? static_cast<T>(0) : -math::TolerableValue<T>()(real_log(X[i * D + lbl])); } } template <typename T> __global__ void SoftCrossEntropyKernel(T* Y, const T* X, const T* label, const int class_num) { int tid = threadIdx.x; T val(0); int idx = blockIdx.x * class_num + tid; int end = blockIdx.x * class_num + class_num; for (; idx < end; idx += blockDim.x) { val += math::TolerableValue<T>()(real_log(X[idx])) * label[idx]; } val = paddle::platform::reduceSum(val, tid, blockDim.x); if (threadIdx.x == 0) { Y[blockIdx.x] = -val; } } template <typename T> struct HardLabelCrossEntropyCUDAFunctorImpl { public: HardLabelCrossEntropyCUDAFunctorImpl(T* loss_data, const T* prob_data, const void* label_data, const int batch_size, const int class_num, const int ignore_index, const int block_size, gpuStream_t stream) : loss_data_(loss_data), prob_data_(prob_data), label_data_(label_data), batch_size_(batch_size), class_num_(class_num), ignore_index_(ignore_index), block_size_(block_size), stream_(stream) {} template <typename U> void apply() const { int grid_size = (batch_size_ + block_size_ - 1) / block_size_; CrossEntropyKernel<T, U><<<grid_size, block_size_, 0, stream_>>>( loss_data_, prob_data_, static_cast<const U*>(label_data_), batch_size_, class_num_, ignore_index_); } private: T* loss_data_; const T* prob_data_; const void* label_data_; const int batch_size_; const int class_num_; const int ignore_index_; const int block_size_; gpuStream_t stream_; }; template <typename DeviceContext, typename T> void CrossEntropyFunctor<DeviceContext, T>::operator()( const DeviceContext& ctx, framework::Tensor* out, const framework::Tensor* prob, const framework::Tensor* labels, const bool softLabel, const int ignore_index, const int axis_dim) { const T* prob_data = prob->data<T>(); T* loss_data = out->mutable_data<T>(ctx.GetPlace()); int batch_size = prob->dims()[0]; int class_num = prob->dims()[1]; #ifdef __HIPCC__ constexpr int kMaxBlockDim = 256; #else constexpr int kMaxBlockDim = 512; #endif if (softLabel) { const T* label_data = labels->data<T>(); int block = class_num > kMaxBlockDim ? 
kMaxBlockDim : pow(2, static_cast<int>(std::log2(class_num))); SoftCrossEntropyKernel<T><<<batch_size, block, 0, ctx.stream()>>>( loss_data, prob_data, label_data, class_num); } else { HardLabelCrossEntropyCUDAFunctorImpl<T> functor(loss_data, prob_data, labels->data(), batch_size, class_num, ignore_index, kMaxBlockDim, ctx.stream()); framework::VisitDataType(framework::TransToProtoVarType(labels->dtype()), functor); } } template class CrossEntropyFunctor<phi::GPUContext, float>; template class CrossEntropyFunctor<phi::GPUContext, double>; template class CrossEntropyFunctor<phi::GPUContext, platform::float16>; } // namespace math } // namespace operators } // namespace paddle
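The only systematic difference between the .cu and .hip sides of this pair is the kernel-launch syntax: the CUDA file writes CrossEntropyKernel<T, U><<<grid_size, block_size_, 0, stream_>>>(...), while the hipified file writes hipLaunchKernelGGL((CrossEntropyKernel<T, U>), dim3(grid_size), dim3(block_size_), 0, stream_, ...). A minimal sketch of that mapping on a made-up kernel (ScaleKernel and its arguments are not taken from the files above):

#include <cuda_runtime.h>

__global__ void ScaleKernel(float* y, const float* x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = a * x[i];
}

void LaunchScale(float* d_y, const float* d_x, float a, int n, cudaStream_t stream) {
  int block = 256;
  int grid = (n + block - 1) / block;
  // CUDA form, as in the .cu file of this row:
  ScaleKernel<<<grid, block, 0, stream>>>(d_y, d_x, a, n);
  // HIP form emitted by hipify, as in the .hip file of this row:
  //   hipLaunchKernelGGL((ScaleKernel), dim3(grid), dim3(block), 0, stream,
  //                      d_y, d_x, a, n);
}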
433dd3dc21d0180c3978467e51160958cc82613a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "dev_const.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *px = NULL; hipMalloc(&px, XSIZE*YSIZE); float k = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( dev_const), dim3(gridBlock),dim3(threadBlock), 0, 0, px,k); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( dev_const), dim3(gridBlock),dim3(threadBlock), 0, 0, px,k); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( dev_const), dim3(gridBlock),dim3(threadBlock), 0, 0, px,k); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
433dd3dc21d0180c3978467e51160958cc82613a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "dev_const.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *px = NULL; cudaMalloc(&px, XSIZE*YSIZE); float k = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); dev_const<<<gridBlock,threadBlock>>>(px,k); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { dev_const<<<gridBlock,threadBlock>>>(px,k); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { dev_const<<<gridBlock,threadBlock>>>(px,k); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
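The benchmark pair above warms the kernel up and then times 1000 back-to-back launches with std::chrono::steady_clock. Since kernel launches are asynchronous, a common variant of this harness also synchronizes before reading the end timestamp, so the figure reflects execution time rather than only launch-enqueue overhead. A self-contained sketch of that variant (FillKernel stands in for dev_const, whose source is only #include'd above, not shown):

#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void FillKernel(float* p, float v, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = v;
}

int main() {
  const int n = 1 << 20;
  float* d_p = nullptr;
  cudaMalloc(&d_p, n * sizeof(float));

  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  FillKernel<<<grid, block>>>(d_p, 1.0f, n);   // warm-up
  cudaDeviceSynchronize();

  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < 1000; ++i) FillKernel<<<grid, block>>>(d_p, 1.0f, n);
  cudaDeviceSynchronize();                     // wait for the work, not just the enqueue
  auto end = std::chrono::steady_clock::now();

  std::chrono::duration<float, std::micro> usecs = end - start;
  printf("avg %.3f us per launch\n", usecs.count() / 1000.0f);

  cudaFree(d_p);
  return 0;
}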
40cfe32a3ab7b5358fc6853d1a19736cebf53df2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/fused_attention_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/activation_functor.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #include "paddle/phi/kernels/funcs/sparse/sparse_blas.h" #include "paddle/phi/kernels/sparse/empty_kernel.h" #include "paddle/phi/kernels/sparse/matmul_kernel.h" #include "paddle/phi/kernels/sparse/sparse_utils_kernel.h" namespace phi { namespace sparse { template <typename T> __global__ void AttnSoftmaxGpuKernel(const int64_t* x_crows, const int64_t* x_cols, const T* x_values, const T* kp_mask, const T* attn_mask, T* out_values, int M, int total_row_num, int num_heads, int batch_nnz) { // out = exp(x-x_max) / sum(exp(x-x_max)) int row = blockIdx.x * blockDim.y + threadIdx.y; if (row >= total_row_num) return; int cur_batch = row / M; int cur_row = row % M; int crow_idx = cur_batch * (M + 1) + cur_row; int row_first = cur_batch * batch_nnz + static_cast<int>(x_crows[crow_idx]); int row_nnz = static_cast<int>(x_crows[crow_idx + 1] - x_crows[crow_idx]); if (row_nnz == 0) return; T max_val = -std::numeric_limits<T>::infinity(); for (int idx = threadIdx.x; idx < row_nnz; idx += blockDim.x) { bool mask = false; int col_idx = static_cast<int>(x_cols[row_first + idx]); if (kp_mask != nullptr && kp_mask[(cur_batch / num_heads) * M + col_idx] == 0) { mask = true; } if (attn_mask != nullptr && attn_mask[cur_row * M + col_idx] == 0) { mask = true; } if (!mask) { T val = x_values[row_first + idx]; if (val > max_val) { max_val = val; } out_values[row_first + idx] = val; } else { // Note corner case: when all elements of the row are masked, result // may be wrong because of exp('-inf' - '-inf'), just ignore now. 
out_values[row_first + idx] = -std::numeric_limits<T>::infinity(); } } T row_max_val = phi::funcs::WarpReduceMax<T>(max_val, 0xFFFFFFFF); T exp_sum = 0; for (int idx = threadIdx.x; idx < row_nnz; idx += blockDim.x) { auto functor = phi::funcs::CudaExpFunctor<T>(); T exp = functor(out_values[row_first + idx] - row_max_val); exp_sum += exp; out_values[row_first + idx] = exp; } T row_exp_sum = phi::funcs::WarpReduceSum<T>(exp_sum, 0xFFFFFFFF); for (int idx = threadIdx.x; idx < row_nnz; idx += blockDim.x) { out_values[row_first + idx] = out_values[row_first + idx] / row_exp_sum; } } template <typename T, typename Context> void FusedAttentionCsrKernel( const Context& dev_ctx, const DenseTensor& query, const DenseTensor& key, const DenseTensor& value, const SparseCsrTensor& sparse_mask, const paddle::optional<DenseTensor>& key_padding_mask, const paddle::optional<DenseTensor>& attn_mask, DenseTensor* out, SparseCsrTensor* softmax) { #if TORCH_HIP_VERSION >= 11080 /* Check Shape */ auto q_dim = query.dims(); auto q_rank = q_dim.size(); int total_row_num = 1; int batch_num = 1; for (int i = 0; i < q_rank - 1; ++i) { total_row_num *= q_dim[i]; if (i < q_rank - 2) { batch_num *= q_dim[i]; } } int M = q_dim[q_rank - 2]; int N = q_dim[q_rank - 1]; PADDLE_ENFORCE_EQ(query.dims().size(), 4, phi::errors::InvalidArgument(" 'query' must be 4D Tensor")); PADDLE_ENFORCE_EQ(key.dims().size(), 4, phi::errors::InvalidArgument(" 'key' must be 4D Tensor")); PADDLE_ENFORCE_EQ(value.dims().size(), 4, phi::errors::InvalidArgument(" 'value' must be 4D Tensor")); PADDLE_ENFORCE_EQ( sparse_mask.dims().size(), 3, phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); PADDLE_ENFORCE_EQ( sparse_mask.dims()[0], q_dim[0] * q_dim[1], phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); PADDLE_ENFORCE_EQ( sparse_mask.dims()[1], M, phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); PADDLE_ENFORCE_EQ( sparse_mask.dims()[2], M, phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); const auto kp_mask_ptr = key_padding_mask.get_ptr(); if (kp_mask_ptr) { PADDLE_ENFORCE_EQ( kp_mask_ptr->dims().size(), 2, phi::errors::InvalidArgument( "shape of 'key_padding_mask' must be [batch_size, seq_len]")); PADDLE_ENFORCE_EQ( kp_mask_ptr->dims()[0], q_dim[0], phi::errors::InvalidArgument( "shape of 'key_padding_mask' must be [batch_size, seq_len]")); PADDLE_ENFORCE_EQ( kp_mask_ptr->dims()[1], M, phi::errors::InvalidArgument( "shape of 'key_padding_mask' must be [batch_size, seq_len]")); } const auto attn_mask_ptr = attn_mask.get_ptr(); if (attn_mask_ptr) { PADDLE_ENFORCE_EQ(attn_mask_ptr->dims().size(), 2, phi::errors::InvalidArgument( "shape of 'attn_mask' must be [seq_len, seq_len]")); PADDLE_ENFORCE_EQ(attn_mask_ptr->dims()[0], M, phi::errors::InvalidArgument( "shape of 'attn_mask' must be [seq_len, seq_len]")); PADDLE_ENFORCE_EQ(attn_mask_ptr->dims()[1], M, phi::errors::InvalidArgument( "shape of 'attn_mask' must be [seq_len, seq_len]")); } /* Step1: SDD Matmul, reuse matmul */ SparseCsrTensor sdd_result; EmptyLikeCsrKernel<T, Context>(dev_ctx, sparse_mask, &sdd_result); auto sparse_blas = phi::funcs::sparse::GetSparseBlas<Context, T>(dev_ctx); sparse_blas.SDDMM(false, true, static_cast<T>(1 / std::sqrt(N)), query, key, static_cast<T>(0), &sdd_result); EmptyLikeCsrKernel<T, Context>(dev_ctx, 
sdd_result, softmax); dim3 grid((total_row_num + 7) / 8); dim3 block(WARP_SIZE, 8); int batch_nnz = sdd_result.nnz() / batch_num; hipLaunchKernelGGL(( AttnSoftmaxGpuKernel<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), sdd_result.crows().data<int64_t>(), sdd_result.cols().data<int64_t>(), sdd_result.values().data<T>(), kp_mask_ptr ? kp_mask_ptr->data<T>() : nullptr, attn_mask_ptr ? attn_mask_ptr->data<T>() : nullptr, softmax->mutable_values()->data<T>(), M, total_row_num, q_dim[1], batch_nnz); softmax->set_dims(phi::make_ddim({q_dim[0], q_dim[1], q_dim[2], q_dim[2]})); MatmulCsrDenseKernel<T, Context>(dev_ctx, *softmax, value, out); #else PADDLE_THROW( phi::errors::Unimplemented("forward of 'sparse.nn.functional.attention' " "use 'hipsparseCsrSetStridedBatch', which is " "completed supported from CUDA 11.8")); #endif } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(fused_attention_csr, GPU, ALL_LAYOUT, phi::sparse::FusedAttentionCsrKernel, float, double) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR); }
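AttnSoftmaxGpuKernel above assigns one batched-CSR row to each group of blockDim.x threads and derives that row's value range from x_crows. The single-batch form of that indexing is easy to lose in the flattened text, so here is a minimal illustrative kernel (the names and the row-sum payload are made up) showing the [crows[r], crows[r+1]) convention it builds on:

#include <cstdint>
#include <cuda_runtime.h>

// One thread per CSR row: crows has M+1 entries and row r owns the half-open
// range [crows[r], crows[r+1]) of cols/values.
__global__ void RowSumCsr(const int64_t* crows, const int64_t* cols,
                          const float* values, float* row_sum, int M) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  if (r >= M) return;
  float s = 0.f;
  for (int64_t k = crows[r]; k < crows[r + 1]; ++k)
    s += values[k];            // cols[k] would give the column index
  row_sum[r] = s;
}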
40cfe32a3ab7b5358fc6853d1a19736cebf53df2.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/fused_attention_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/activation_functor.h" #include "paddle/phi/kernels/funcs/math_cuda_utils.h" #include "paddle/phi/kernels/funcs/sparse/sparse_blas.h" #include "paddle/phi/kernels/sparse/empty_kernel.h" #include "paddle/phi/kernels/sparse/matmul_kernel.h" #include "paddle/phi/kernels/sparse/sparse_utils_kernel.h" namespace phi { namespace sparse { template <typename T> __global__ void AttnSoftmaxGpuKernel(const int64_t* x_crows, const int64_t* x_cols, const T* x_values, const T* kp_mask, const T* attn_mask, T* out_values, int M, int total_row_num, int num_heads, int batch_nnz) { // out = exp(x-x_max) / sum(exp(x-x_max)) int row = blockIdx.x * blockDim.y + threadIdx.y; if (row >= total_row_num) return; int cur_batch = row / M; int cur_row = row % M; int crow_idx = cur_batch * (M + 1) + cur_row; int row_first = cur_batch * batch_nnz + static_cast<int>(x_crows[crow_idx]); int row_nnz = static_cast<int>(x_crows[crow_idx + 1] - x_crows[crow_idx]); if (row_nnz == 0) return; T max_val = -std::numeric_limits<T>::infinity(); for (int idx = threadIdx.x; idx < row_nnz; idx += blockDim.x) { bool mask = false; int col_idx = static_cast<int>(x_cols[row_first + idx]); if (kp_mask != nullptr && kp_mask[(cur_batch / num_heads) * M + col_idx] == 0) { mask = true; } if (attn_mask != nullptr && attn_mask[cur_row * M + col_idx] == 0) { mask = true; } if (!mask) { T val = x_values[row_first + idx]; if (val > max_val) { max_val = val; } out_values[row_first + idx] = val; } else { // Note corner case: when all elements of the row are masked, result // may be wrong because of exp('-inf' - '-inf'), just ignore now. 
out_values[row_first + idx] = -std::numeric_limits<T>::infinity(); } } T row_max_val = phi::funcs::WarpReduceMax<T>(max_val, 0xFFFFFFFF); T exp_sum = 0; for (int idx = threadIdx.x; idx < row_nnz; idx += blockDim.x) { auto functor = phi::funcs::CudaExpFunctor<T>(); T exp = functor(out_values[row_first + idx] - row_max_val); exp_sum += exp; out_values[row_first + idx] = exp; } T row_exp_sum = phi::funcs::WarpReduceSum<T>(exp_sum, 0xFFFFFFFF); for (int idx = threadIdx.x; idx < row_nnz; idx += blockDim.x) { out_values[row_first + idx] = out_values[row_first + idx] / row_exp_sum; } } template <typename T, typename Context> void FusedAttentionCsrKernel( const Context& dev_ctx, const DenseTensor& query, const DenseTensor& key, const DenseTensor& value, const SparseCsrTensor& sparse_mask, const paddle::optional<DenseTensor>& key_padding_mask, const paddle::optional<DenseTensor>& attn_mask, DenseTensor* out, SparseCsrTensor* softmax) { #if CUDA_VERSION >= 11080 /* Check Shape */ auto q_dim = query.dims(); auto q_rank = q_dim.size(); int total_row_num = 1; int batch_num = 1; for (int i = 0; i < q_rank - 1; ++i) { total_row_num *= q_dim[i]; if (i < q_rank - 2) { batch_num *= q_dim[i]; } } int M = q_dim[q_rank - 2]; int N = q_dim[q_rank - 1]; PADDLE_ENFORCE_EQ(query.dims().size(), 4, phi::errors::InvalidArgument(" 'query' must be 4D Tensor")); PADDLE_ENFORCE_EQ(key.dims().size(), 4, phi::errors::InvalidArgument(" 'key' must be 4D Tensor")); PADDLE_ENFORCE_EQ(value.dims().size(), 4, phi::errors::InvalidArgument(" 'value' must be 4D Tensor")); PADDLE_ENFORCE_EQ( sparse_mask.dims().size(), 3, phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); PADDLE_ENFORCE_EQ( sparse_mask.dims()[0], q_dim[0] * q_dim[1], phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); PADDLE_ENFORCE_EQ( sparse_mask.dims()[1], M, phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); PADDLE_ENFORCE_EQ( sparse_mask.dims()[2], M, phi::errors::InvalidArgument("dense shape of 'sparse_mask' must be " "[batch_size*num_heads, seq_len, seq_len]")); const auto kp_mask_ptr = key_padding_mask.get_ptr(); if (kp_mask_ptr) { PADDLE_ENFORCE_EQ( kp_mask_ptr->dims().size(), 2, phi::errors::InvalidArgument( "shape of 'key_padding_mask' must be [batch_size, seq_len]")); PADDLE_ENFORCE_EQ( kp_mask_ptr->dims()[0], q_dim[0], phi::errors::InvalidArgument( "shape of 'key_padding_mask' must be [batch_size, seq_len]")); PADDLE_ENFORCE_EQ( kp_mask_ptr->dims()[1], M, phi::errors::InvalidArgument( "shape of 'key_padding_mask' must be [batch_size, seq_len]")); } const auto attn_mask_ptr = attn_mask.get_ptr(); if (attn_mask_ptr) { PADDLE_ENFORCE_EQ(attn_mask_ptr->dims().size(), 2, phi::errors::InvalidArgument( "shape of 'attn_mask' must be [seq_len, seq_len]")); PADDLE_ENFORCE_EQ(attn_mask_ptr->dims()[0], M, phi::errors::InvalidArgument( "shape of 'attn_mask' must be [seq_len, seq_len]")); PADDLE_ENFORCE_EQ(attn_mask_ptr->dims()[1], M, phi::errors::InvalidArgument( "shape of 'attn_mask' must be [seq_len, seq_len]")); } /* Step1: SDD Matmul, reuse matmul */ SparseCsrTensor sdd_result; EmptyLikeCsrKernel<T, Context>(dev_ctx, sparse_mask, &sdd_result); auto sparse_blas = phi::funcs::sparse::GetSparseBlas<Context, T>(dev_ctx); sparse_blas.SDDMM(false, true, static_cast<T>(1 / std::sqrt(N)), query, key, static_cast<T>(0), &sdd_result); EmptyLikeCsrKernel<T, Context>(dev_ctx, sdd_result, 
softmax); dim3 grid((total_row_num + 7) / 8); dim3 block(WARP_SIZE, 8); int batch_nnz = sdd_result.nnz() / batch_num; AttnSoftmaxGpuKernel<T><<<grid, block, 0, dev_ctx.stream()>>>( sdd_result.crows().data<int64_t>(), sdd_result.cols().data<int64_t>(), sdd_result.values().data<T>(), kp_mask_ptr ? kp_mask_ptr->data<T>() : nullptr, attn_mask_ptr ? attn_mask_ptr->data<T>() : nullptr, softmax->mutable_values()->data<T>(), M, total_row_num, q_dim[1], batch_nnz); softmax->set_dims(phi::make_ddim({q_dim[0], q_dim[1], q_dim[2], q_dim[2]})); MatmulCsrDenseKernel<T, Context>(dev_ctx, *softmax, value, out); #else PADDLE_THROW( phi::errors::Unimplemented("forward of 'sparse.nn.functional.attention' " "use 'cusparseCsrSetStridedBatch', which is " "completed supported from CUDA 11.8")); #endif } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(fused_attention_csr, GPU, ALL_LAYOUT, phi::sparse::FusedAttentionCsrKernel, float, double) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR); }
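The softmax kernel in this pair leans on phi::funcs::WarpReduceMax / WarpReduceSum to combine per-lane partial maxima and exponential sums across a warp. The helpers below are an illustrative stand-in for what such warp-wide reductions typically look like, not Paddle's actual implementation:

#include <cuda_runtime.h>

__device__ inline float WarpMax(float v, unsigned mask = 0xFFFFFFFFu) {
  // butterfly exchange: after log2(32) = 5 steps every lane holds the warp max
  for (int offset = 16; offset > 0; offset >>= 1)
    v = fmaxf(v, __shfl_xor_sync(mask, v, offset));
  return v;
}

__device__ inline float WarpSum(float v, unsigned mask = 0xFFFFFFFFu) {
  for (int offset = 16; offset > 0; offset >>= 1)
    v += __shfl_xor_sync(mask, v, offset);
  return v;
}

In the kernel above, each thread first accumulates over its blockDim.x-strided slice of the row's nonzeros; a warp-wide combine of this kind then yields the row maximum and the exponential sum used for the exp(x - x_max) / sum normalization.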
f9a08dab7c89a452a13fb91e67ddfe8b3e551f37.hip
// !!! This is a file automatically generated by hipify!!!
#include"CudaHelper.cuh"
#include"CudaInterface.hpp"

int currCudaDeviceID = 0;

void Anime4KCPP::Cuda::cuSetDeviceID(const int id)
{
    if (id < 0 || id >= cuGetDeviceCount())
        currCudaDeviceID = 0;
    else
        currCudaDeviceID = id;
}

int Anime4KCPP::Cuda::cuGetDeviceID() noexcept
{
    return currCudaDeviceID;
}

void Anime4KCPP::Cuda::cuReleaseCuda() noexcept
{
    hipDeviceReset();
    currCudaDeviceID = 0;
}

int Anime4KCPP::Cuda::cuGetDeviceCount() noexcept
{
    int deviceCount;
    hipError_t err = hipGetDeviceCount(&deviceCount);
    if (err != hipSuccess)
        return 0;
    return deviceCount;
}

std::string Anime4KCPP::Cuda::cuGetDeviceInfo(const int id)
{
    hipDeviceProp_t deviceProp;
    hipError_t err = id < 0 || id >= cuGetDeviceCount() ?
        hipGetDeviceProperties(&deviceProp, 0):
        hipGetDeviceProperties(&deviceProp, id);
    CheckCudaErr(err);

    return "Device id: " + std::to_string(id) +
        "\n Type: " + std::string(deviceProp.name) +
        "\n Video Memory: " + std::to_string(deviceProp.totalGlobalMem >> 20) + " mb" +
        "\n Compute Capability: " + std::to_string(deviceProp.major) + "." + std::to_string(deviceProp.minor);
}

std::string Anime4KCPP::Cuda::cuGetCudaInfo()
{
    std::string info;
    int deviceCount = cuGetDeviceCount();
    if (!deviceCount)
        info = "No CUDA device found";
    else
        for (int i = 0; i < deviceCount; i++)
            info += cuGetDeviceInfo(i) + "\n";
    return info;
}

bool Anime4KCPP::Cuda::cuCheckDeviceSupport(const int id) noexcept
{
    hipDeviceProp_t deviceProp;
    hipError_t err = id < 0 || id >= cuGetDeviceCount() ?
        hipGetDeviceProperties(&deviceProp, 0) :
        hipGetDeviceProperties(&deviceProp, id);
    if (err != hipSuccess || deviceProp.major < 2)
        return false;
    return true;
}
f9a08dab7c89a452a13fb91e67ddfe8b3e551f37.cu
#include"CudaHelper.cuh"
#include"CudaInterface.hpp"

int currCudaDeviceID = 0;

void Anime4KCPP::Cuda::cuSetDeviceID(const int id)
{
    if (id < 0 || id >= cuGetDeviceCount())
        currCudaDeviceID = 0;
    else
        currCudaDeviceID = id;
}

int Anime4KCPP::Cuda::cuGetDeviceID() noexcept
{
    return currCudaDeviceID;
}

void Anime4KCPP::Cuda::cuReleaseCuda() noexcept
{
    cudaDeviceReset();
    currCudaDeviceID = 0;
}

int Anime4KCPP::Cuda::cuGetDeviceCount() noexcept
{
    int deviceCount;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    if (err != cudaSuccess)
        return 0;
    return deviceCount;
}

std::string Anime4KCPP::Cuda::cuGetDeviceInfo(const int id)
{
    cudaDeviceProp deviceProp;
    cudaError_t err = id < 0 || id >= cuGetDeviceCount() ?
        cudaGetDeviceProperties(&deviceProp, 0):
        cudaGetDeviceProperties(&deviceProp, id);
    CheckCudaErr(err);

    return "Device id: " + std::to_string(id) +
        "\n Type: " + std::string(deviceProp.name) +
        "\n Video Memory: " + std::to_string(deviceProp.totalGlobalMem >> 20) + " mb" +
        "\n Compute Capability: " + std::to_string(deviceProp.major) + "." + std::to_string(deviceProp.minor);
}

std::string Anime4KCPP::Cuda::cuGetCudaInfo()
{
    std::string info;
    int deviceCount = cuGetDeviceCount();
    if (!deviceCount)
        info = "No CUDA device found";
    else
        for (int i = 0; i < deviceCount; i++)
            info += cuGetDeviceInfo(i) + "\n";
    return info;
}

bool Anime4KCPP::Cuda::cuCheckDeviceSupport(const int id) noexcept
{
    cudaDeviceProp deviceProp;
    cudaError_t err = id < 0 || id >= cuGetDeviceCount() ?
        cudaGetDeviceProperties(&deviceProp, 0) :
        cudaGetDeviceProperties(&deviceProp, id);
    if (err != cudaSuccess || deviceProp.major < 2)
        return false;
    return true;
}
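Both sides of this pair call CheckCudaErr from CudaHelper.cuh, which is not included in the row. A typical definition of such a helper, shown purely as an assumption and renamed to make that clear, turns a non-success status into an exception so wrappers like cuGetDeviceInfo can simply forward the status:

#include <cuda_runtime.h>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for the CheckCudaErr macro defined in CudaHelper.cuh.
#define CheckCudaErrSketch(call)                                        \
  do {                                                                  \
    cudaError_t e_ = (call);                                            \
    if (e_ != cudaSuccess)                                              \
      throw std::runtime_error(std::string("CUDA error: ") +            \
                               cudaGetErrorString(e_));                 \
  } while (0)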
97f9a14427789464feaf088028ec9086a2be80b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: jglaser /*! \file SFCPackUpdaterGPU.cu \brief Defines GPU kernel code for generating the space-filling curve sorted order on the GPU. Used by SFCPackUpdaterGPU. */ #include "SFCPackUpdaterGPU.cuh" #include "kernels/mergesort.cuh" //! 
Kernel to bin particles template<bool twod> __global__ void gpu_sfc_bin_particles_kernel(unsigned int N, const Scalar4 *d_pos, unsigned int *d_particle_bins, const unsigned int *d_traversal_order, unsigned int n_grid, unsigned int *d_sorted_order, const BoxDim box) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; // fetch particle position Scalar4 postype = d_pos[idx]; Scalar3 p = make_scalar3(postype.x, postype.y, postype.z); Scalar3 f = box.makeFraction(p); unsigned int ib = (unsigned int)(f.x * n_grid) % n_grid; unsigned int jb = (unsigned int)(f.y * n_grid) % n_grid; unsigned int kb = (unsigned int)(f.z * n_grid) % n_grid; // record its bin unsigned int bin; if (twod) { // do not use Hilbert curve in 2D bin = ib*n_grid + jb; d_particle_bins[idx] = bin; } else { bin = ib*(n_grid*n_grid) + jb * n_grid + kb; d_particle_bins[idx] = d_traversal_order[bin]; } // store index of ptl d_sorted_order[idx] = idx; } /*! \param N number of local particles \param d_pos Device array of positions \param d_particle_bins Device array of particle bins \param d_traversal_order Device array of Hilbert-curve bins \param n_grid Number of grid elements along one edge \param d_sorted_order Sorted order of particles \param box Box dimensions \param twod If true, bin particles in two dimensions */ void gpu_generate_sorted_order(unsigned int N, const Scalar4 *d_pos, unsigned int *d_particle_bins, unsigned int *d_traversal_order, unsigned int n_grid, unsigned int *d_sorted_order, const BoxDim& box, bool twod, mgpu::ContextPtr mgpu_context) { // maybe need to autotune, but SFCPackUpdater is called infrequently unsigned int block_size = 512; unsigned int n_blocks = N/block_size + 1; if (twod) hipLaunchKernelGGL(( gpu_sfc_bin_particles_kernel<true>), dim3(n_blocks), dim3(block_size), 0, 0, N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box); else hipLaunchKernelGGL(( gpu_sfc_bin_particles_kernel<false>), dim3(n_blocks), dim3(block_size), 0, 0, N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box); // Sort particles if (N) mgpu::MergesortPairs(d_particle_bins, d_sorted_order, N, *mgpu_context); } //! 
Kernel to apply sorted order __global__ void gpu_apply_sorted_order_kernel( unsigned int N, const unsigned int *d_sorted_order, const Scalar4 *d_pos, Scalar4 *d_pos_alt, const Scalar4 *d_vel, Scalar4 *d_vel_alt, const Scalar3 *d_accel, Scalar3 *d_accel_alt, const Scalar *d_charge, Scalar *d_charge_alt, const Scalar *d_diameter, Scalar *d_diameter_alt, const int3 *d_image, int3 *d_image_alt, const unsigned int *d_body, unsigned int *d_body_alt, const unsigned int *d_tag, unsigned int *d_tag_alt, const Scalar4 *d_orientation, Scalar4 *d_orientation_alt, const Scalar *d_net_virial, Scalar *d_net_virial_alt, const Scalar4 *d_net_force, Scalar4 *d_net_force_alt, const Scalar4 *d_net_torque, Scalar4 *d_net_torque_alt, unsigned int *d_rtag) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; unsigned int old_idx = d_sorted_order[idx]; // permute and copy over particle data d_pos_alt[idx] = d_pos[old_idx]; d_vel_alt[idx] = d_vel[old_idx]; d_accel_alt[idx] = d_accel[old_idx]; d_charge_alt[idx] = d_charge[old_idx]; d_diameter_alt[idx] = d_diameter[old_idx]; d_image_alt[idx] = d_image[old_idx]; d_body_alt[idx] = d_body[old_idx]; unsigned int tag = d_tag[old_idx]; d_tag_alt[idx] = tag; d_orientation_alt[idx] = d_orientation[old_idx]; d_net_virial_alt[idx] = d_net_virial[old_idx]; d_net_force_alt[idx] = d_net_force[old_idx]; d_net_torque_alt[idx] = d_net_torque[old_idx]; // update rtag to point to particle position in new arrays d_rtag[tag] = idx; } void gpu_apply_sorted_order( unsigned int N, const unsigned int *d_sorted_order, const Scalar4 *d_pos, Scalar4 *d_pos_alt, const Scalar4 *d_vel, Scalar4 *d_vel_alt, const Scalar3 *d_accel, Scalar3 *d_accel_alt, const Scalar *d_charge, Scalar *d_charge_alt, const Scalar *d_diameter, Scalar *d_diameter_alt, const int3 *d_image, int3 *d_image_alt, const unsigned int *d_body, unsigned int *d_body_alt, const unsigned int *d_tag, unsigned int *d_tag_alt, const Scalar4 *d_orientation, Scalar4 *d_orientation_alt, const Scalar *d_net_virial, Scalar *d_net_virial_alt, const Scalar4 *d_net_force, Scalar4 *d_net_force_alt, const Scalar4 *d_net_torque, Scalar4 *d_net_torque_alt, unsigned int *d_rtag ) { unsigned int block_size = 512; unsigned int n_blocks = N/block_size + 1; hipLaunchKernelGGL(( gpu_apply_sorted_order_kernel), dim3(n_blocks), dim3(block_size), 0, 0, N, d_sorted_order, d_pos, d_pos_alt, d_vel, d_vel_alt, d_accel, d_accel_alt, d_charge, d_charge_alt, d_diameter, d_diameter_alt, d_image, d_image_alt, d_body, d_body_alt, d_tag, d_tag_alt, d_orientation, d_orientation_alt, d_net_virial, d_net_virial_alt, d_net_force, d_net_force_alt, d_net_torque, d_net_torque_alt, d_rtag); }
97f9a14427789464feaf088028ec9086a2be80b9.cu
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: jglaser /*! \file SFCPackUpdaterGPU.cu \brief Defines GPU kernel code for generating the space-filling curve sorted order on the GPU. Used by SFCPackUpdaterGPU. */ #include "SFCPackUpdaterGPU.cuh" #include "kernels/mergesort.cuh" //! 
Kernel to bin particles template<bool twod> __global__ void gpu_sfc_bin_particles_kernel(unsigned int N, const Scalar4 *d_pos, unsigned int *d_particle_bins, const unsigned int *d_traversal_order, unsigned int n_grid, unsigned int *d_sorted_order, const BoxDim box) { unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= N) return; // fetch particle position Scalar4 postype = d_pos[idx]; Scalar3 p = make_scalar3(postype.x, postype.y, postype.z); Scalar3 f = box.makeFraction(p); unsigned int ib = (unsigned int)(f.x * n_grid) % n_grid; unsigned int jb = (unsigned int)(f.y * n_grid) % n_grid; unsigned int kb = (unsigned int)(f.z * n_grid) % n_grid; // record its bin unsigned int bin; if (twod) { // do not use Hilbert curve in 2D bin = ib*n_grid + jb; d_particle_bins[idx] = bin; } else { bin = ib*(n_grid*n_grid) + jb * n_grid + kb; d_particle_bins[idx] = d_traversal_order[bin]; } // store index of ptl d_sorted_order[idx] = idx; } /*! \param N number of local particles \param d_pos Device array of positions \param d_particle_bins Device array of particle bins \param d_traversal_order Device array of Hilbert-curve bins \param n_grid Number of grid elements along one edge \param d_sorted_order Sorted order of particles \param box Box dimensions \param twod If true, bin particles in two dimensions */ void gpu_generate_sorted_order(unsigned int N, const Scalar4 *d_pos, unsigned int *d_particle_bins, unsigned int *d_traversal_order, unsigned int n_grid, unsigned int *d_sorted_order, const BoxDim& box, bool twod, mgpu::ContextPtr mgpu_context) { // maybe need to autotune, but SFCPackUpdater is called infrequently unsigned int block_size = 512; unsigned int n_blocks = N/block_size + 1; if (twod) gpu_sfc_bin_particles_kernel<true><<<n_blocks, block_size>>>(N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box); else gpu_sfc_bin_particles_kernel<false><<<n_blocks, block_size>>>(N, d_pos, d_particle_bins, d_traversal_order, n_grid, d_sorted_order, box); // Sort particles if (N) mgpu::MergesortPairs(d_particle_bins, d_sorted_order, N, *mgpu_context); } //! 
Kernel to apply sorted order __global__ void gpu_apply_sorted_order_kernel( unsigned int N, const unsigned int *d_sorted_order, const Scalar4 *d_pos, Scalar4 *d_pos_alt, const Scalar4 *d_vel, Scalar4 *d_vel_alt, const Scalar3 *d_accel, Scalar3 *d_accel_alt, const Scalar *d_charge, Scalar *d_charge_alt, const Scalar *d_diameter, Scalar *d_diameter_alt, const int3 *d_image, int3 *d_image_alt, const unsigned int *d_body, unsigned int *d_body_alt, const unsigned int *d_tag, unsigned int *d_tag_alt, const Scalar4 *d_orientation, Scalar4 *d_orientation_alt, const Scalar *d_net_virial, Scalar *d_net_virial_alt, const Scalar4 *d_net_force, Scalar4 *d_net_force_alt, const Scalar4 *d_net_torque, Scalar4 *d_net_torque_alt, unsigned int *d_rtag) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; unsigned int old_idx = d_sorted_order[idx]; // permute and copy over particle data d_pos_alt[idx] = d_pos[old_idx]; d_vel_alt[idx] = d_vel[old_idx]; d_accel_alt[idx] = d_accel[old_idx]; d_charge_alt[idx] = d_charge[old_idx]; d_diameter_alt[idx] = d_diameter[old_idx]; d_image_alt[idx] = d_image[old_idx]; d_body_alt[idx] = d_body[old_idx]; unsigned int tag = d_tag[old_idx]; d_tag_alt[idx] = tag; d_orientation_alt[idx] = d_orientation[old_idx]; d_net_virial_alt[idx] = d_net_virial[old_idx]; d_net_force_alt[idx] = d_net_force[old_idx]; d_net_torque_alt[idx] = d_net_torque[old_idx]; // update rtag to point to particle position in new arrays d_rtag[tag] = idx; } void gpu_apply_sorted_order( unsigned int N, const unsigned int *d_sorted_order, const Scalar4 *d_pos, Scalar4 *d_pos_alt, const Scalar4 *d_vel, Scalar4 *d_vel_alt, const Scalar3 *d_accel, Scalar3 *d_accel_alt, const Scalar *d_charge, Scalar *d_charge_alt, const Scalar *d_diameter, Scalar *d_diameter_alt, const int3 *d_image, int3 *d_image_alt, const unsigned int *d_body, unsigned int *d_body_alt, const unsigned int *d_tag, unsigned int *d_tag_alt, const Scalar4 *d_orientation, Scalar4 *d_orientation_alt, const Scalar *d_net_virial, Scalar *d_net_virial_alt, const Scalar4 *d_net_force, Scalar4 *d_net_force_alt, const Scalar4 *d_net_torque, Scalar4 *d_net_torque_alt, unsigned int *d_rtag ) { unsigned int block_size = 512; unsigned int n_blocks = N/block_size + 1; gpu_apply_sorted_order_kernel<<<n_blocks, block_size>>>(N, d_sorted_order, d_pos, d_pos_alt, d_vel, d_vel_alt, d_accel, d_accel_alt, d_charge, d_charge_alt, d_diameter, d_diameter_alt, d_image, d_image_alt, d_body, d_body_alt, d_tag, d_tag_alt, d_orientation, d_orientation_alt, d_net_virial, d_net_virial_alt, d_net_force, d_net_force_alt, d_net_torque, d_net_torque_alt, d_rtag); }
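After binning, the pair above sorts the (bin, particle index) pairs with mgpu::MergesortPairs from the bundled mergesort kernels. For comparison only, the same step can be expressed with Thrust's sort_by_key on the two device arrays; the argument names below follow the kernel signatures above:

#include <thrust/device_ptr.h>
#include <thrust/sort.h>

// Sort particle indices by their (traversal-ordered) bin id, in place.
void sort_bins_with_thrust(unsigned int* d_particle_bins,
                           unsigned int* d_sorted_order,
                           unsigned int N) {
  thrust::device_ptr<unsigned int> keys(d_particle_bins);
  thrust::device_ptr<unsigned int> vals(d_sorted_order);
  thrust::sort_by_key(keys, keys + N, vals);
}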
03c7ae99e77d033eec3c5c421dfcb9922ab375ae.hip
// !!! This is a file automatically generated by hipify!!! // This file contains the reduce kernel and its wrapper. The code comes from CUDA // sdk example "CUDA_path/Samples/6_Advanced/reduction". This uses the kernel 4 // since it doesn't require the input to be power of 2 yet still retain most optimization // be definitely fast enough to handle 500cube image reduction #include <stdio.h> #include <iostream> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <iostream> #include <algorithm> #include "helper_functions.h" #include "helper_cuda.h" #include "reducer.h" // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory{ __device__ inline operator T *(){ extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double>{ __device__ inline operator double *(){ extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const{ extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; /* Comments from the SDK. This version uses the warp shuffle operation if available to reduce warp synchronization. When shuffle is not available the final warp's worth of work is unrolled to reduce looping overhead. See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ for additional information about using shuffle to perform a reduction within a warp. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ __global__ void reduce_sum_kernel(float * g_idata, float * g_odata, unsigned int n, unsigned int blockSize){ float * sdata = SharedMemory<float>(); // perform first level of reduction, // reading from global memory, writing to shared memory // Each thread corresponding to two values in this kernel. That's where the blockDim.x * 2 and the if comes from unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; float mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem. When reduced down to single warp shift to // warp shuffling. for (unsigned int s=blockDim.x/2; s>32; s>>=1){ if (tid < s){ sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } if ( tid < 32 ){ // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2){ mySum += __shfl_down(mySum, offset); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } __global__ void reduce_max_kernel(float * g_idata, float * g_odata, unsigned int n, unsigned int blockSize){ float * sdata = SharedMemory<float>(); // perform first level of reduction, // reading from global memory, writing to shared memory // Each thread corresponding to two values in this kernel. That's where the blockDim.x * 2 and the if comes from unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; float myMax = (i < n) ? 
g_idata[i] : -10000000000000.0; if (i + blockSize < n) myMax = max(myMax, g_idata[i+blockSize]); sdata[tid] = myMax; __syncthreads(); // do reduction in shared mem. When reduced down to single warp shift to // warp shuffling. for (unsigned int s=blockDim.x/2; s>32; s>>=1){ if (tid < s){ sdata[tid] = myMax = max(myMax , sdata[tid + s]); } __syncthreads(); } if ( tid < 32 ){ // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) myMax = max(myMax, sdata[tid + 32]); for (int offset = warpSize/2; offset > 0; offset /= 2){ myMax = max( myMax, __shfl_down(myMax, offset)); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = myMax; } __global__ void reduce_min_kernel(float * g_idata, float * g_odata, unsigned int n, unsigned int blockSize){ float * sdata = SharedMemory<float>(); // perform first level of reduction, // reading from global memory, writing to shared memory // Each thread corresponding to two values in this kernel. That's where the blockDim.x * 2 and the if comes from unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; float myMin = (i < n) ? g_idata[i] : 10000000000000.0; if (i + blockSize < n) myMin = min(myMin, g_idata[i+blockSize]); sdata[tid] = myMin; __syncthreads(); // do reduction in shared mem. When reduced down to single warp shift to // warp shuffling. for (unsigned int s=blockDim.x/2; s>32; s>>=1){ if (tid < s){ sdata[tid] = myMin = min(myMin , sdata[tid + s]); } __syncthreads(); } if ( tid < 32 ){ // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) myMin = min(myMin, sdata[tid + 32]); for (int offset = warpSize/2; offset > 0; offset /= 2){ myMin = min( myMin, __shfl_down(myMin, offset)); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = myMin; } /*****************************Kernel above, Helper functions and wrapper below************************************/ bool isPow2(unsigned int x){ return ((x&(x-1))==0); } unsigned int nextPow2(unsigned int x){ --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the given reduction kernel // For the kernels >= 3, we set threads / block to the minimum of maxThreads and // n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel // 6, we observe the maximum specified number of blocks, because each thread in // that kernel can process a variable number of elements. //////////////////////////////////////////////////////////////////////////////// void getNumBlocksAndThreads(int n, int &blocks, int &threads){ //get device capability, to avoid block/grid size exceed the upper bound int maxThreads = 1024; int maxBlock = 2147483647; threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + threads * 2 - 1) / (threads * 2); // Need to notice that there is limit on how large block dimension can be. // On my GTX980Ti the maximum blocks number on X direction is 2,147,483,647. // So this is completely enough for 500cube and 1000cube image so I delete // the check code here. 
Please check this with // CUDA Path/samples/1_Utilities/deviceQuery/deviceQuery } void reduce_sum_kernel_wrapper(int size, int threads, int blocks, float * d_idata, float * d_odata){ dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float); switch (threads){ case 1024: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 1024); break; case 512: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 512); break; case 256: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 256); break; case 128: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 128); break; case 64: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 64); break; case 32: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 32); break; case 16: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 16); break; case 8: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 8); break; case 4: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 4); break; case 2: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 2); break; case 1: hipLaunchKernelGGL(( reduce_sum_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 1); break; default: break; // std::cout<<"From function reduce_kernel_wrapper: The number of thread is not a power of 2. Check your code!"<<std::endl; } } void reduce_max_kernel_wrapper(int size, int threads, int blocks, float * d_idata, float * d_odata){ dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(float) : threads * sizeof(float); switch (threads){ case 1024: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 1024); break; case 512: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 512); break; case 256: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 256); break; case 128: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 128); break; case 64: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 64); break; case 32: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 32); break; case 16: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 16); break; case 8: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 8); break; case 4: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 4); break; case 2: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 2); break; case 1: hipLaunchKernelGGL(( reduce_max_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 1); break; default: break; // std::cout<<"From function reduce_kernel_wrapper: The number of thread is not a power of 2. Check your code!"<<std::endl; } } void reduce_min_kernel_wrapper(int size, int threads, int blocks, float * d_idata, float * d_odata){ dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(float) : threads * sizeof(float); switch (threads){ case 1024: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 1024); break; case 512: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 512); break; case 256: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 256); break; case 128: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 128); break; case 64: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 64); break; case 32: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 32); break; case 16: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 16); break; case 8: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 8); break; case 4: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 4); break; case 2: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 2); break; case 1: hipLaunchKernelGGL(( reduce_min_kernel), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size, 1); break; default: break; // std::cout<<"From function reduce_kernel_wrapper: The number of thread is not a power of 2. Check your code!"<<std::endl; } } /*********************************** Helper functions above, wrapper below ********************************/ float Reducer::reduce_sum_wrapper(int n, float *d_idata, float *d_odata){ // Create and start timer StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); float gpu_result = 0; int numThreads = 0; int numBlocks = 0; getNumBlocksAndThreads(n, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; // First pass of the kernel. Basically do a reduction and copy to d_odata int kernelCount = 0; reduce_sum_kernel_wrapper(n, numThreads, numBlocks, d_idata, d_odata); kernelCount++; // sum partial block sums on GPU int s = numBlocks; // Since we are considering reducing 500cube images, the cpu reduce scheme won't save // use much time compared to total kernel launch. So it is omitted here. while (s > 1){ getNumBlocksAndThreads(s, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; reduce_sum_kernel_wrapper(s, numThreads, numBlocks, d_odata, d_odata); s = (s + (numThreads*2-1)) / (numThreads*2); kernelCount++; } // Stop timer hipDeviceSynchronize(); sdkStopTimer(&timer); double reduce_time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); // copy final sum from device to host checkCudaErrors(hipMemcpy(&gpu_result, d_odata, sizeof(float), hipMemcpyDeviceToHost)); // std::cout<<"From function reduce_wrapper: The reduce kernel has launch "<< kernelCount << " times." << std::endl; // std::cout<<"Time: " << reduce_time << " ms." 
<< std::endl; return gpu_result; } float Reducer::reduce_max_wrapper(int n, float *d_idata, float *d_odata){ // Create and start timer StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); float gpu_result = 0; int numThreads = 0; int numBlocks = 0; getNumBlocksAndThreads(n, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; // First pass of the kernel. Basically do a reduction and copy to d_odata int kernelCount = 0; reduce_max_kernel_wrapper(n, numThreads, numBlocks, d_idata, d_odata); kernelCount++; // sum partial block sums on GPU int s = numBlocks; // Since we are considering reducing 500cube images, the cpu reduce scheme won't save // use much time compared to total kernel launch. So it is omitted here. while (s > 1){ getNumBlocksAndThreads(s, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; reduce_max_kernel_wrapper(s, numThreads, numBlocks, d_odata, d_odata); s = (s + (numThreads*2-1)) / (numThreads*2); kernelCount++; } // Stop timer hipDeviceSynchronize(); sdkStopTimer(&timer); double reduce_time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); // copy final sum from device to host checkCudaErrors(hipMemcpy(&gpu_result, d_odata, sizeof(float), hipMemcpyDeviceToHost)); // std::cout<<"From function reduce_wrapper: The reduce kernel has launch "<< kernelCount << " times." << std::endl; // std::cout<<"Time: " << reduce_time << " ms." << std::endl; return gpu_result; } float Reducer::reduce_min_wrapper(int n, float *d_idata, float *d_odata){ // Create and start timer StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); float gpu_result = 0; int numThreads = 0; int numBlocks = 0; getNumBlocksAndThreads(n, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; // First pass of the kernel. Basically do a reduction and copy to d_odata int kernelCount = 0; reduce_min_kernel_wrapper(n, numThreads, numBlocks, d_idata, d_odata); kernelCount++; // sum partial block sums on GPU int s = numBlocks; // Since we are considering reducing 500cube images, the cpu reduce scheme won't save // use much time compared to total kernel launch. So it is omitted here. while (s > 1){ getNumBlocksAndThreads(s, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; reduce_min_kernel_wrapper(s, numThreads, numBlocks, d_odata, d_odata); s = (s + (numThreads*2-1)) / (numThreads*2); kernelCount++; } // Stop timer hipDeviceSynchronize(); sdkStopTimer(&timer); double reduce_time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); // copy final sum from device to host checkCudaErrors(hipMemcpy(&gpu_result, d_odata, sizeof(float), hipMemcpyDeviceToHost)); // std::cout<<"From function reduce_wrapper: The reduce kernel has launch "<< kernelCount << " times." << std::endl; // std::cout<<"Time: " << reduce_time << " ms." << std::endl; return gpu_result; }
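The reduction kernels above finish the last 32 lanes with the unmasked __shfl_down intrinsic, which HIP still provides but which CUDA 9 deprecated in favour of the explicitly masked __shfl_down_sync. A sketch of the same warp tail written against the newer CUDA intrinsic (WarpSumDown is a made-up helper name):

#include <cuda_runtime.h>

__device__ inline float WarpSumDown(float v) {
  // same loop shape as the tail of reduce_sum_kernel, with the masked intrinsic
  for (int offset = warpSize / 2; offset > 0; offset /= 2)
    v += __shfl_down_sync(0xFFFFFFFFu, v, offset);
  return v;   // lane 0 ends up holding the warp's sum
}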
03c7ae99e77d033eec3c5c421dfcb9922ab375ae.cu
// This file contains the reduce kernel and its wrapper. The code comes from CUDA // sdk example "CUDA_path/Samples/6_Advanced/reduction". This uses the kernel 4 // since it doesn't require the input to be power of 2 yet still retain most optimization // be definitely fast enough to handle 500cube image reduction #include <stdio.h> #include <iostream> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <iostream> #include <algorithm> #include "helper_functions.h" #include "helper_cuda.h" #include "reducer.h" // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory{ __device__ inline operator T *(){ extern __shared__ int __smem[]; return (T *)__smem; } __device__ inline operator const T *() const { extern __shared__ int __smem[]; return (T *)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double>{ __device__ inline operator double *(){ extern __shared__ double __smem_d[]; return (double *)__smem_d; } __device__ inline operator const double *() const{ extern __shared__ double __smem_d[]; return (double *)__smem_d; } }; /* Comments from the SDK. This version uses the warp shuffle operation if available to reduce warp synchronization. When shuffle is not available the final warp's worth of work is unrolled to reduce looping overhead. See http://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ for additional information about using shuffle to perform a reduction within a warp. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ __global__ void reduce_sum_kernel(float * g_idata, float * g_odata, unsigned int n, unsigned int blockSize){ float * sdata = SharedMemory<float>(); // perform first level of reduction, // reading from global memory, writing to shared memory // Each thread corresponding to two values in this kernel. That's where the blockDim.x * 2 and the if comes from unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; float mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem. When reduced down to single warp shift to // warp shuffling. for (unsigned int s=blockDim.x/2; s>32; s>>=1){ if (tid < s){ sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } if ( tid < 32 ){ // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) mySum += sdata[tid + 32]; for (int offset = warpSize/2; offset > 0; offset /= 2){ mySum += __shfl_down(mySum, offset); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = mySum; } __global__ void reduce_max_kernel(float * g_idata, float * g_odata, unsigned int n, unsigned int blockSize){ float * sdata = SharedMemory<float>(); // perform first level of reduction, // reading from global memory, writing to shared memory // Each thread corresponding to two values in this kernel. That's where the blockDim.x * 2 and the if comes from unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; float myMax = (i < n) ? g_idata[i] : -10000000000000.0; if (i + blockSize < n) myMax = max(myMax, g_idata[i+blockSize]); sdata[tid] = myMax; __syncthreads(); // do reduction in shared mem. 
When reduced down to single warp shift to // warp shuffling. for (unsigned int s=blockDim.x/2; s>32; s>>=1){ if (tid < s){ sdata[tid] = myMax = max(myMax , sdata[tid + s]); } __syncthreads(); } if ( tid < 32 ){ // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) myMax = max(myMax, sdata[tid + 32]); for (int offset = warpSize/2; offset > 0; offset /= 2){ myMax = max( myMax, __shfl_down(myMax, offset)); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = myMax; } __global__ void reduce_min_kernel(float * g_idata, float * g_odata, unsigned int n, unsigned int blockSize){ float * sdata = SharedMemory<float>(); // perform first level of reduction, // reading from global memory, writing to shared memory // Each thread corresponding to two values in this kernel. That's where the blockDim.x * 2 and the if comes from unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x; float myMin = (i < n) ? g_idata[i] : 10000000000000.0; if (i + blockSize < n) myMin = min(myMin, g_idata[i+blockSize]); sdata[tid] = myMin; __syncthreads(); // do reduction in shared mem. When reduced down to single warp shift to // warp shuffling. for (unsigned int s=blockDim.x/2; s>32; s>>=1){ if (tid < s){ sdata[tid] = myMin = min(myMin , sdata[tid + s]); } __syncthreads(); } if ( tid < 32 ){ // Fetch final intermediate sum from 2nd warp if (blockSize >= 64) myMin = min(myMin, sdata[tid + 32]); for (int offset = warpSize/2; offset > 0; offset /= 2){ myMin = min( myMin, __shfl_down(myMin, offset)); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = myMin; } /*****************************Kernel above, Helper functions and wrapper below************************************/ bool isPow2(unsigned int x){ return ((x&(x-1))==0); } unsigned int nextPow2(unsigned int x){ --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the given reduction kernel // For the kernels >= 3, we set threads / block to the minimum of maxThreads and // n/2. For kernels < 3, we set to the minimum of maxThreads and n. For kernel // 6, we observe the maximum specified number of blocks, because each thread in // that kernel can process a variable number of elements. //////////////////////////////////////////////////////////////////////////////// void getNumBlocksAndThreads(int n, int &blocks, int &threads){ //get device capability, to avoid block/grid size exceed the upper bound int maxThreads = 1024; int maxBlock = 2147483647; threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads; blocks = (n + threads * 2 - 1) / (threads * 2); // Need to notice that there is limit on how large block dimension can be. // On my GTX980Ti the maximum blocks number on X direction is 2,147,483,647. // So this is completely enough for 500cube and 1000cube image so I delete // the check code here. Please check this with // CUDA Path/samples/1_Utilities/deviceQuery/deviceQuery } void reduce_sum_kernel_wrapper(int size, int threads, int blocks, float * d_idata, float * d_odata){ dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(float) : threads * sizeof(float); switch (threads){ case 1024: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 1024); break; case 512: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 512); break; case 256: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 256); break; case 128: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 128); break; case 64: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 64); break; case 32: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 32); break; case 16: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 16); break; case 8: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 8); break; case 4: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 4); break; case 2: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 2); break; case 1: reduce_sum_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 1); break; default: break; // std::cout<<"From function reduce_kernel_wrapper: The number of thread is not a power of 2. Check your code!"<<std::endl; } } void reduce_max_kernel_wrapper(int size, int threads, int blocks, float * d_idata, float * d_odata){ dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float); switch (threads){ case 1024: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 1024); break; case 512: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 512); break; case 256: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 256); break; case 128: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 128); break; case 64: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 64); break; case 32: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 32); break; case 16: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 16); break; case 8: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 8); break; case 4: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 4); break; case 2: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 2); break; case 1: reduce_max_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 1); break; default: break; // std::cout<<"From function reduce_kernel_wrapper: The number of thread is not a power of 2. Check your code!"<<std::endl; } } void reduce_min_kernel_wrapper(int size, int threads, int blocks, float * d_idata, float * d_odata){ dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 
2 * threads * sizeof(float) : threads * sizeof(float); switch (threads){ case 1024: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 1024); break; case 512: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 512); break; case 256: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 256); break; case 128: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 128); break; case 64: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 64); break; case 32: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 32); break; case 16: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 16); break; case 8: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 8); break; case 4: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 4); break; case 2: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 2); break; case 1: reduce_min_kernel<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size, 1); break; default: break; // std::cout<<"From function reduce_kernel_wrapper: The number of thread is not a power of 2. Check your code!"<<std::endl; } } /*********************************** Helper functions above, wrapper below ********************************/ float Reducer::reduce_sum_wrapper(int n, float *d_idata, float *d_odata){ // Create and start timer StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); float gpu_result = 0; int numThreads = 0; int numBlocks = 0; getNumBlocksAndThreads(n, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; // First pass of the kernel. Basically do a reduction and copy to d_odata int kernelCount = 0; reduce_sum_kernel_wrapper(n, numThreads, numBlocks, d_idata, d_odata); kernelCount++; // sum partial block sums on GPU int s = numBlocks; // Since we are considering reducing 500cube images, the cpu reduce scheme won't save // use much time compared to total kernel launch. So it is omitted here. while (s > 1){ getNumBlocksAndThreads(s, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; reduce_sum_kernel_wrapper(s, numThreads, numBlocks, d_odata, d_odata); s = (s + (numThreads*2-1)) / (numThreads*2); kernelCount++; } // Stop timer cudaDeviceSynchronize(); sdkStopTimer(&timer); double reduce_time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); // copy final sum from device to host checkCudaErrors(cudaMemcpy(&gpu_result, d_odata, sizeof(float), cudaMemcpyDeviceToHost)); // std::cout<<"From function reduce_wrapper: The reduce kernel has launch "<< kernelCount << " times." << std::endl; // std::cout<<"Time: " << reduce_time << " ms." << std::endl; return gpu_result; } float Reducer::reduce_max_wrapper(int n, float *d_idata, float *d_odata){ // Create and start timer StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); float gpu_result = 0; int numThreads = 0; int numBlocks = 0; getNumBlocksAndThreads(n, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; // First pass of the kernel. 
Basically do a reduction and copy to d_odata int kernelCount = 0; reduce_max_kernel_wrapper(n, numThreads, numBlocks, d_idata, d_odata); kernelCount++; // sum partial block sums on GPU int s = numBlocks; // Since we are considering reducing 500cube images, the cpu reduce scheme won't save // use much time compared to total kernel launch. So it is omitted here. while (s > 1){ getNumBlocksAndThreads(s, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; reduce_max_kernel_wrapper(s, numThreads, numBlocks, d_odata, d_odata); s = (s + (numThreads*2-1)) / (numThreads*2); kernelCount++; } // Stop timer cudaDeviceSynchronize(); sdkStopTimer(&timer); double reduce_time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); // copy final sum from device to host checkCudaErrors(cudaMemcpy(&gpu_result, d_odata, sizeof(float), cudaMemcpyDeviceToHost)); // std::cout<<"From function reduce_wrapper: The reduce kernel has launch "<< kernelCount << " times." << std::endl; // std::cout<<"Time: " << reduce_time << " ms." << std::endl; return gpu_result; } float Reducer::reduce_min_wrapper(int n, float *d_idata, float *d_odata){ // Create and start timer StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkResetTimer(&timer); sdkStartTimer(&timer); float gpu_result = 0; int numThreads = 0; int numBlocks = 0; getNumBlocksAndThreads(n, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; // First pass of the kernel. Basically do a reduction and copy to d_odata int kernelCount = 0; reduce_min_kernel_wrapper(n, numThreads, numBlocks, d_idata, d_odata); kernelCount++; // sum partial block sums on GPU int s = numBlocks; // Since we are considering reducing 500cube images, the cpu reduce scheme won't save // use much time compared to total kernel launch. So it is omitted here. while (s > 1){ getNumBlocksAndThreads(s, numBlocks, numThreads); // std::cout<<"From function reduce_wrapper, numBlocks : "<< numBlocks << "numThreads: " << numThreads << std::endl; reduce_min_kernel_wrapper(s, numThreads, numBlocks, d_odata, d_odata); s = (s + (numThreads*2-1)) / (numThreads*2); kernelCount++; } // Stop timer cudaDeviceSynchronize(); sdkStopTimer(&timer); double reduce_time = sdkGetTimerValue(&timer); sdkDeleteTimer(&timer); // copy final sum from device to host checkCudaErrors(cudaMemcpy(&gpu_result, d_odata, sizeof(float), cudaMemcpyDeviceToHost)); // std::cout<<"From function reduce_wrapper: The reduce kernel has launch "<< kernelCount << " times." << std::endl; // std::cout<<"Time: " << reduce_time << " ms." << std::endl; return gpu_result; }
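A possible caller of the wrappers above, shown only as a sketch: it assumes Reducer is default-constructible in reducer.h, that checkCudaErrors comes from helper_cuda.h as in this file, and the function name sum_on_gpu is introduced here for illustration. Sizing note: d_odata only needs one slot per first-pass block, so n slots is a safe upper bound.

// Hypothetical usage of Reducer::reduce_sum_wrapper (sketch, not part of the file).
#include <vector>
#include <cuda_runtime.h>
#include "helper_cuda.h"
#include "reducer.h"

float sum_on_gpu(const std::vector<float>& host) {
    int n = (int)host.size();
    float *d_in = nullptr, *d_out = nullptr;
    checkCudaErrors(cudaMalloc(&d_in, n * sizeof(float)));
    checkCudaErrors(cudaMalloc(&d_out, n * sizeof(float)));   // >= first-pass block count
    checkCudaErrors(cudaMemcpy(d_in, host.data(), n * sizeof(float),
                               cudaMemcpyHostToDevice));

    Reducer reducer;                                          // assumed default-constructible
    float s = reducer.reduce_sum_wrapper(n, d_in, d_out);     // result is read from d_out[0]

    checkCudaErrors(cudaFree(d_in));
    checkCudaErrors(cudaFree(d_out));
    return s;
}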
db674a83c34a7993bcc94e445b8bbf33717a5787.hip
// !!! This is a file automatically generated by hipify!!!
#include <cutil.h>
#include <hip/hip_runtime.h>

#ifdef _WIN32
#include <windows.h>
#endif

#include "../Algorithm/Configuration.h"

#ifdef VIS
#include <cuda_gl_interop.h>
#include <cutil_inline_runtime.h>
#endif

#include "GraphStorage.h"

/**
 * CPU side memory allocator
 * @param [in] ptr  predefined CPU pointer
 * @param [in] size allocate size bytes of memory
 */
void CPUMalloc(void **ptr, int size)
{
    (*ptr) = (void*)malloc(size);
    if((*ptr) == NULL)
    {
        printf("CPUMalloc Failed!\n");
        exit(-1);
    }
}

/**
 * GPU side memory allocator
 * @param [in] D_ptr predefined device pointer
 * @param [in] size  allocate size bytes of device memory
 */
void GPUMalloc(void **D_ptr, int size)
{
    CUDA_SAFE_CALL(hipMalloc(D_ptr, size));
}

/**
 * generate random number in the range of 0 ~ 2^32 - 1
 * @note unsigned int type should be at least 32 bits; the +1 offsets on the low
 *       three bytes make the distribution non-uniform
 */
unsigned int rand32()
{
    unsigned int b8  = 0;
    unsigned int b16 = 0;
    unsigned int b24 = 0;
    unsigned int b32 = 0;

    b8  = rand()%256 + 1;
    b16 = rand()%256 + 1;
    b24 = rand()%256 + 1;
    b32 = rand()%256;

    unsigned r32 = b8 + (b16<<8) + (b24<<16) + (b32<<24);
    return r32;
}

/**
 * initialize the runtime: require at least one device, select device 0 and
 * cache its properties in GRAPH_STORAGE_CPU::device_prop
 * @note exits the program if no device is found
 */
void InitCUDA()
{
    int count = 0;
    hipGetDeviceCount(&count);
    if(count == 0)
    {
        fprintf(stderr, "There is no device. Aborted!\n");
        exit(-1);
    }

#ifdef VIS
    hipGLSetGLDevice(0);
    hipGetDeviceProperties(&GRAPH_STORAGE_CPU::device_prop, 0);
#else
    hipSetDevice(0);
    hipGetDeviceProperties(&GRAPH_STORAGE_CPU::device_prop, 0);
#endif

    printf("CUDA initialized.\n");
    // hipDeviceSetCacheConfig(hipFuncCachePreferShared);
}
db674a83c34a7993bcc94e445b8bbf33717a5787.cu
#include <cutil.h>
#include <cuda_runtime.h>

#ifdef _WIN32
#include <windows.h>
#endif

#include "../Algorithm/Configuration.h"

#ifdef VIS
#include <cuda_gl_interop.h>
#include <cutil_inline_runtime.h>
#endif

#include "GraphStorage.h"

/**
 * CPU side memory allocator
 * @param [in] ptr  predefined CPU pointer
 * @param [in] size allocate size bytes of memory
 */
void CPUMalloc(void **ptr, int size)
{
    (*ptr) = (void*)malloc(size);
    if((*ptr) == NULL)
    {
        printf("CPUMalloc Failed!\n");
        exit(-1);
    }
}

/**
 * GPU side memory allocator
 * @param [in] D_ptr predefined device pointer
 * @param [in] size  allocate size bytes of device memory
 */
void GPUMalloc(void **D_ptr, int size)
{
    CUDA_SAFE_CALL(cudaMalloc(D_ptr, size));
}

/**
 * generate random number in the range of 0 ~ 2^32 - 1
 * @note unsigned int type should be at least 32 bits; the +1 offsets on the low
 *       three bytes make the distribution non-uniform
 */
unsigned int rand32()
{
    unsigned int b8  = 0;
    unsigned int b16 = 0;
    unsigned int b24 = 0;
    unsigned int b32 = 0;

    b8  = rand()%256 + 1;
    b16 = rand()%256 + 1;
    b24 = rand()%256 + 1;
    b32 = rand()%256;

    unsigned r32 = b8 + (b16<<8) + (b24<<16) + (b32<<24);
    return r32;
}

/**
 * initialize the runtime: require at least one device, select device 0 and
 * cache its properties in GRAPH_STORAGE_CPU::device_prop
 * @note exits the program if no device is found
 */
void InitCUDA()
{
    int count = 0;
    cudaGetDeviceCount(&count);
    if(count == 0)
    {
        fprintf(stderr, "There is no device. Aborted!\n");
        exit(-1);
    }

#ifdef VIS
    cudaGLSetGLDevice(0);
    cudaGetDeviceProperties(&GRAPH_STORAGE_CPU::device_prop, 0);
#else
    cudaSetDevice(0);
    cudaGetDeviceProperties(&GRAPH_STORAGE_CPU::device_prop, 0);
#endif

    printf("CUDA initialized.\n");
    // cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
}
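A short usage sketch of the helpers defined above, written as if appended to this translation unit. The function name example_fill_and_upload and the buffer size are introduced here for illustration only; InitCUDA, CPUMalloc, GPUMalloc and rand32 are the functions from this file.

// Hypothetical caller (sketch): initialize the device, fill a host buffer with
// rand32() values, and upload it.
void example_fill_and_upload()
{
    InitCUDA();                                   // pick device 0, cache its properties

    const int n = 1 << 20;                        // illustrative size
    unsigned int *h_buf = NULL, *d_buf = NULL;
    CPUMalloc((void**)&h_buf, n * sizeof(unsigned int));
    GPUMalloc((void**)&d_buf, n * sizeof(unsigned int));

    for (int i = 0; i < n; ++i)
        h_buf[i] = rand32();                      // 32-bit pseudo-random values

    cudaMemcpy(d_buf, h_buf, n * sizeof(unsigned int), cudaMemcpyHostToDevice);

    cudaFree(d_buf);
    free(h_buf);
}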
5eef4ec928be29012b27233f7b5cc0684b3d88ba.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 16, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
5eef4ec928be29012b27233f7b5cc0684b3d88ba.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 16, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
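For orientation only (not produced by the generator script): the tile shapes above imply the warp decomposition below, which is the standard CUTLASS reading of a threadblock tile divided by a warp tile. Treating the trailing `2, 16, 16` template arguments as the pipeline stage count and the per-operand access alignments is my assumption about this wrapper's parameter order.

// Sketch: warp decomposition implied by GemmShape<128, 64, 64> over GemmShape<64, 32, 64>.
constexpr int kWarpM = 128 / 64;                       // 2 warp tiles along M
constexpr int kWarpN = 64 / 32;                        // 2 warp tiles along N
constexpr int kWarpsPerBlock = kWarpM * kWarpN;        // 4 warps
constexpr int kThreadsPerBlock = kWarpsPerBlock * 32;  // 128 threads per threadblock
static_assert(kWarpsPerBlock == 4 && kThreadsPerBlock == 128,
              "128x64 block tile over 64x32 warp tiles -> 4 warps");
// Each warp then issues 8x8x16 int8 Tensor Core MMAs (InstructionShape, Sm75).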
d2230ff0ab03535c9fe2018b5275ef319db494ad.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thread> // NOLINT #include <vector> #include "gtest/gtest.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { namespace memory { const int NUM_STREAMS = 8; const int N = 2; const float DELTA = 1e-1; using CudaDevCtxVec = std::vector<std::unique_ptr<platform::CUDADeviceContext>>; __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = 3.14159 * i; } } void CheckKernelOutput(float *x, int n) { auto host_x = std::unique_ptr<float[]>(new float[n]); for (int i = 0; i < n; ++i) { EXPECT_TRUE(hipSuccess == hipMemcpy(host_x.get(), x, n * sizeof(float), hipMemcpyDeviceToHost)); EXPECT_GE(host_x[i] + DELTA, 3.14159f * i); EXPECT_LE(host_x[i] - DELTA, 3.14159f * i); } } void MultiStreamCompute(float **data, float **second_data, const platform::CUDADeviceContext &ctx) { // multi-streams AllocationPtr allocation_ptr = Alloc(ctx, N * sizeof(float)); EXPECT_GE(allocation_ptr->size(), N * sizeof(float)); *data = reinterpret_cast<float *>(allocation_ptr->ptr()); hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, ctx.stream(), *data, N); // allocate and compute on same stream again allocation_ptr = Alloc(ctx, N * sizeof(float)); EXPECT_GE(allocation_ptr->size(), N * sizeof(float)); *second_data = reinterpret_cast<float *>(allocation_ptr->ptr()); hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, ctx.stream(), *second_data, N); } TEST(Malloc, CUDADeviceContextMultiStream) { auto place = platform::CUDAPlace(0); platform::SetDeviceId(0); AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float)); EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float)); float *main_stream_data = reinterpret_cast<float *>(main_stream_alloc_ptr->ptr()); float *data[NUM_STREAMS]; float *second_data[NUM_STREAMS]; CudaDevCtxVec dev_ctx; // default stream hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, 0, main_stream_data, N); main_stream_alloc_ptr.reset(); for (int i = 0; i < NUM_STREAMS; ++i) { dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>( new platform::CUDADeviceContext(place))); MultiStreamCompute(&data[i], &second_data[i], *dev_ctx[i]); } EXPECT_TRUE(hipSuccess == hipDeviceSynchronize()); for (int i = 0; i < NUM_STREAMS; ++i) { CheckKernelOutput(data[i], N); CheckKernelOutput(second_data[i], N); } } TEST(Malloc, CUDADeviceContextMultiThreadMultiStream) { auto place = platform::CUDAPlace(0); platform::SetDeviceId(0); AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float)); EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float)); float *main_stream_data = reinterpret_cast<float *>(main_stream_alloc_ptr->ptr()); float *data[NUM_STREAMS]; float *second_data[NUM_STREAMS]; CudaDevCtxVec 
dev_ctx; std::vector<std::thread> threads; // default stream hipLaunchKernelGGL(( kernel), dim3(1), dim3(64), 0, 0, main_stream_data, N); main_stream_alloc_ptr.reset(); for (int i = 0; i < NUM_STREAMS; ++i) { dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>( new platform::CUDADeviceContext(place))); threads.push_back(std::thread(MultiStreamCompute, &data[i], &second_data[i], std::cref(*dev_ctx[i]))); } for (int i = 0; i < NUM_STREAMS; ++i) { threads[i].join(); } EXPECT_TRUE(hipSuccess == hipDeviceSynchronize()); for (int i = 0; i < NUM_STREAMS; ++i) { CheckKernelOutput(data[i], N); CheckKernelOutput(second_data[i], N); } } TEST(Malloc, AllocZero) { auto place = platform::CUDAPlace(0); AllocationPtr allocation_ptr = Alloc(place, 0); EXPECT_GE(allocation_ptr->size(), 0); } } // namespace memory } // namespace paddle
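This HIP file and the CUDA file that follows contain the same kernel launch in two spellings; the mapping hipify applies is mechanical, as the side-by-side sketch below shows (kernel name and arguments taken from the test above).

// CUDA form (see the .cu file below):
//   kernel<<<1, 64, 0, ctx.stream()>>>(*data, N);
// HIP form produced by hipify (this file):
//   hipLaunchKernelGGL((kernel), dim3(1), dim3(64), 0 /*dynamic smem*/, ctx.stream(), *data, N);
// Grid, block, dynamic shared memory and stream become explicit macro arguments,
// followed by the kernel's own argument list.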
d2230ff0ab03535c9fe2018b5275ef319db494ad.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda.h> #include <cuda_runtime.h> #include <thread> // NOLINT #include <vector> #include "gtest/gtest.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { namespace memory { const int NUM_STREAMS = 8; const int N = 2; const float DELTA = 1e-1; using CudaDevCtxVec = std::vector<std::unique_ptr<platform::CUDADeviceContext>>; __global__ void kernel(float *x, int n) { int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < n; i += blockDim.x * gridDim.x) { x[i] = 3.14159 * i; } } void CheckKernelOutput(float *x, int n) { auto host_x = std::unique_ptr<float[]>(new float[n]); for (int i = 0; i < n; ++i) { EXPECT_TRUE(cudaSuccess == cudaMemcpy(host_x.get(), x, n * sizeof(float), cudaMemcpyDeviceToHost)); EXPECT_GE(host_x[i] + DELTA, 3.14159f * i); EXPECT_LE(host_x[i] - DELTA, 3.14159f * i); } } void MultiStreamCompute(float **data, float **second_data, const platform::CUDADeviceContext &ctx) { // multi-streams AllocationPtr allocation_ptr = Alloc(ctx, N * sizeof(float)); EXPECT_GE(allocation_ptr->size(), N * sizeof(float)); *data = reinterpret_cast<float *>(allocation_ptr->ptr()); kernel<<<1, 64, 0, ctx.stream()>>>(*data, N); // allocate and compute on same stream again allocation_ptr = Alloc(ctx, N * sizeof(float)); EXPECT_GE(allocation_ptr->size(), N * sizeof(float)); *second_data = reinterpret_cast<float *>(allocation_ptr->ptr()); kernel<<<1, 64, 0, ctx.stream()>>>(*second_data, N); } TEST(Malloc, CUDADeviceContextMultiStream) { auto place = platform::CUDAPlace(0); platform::SetDeviceId(0); AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float)); EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float)); float *main_stream_data = reinterpret_cast<float *>(main_stream_alloc_ptr->ptr()); float *data[NUM_STREAMS]; float *second_data[NUM_STREAMS]; CudaDevCtxVec dev_ctx; // default stream kernel<<<1, 64>>>(main_stream_data, N); main_stream_alloc_ptr.reset(); for (int i = 0; i < NUM_STREAMS; ++i) { dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>( new platform::CUDADeviceContext(place))); MultiStreamCompute(&data[i], &second_data[i], *dev_ctx[i]); } EXPECT_TRUE(cudaSuccess == cudaDeviceSynchronize()); for (int i = 0; i < NUM_STREAMS; ++i) { CheckKernelOutput(data[i], N); CheckKernelOutput(second_data[i], N); } } TEST(Malloc, CUDADeviceContextMultiThreadMultiStream) { auto place = platform::CUDAPlace(0); platform::SetDeviceId(0); AllocationPtr main_stream_alloc_ptr = Alloc(place, N * sizeof(float)); EXPECT_GE(main_stream_alloc_ptr->size(), N * sizeof(float)); float *main_stream_data = reinterpret_cast<float *>(main_stream_alloc_ptr->ptr()); float *data[NUM_STREAMS]; float *second_data[NUM_STREAMS]; CudaDevCtxVec dev_ctx; std::vector<std::thread> threads; // default stream kernel<<<1, 64>>>(main_stream_data, N); main_stream_alloc_ptr.reset(); for (int i = 0; i < NUM_STREAMS; ++i) { 
dev_ctx.push_back(std::unique_ptr<platform::CUDADeviceContext>( new platform::CUDADeviceContext(place))); threads.push_back(std::thread(MultiStreamCompute, &data[i], &second_data[i], std::cref(*dev_ctx[i]))); } for (int i = 0; i < NUM_STREAMS; ++i) { threads[i].join(); } EXPECT_TRUE(cudaSuccess == cudaDeviceSynchronize()); for (int i = 0; i < NUM_STREAMS; ++i) { CheckKernelOutput(data[i], N); CheckKernelOutput(second_data[i], N); } } TEST(Malloc, AllocZero) { auto place = platform::CUDAPlace(0); AllocationPtr allocation_ptr = Alloc(place, 0); EXPECT_GE(allocation_ptr->size(), 0); } } // namespace memory } // namespace paddle
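The test above drives one CUDADeviceContext (and hence one stream) per worker; the same pattern reduced to raw CUDA streams, without any Paddle types, looks roughly like the sketch below. The names fill and run_streams are introduced here for illustration; the kernel body mirrors the grid-stride loop used in the test.

// Sketch: per-stream kernel launches followed by a single device-wide sync.
#include <cuda_runtime.h>
#include <vector>

__global__ void fill(float* x, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = tid; i < n; i += blockDim.x * gridDim.x) x[i] = 3.14159f * i;
}

void run_streams(int num_streams, int n) {
    std::vector<cudaStream_t> streams(num_streams);
    std::vector<float*> bufs(num_streams);
    for (int i = 0; i < num_streams; ++i) {
        cudaStreamCreate(&streams[i]);
        cudaMalloc(&bufs[i], n * sizeof(float));
        fill<<<1, 64, 0, streams[i]>>>(bufs[i], n);   // work queued on its own stream
    }
    cudaDeviceSynchronize();                          // wait for every stream
    for (int i = 0; i < num_streams; ++i) {
        cudaFree(bufs[i]);
        cudaStreamDestroy(streams[i]);
    }
}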
a447ec837b3f427dc4447555955bf4a44a03659c.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/opencv.hpp> //#include "utils.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <string> cv::Mat imageRGBA; cv::Mat imageOut; uchar4 *d_rgbaImage__; uchar4 *d_blurImage__; size_t numRows() { return imageRGBA.rows; } size_t numCols() { return imageRGBA.cols; } //return types are void since any internal error will be handled by quitting //no point in returning error codes... //returns a pointer to an RGBA version of the input image //and a pointer to the single channel grey-scale output //on both the host and device void preProcess(uchar4 **inputImage, uchar4 **blurImage, uchar4 **d_rgbaImage, uchar4 **d_blurImage, const std::string &filename) { //make sure the context initializes ok hipFree(0);//checkCudaErrors(hipFree(0)); cv::Mat image; image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << filename << std::endl; exit(1); } cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); //allocate memory for the output imageOut.create(image.rows, image.cols, CV_8UC4); //CV_8UC1 //This shouldn't ever happen given the way the images are created //at least based upon my limited understanding of OpenCV, but better to check if (!imageRGBA.isContinuous() || !imageOut.isContinuous()) { std::cerr << "Images aren't continuous!! Exiting." << std::endl; exit(1); } *inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0); *blurImage = (uchar4 *)imageOut.ptr<unsigned char>(0); const size_t numPixels = numRows() * numCols(); //allocate memory on the device for both input and output hipMalloc(d_rgbaImage, sizeof(uchar4) * numPixels); // checkCudaErrors hipMalloc(d_blurImage, sizeof(uchar4) * numPixels); // checkCudaErrors hipMemset(*d_blurImage, 0, numPixels * sizeof(uchar4)); //make sure no memory is left laying around; checkCudaErrors //copy input array to the GPU hipMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice); // checkCudaErrors d_rgbaImage__ = *d_rgbaImage; d_blurImage__ = *d_blurImage; } void postProcess(const std::string& output_file) { const int numPixels = numRows() * numCols(); //copy the output back to the host hipMemcpy(imageOut.ptr<uchar4>(0), d_blurImage__, sizeof(uchar4) * numPixels, hipMemcpyDeviceToHost); // checkCudaErrors //output the image cv::Mat imageOut2; imageOut2.create(imageOut.rows, imageOut.cols, CV_8UC4); cv::cvtColor(imageOut, imageOut2, CV_RGBA2BGR); cv::imwrite(output_file.c_str(), imageOut2); //cleanup hipFree(d_rgbaImage__); hipFree(d_blurImage__); }
a447ec837b3f427dc4447555955bf4a44a03659c.cu
#include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/opencv.hpp> //#include "utils.h" #include <cuda.h> #include <cuda_runtime.h> #include <string> cv::Mat imageRGBA; cv::Mat imageOut; uchar4 *d_rgbaImage__; uchar4 *d_blurImage__; size_t numRows() { return imageRGBA.rows; } size_t numCols() { return imageRGBA.cols; } //return types are void since any internal error will be handled by quitting //no point in returning error codes... //returns a pointer to an RGBA version of the input image //and a pointer to the single channel grey-scale output //on both the host and device void preProcess(uchar4 **inputImage, uchar4 **blurImage, uchar4 **d_rgbaImage, uchar4 **d_blurImage, const std::string &filename) { //make sure the context initializes ok cudaFree(0);//checkCudaErrors(cudaFree(0)); cv::Mat image; image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << filename << std::endl; exit(1); } cv::cvtColor(image, imageRGBA, CV_BGR2RGBA); //allocate memory for the output imageOut.create(image.rows, image.cols, CV_8UC4); //CV_8UC1 //This shouldn't ever happen given the way the images are created //at least based upon my limited understanding of OpenCV, but better to check if (!imageRGBA.isContinuous() || !imageOut.isContinuous()) { std::cerr << "Images aren't continuous!! Exiting." << std::endl; exit(1); } *inputImage = (uchar4 *)imageRGBA.ptr<unsigned char>(0); *blurImage = (uchar4 *)imageOut.ptr<unsigned char>(0); const size_t numPixels = numRows() * numCols(); //allocate memory on the device for both input and output cudaMalloc(d_rgbaImage, sizeof(uchar4) * numPixels); // checkCudaErrors cudaMalloc(d_blurImage, sizeof(uchar4) * numPixels); // checkCudaErrors cudaMemset(*d_blurImage, 0, numPixels * sizeof(uchar4)); //make sure no memory is left laying around; checkCudaErrors //copy input array to the GPU cudaMemcpy(*d_rgbaImage, *inputImage, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice); // checkCudaErrors d_rgbaImage__ = *d_rgbaImage; d_blurImage__ = *d_blurImage; } void postProcess(const std::string& output_file) { const int numPixels = numRows() * numCols(); //copy the output back to the host cudaMemcpy(imageOut.ptr<uchar4>(0), d_blurImage__, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost); // checkCudaErrors //output the image cv::Mat imageOut2; imageOut2.create(imageOut.rows, imageOut.cols, CV_8UC4); cv::cvtColor(imageOut, imageOut2, CV_RGBA2BGR); cv::imwrite(output_file.c_str(), imageOut2); //cleanup cudaFree(d_rgbaImage__); cudaFree(d_blurImage__); }
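A hypothetical driver showing how preProcess and postProcess are meant to bracket the blur kernel, which is not defined in this file; the launch line is therefore left commented out and gaussian_blur_kernel is a placeholder name. numRows()/numCols() and the pre/post functions are the ones defined above.

// Sketch of the intended call sequence (assumes the blur kernel lives elsewhere).
int main(int argc, char** argv) {
    if (argc < 3) return 1;   // usage: <input image> <output image>
    uchar4 *h_in = nullptr, *h_out = nullptr, *d_in = nullptr, *d_out = nullptr;
    preProcess(&h_in, &h_out, &d_in, &d_out, std::string(argv[1]));

    const dim3 block(16, 16);
    const dim3 grid((numCols() + block.x - 1) / block.x,
                    (numRows() + block.y - 1) / block.y);
    // gaussian_blur_kernel<<<grid, block>>>(d_in, d_out, numRows(), numCols());
    cudaDeviceSynchronize();

    postProcess(std::string(argv[2]));   // copies d_blurImage__ back and writes the file
    return 0;
}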
d39db71e5d8f2498021cebf8bcf9e2df1aea97d4.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <armadillo>
#include <vector>
#include <string>
#include <hip/hip_runtime.h>
#ifdef _WIN32
#include <conio.h>   // for _getche()
#endif

using namespace std;

__global__ void suma_vectores(float *c, float *a, float *b, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        c[idx] = a[idx] + b[idx];
    }
}

int main(void)
{
    float *a_h, *b_h, *c_h;   // host arrays
    float *a_d, *b_d, *c_d;   // device arrays
    int N = 1000000;
    size_t size = N * sizeof(float);

    a_h = (float *) malloc(size);
    b_h = (float *) malloc(size);
    c_h = (float *) malloc(size);

    for (int i = 0; i < N; i++)
    {
        a_h[i] = (float)i;
        b_h[i] = (float)(i + 1);
    }

    hipMalloc((void**)&a_d, size);
    hipMalloc((void**)&b_d, size);
    hipMalloc((void**)&c_d, size);

    hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);   // was copying a_h by mistake

    int block_size = 8;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    hipLaunchKernelGGL((suma_vectores), dim3(n_blocks), dim3(block_size), 0, 0,
                       c_d, a_d, b_d, N);

    hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);

    for (int i = 0; i < N; i++)
    {
        cout << c_h[i] << " " << endl;
    }

#ifdef _WIN32
    _getche();   // keep the console window open (Windows only)
#endif

    hipFree(a_d);
    hipFree(b_d);
    hipFree(c_d);
    free(a_h);
    free(b_h);
    free(c_h);
    return 0;
}
d39db71e5d8f2498021cebf8bcf9e2df1aea97d4.cu
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <armadillo>
#include <vector>
#include <string>
#include <cuda_runtime.h>
#include <cuda.h>
#ifdef _WIN32
#include <conio.h>   // for _getche()
#endif

using namespace std;

__global__ void suma_vectores(float *c, float *a, float *b, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        c[idx] = a[idx] + b[idx];
    }
}

int main(void)
{
    float *a_h, *b_h, *c_h;   // host arrays
    float *a_d, *b_d, *c_d;   // device arrays
    int N = 1000000;
    size_t size = N * sizeof(float);

    a_h = (float *) malloc(size);
    b_h = (float *) malloc(size);
    c_h = (float *) malloc(size);

    for (int i = 0; i < N; i++)
    {
        a_h[i] = (float)i;
        b_h[i] = (float)(i + 1);
    }

    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);

    cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);   // was copying a_h by mistake

    int block_size = 8;
    int n_blocks = N / block_size + (N % block_size == 0 ? 0 : 1);
    suma_vectores<<<n_blocks, block_size>>>(c_d, a_d, b_d, N);

    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++)
    {
        cout << c_h[i] << " " << endl;
    }

#ifdef _WIN32
    _getche();   // keep the console window open (Windows only)
#endif

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    free(a_h);
    free(b_h);
    free(c_h);
    return 0;
}
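A small host-side verifier one could call after the device-to-host copy above. The function name verify_suma is introduced here; the expected value follows from the initialization in main: a_h[i] = i and b_h[i] = i + 1, so c[i] = 2*i + 1, and all values stay far below 2^24, so float arithmetic is exact for this check.

// Sketch: check the GPU result against the closed-form expectation.
static bool verify_suma(const float* c, int n)
{
    for (int i = 0; i < n; i++)
        if (c[i] != 2.0f * i + 1.0f) return false;
    return true;
}
// e.g.  cout << (verify_suma(c_h, N) ? "PASSED" : "FAILED") << endl;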
364a819bafb30bf048b15dff6f515c9e7efdd9a5.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/pair.h> #include <thrust/tuple.h> #include <algorithm> #include <array> #include <limits> #include <sstream> #include <tuple> #include <type_traits> #include "open3d/core/Blob.h" #include "open3d/core/CUDAUtils.h" #include "open3d/core/Device.h" #include "open3d/core/Dispatch.h" #include "open3d/core/FunctionTraits.h" #include "open3d/core/Indexer.h" #include "open3d/core/MemoryManager.h" #include "open3d/core/ParallelFor.h" #include "open3d/core/SizeVector.h" #include "open3d/core/Tensor.h" #include "open3d/core/kernel/Reduction.h" #include "open3d/utility/Logging.h" // CUDA reduction is based on PyTorch's CUDA reduction implementation. // See: aten/src/ATen/native/cuda/Reduce.cuh #if __CUDA_ARCH__ >= 750 constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024; #else constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048; #endif constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024; constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256; #define OPEN3D_MAX_THREADS_PER_BLOCK(val) \ (((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \ : CUDA_THREADS_PER_BLOCK_FALLBACK) #define OPEN3D_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \ ((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \ ? 
(blocks_per_sm) \ : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \ (threads_per_block)))) #define OPEN3D_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \ __launch_bounds__((OPEN3D_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \ (OPEN3D_MIN_BLOCKS_PER_SM((max_threads_per_block), \ (min_blocks_per_sm)))) template <typename T> OPEN3D_DEVICE __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) { #if TORCH_HIP_VERSION >= 9000 return __shfl_down_sync(mask, value, delta, width); #else return __shfl_down(value, delta, width); #endif } namespace open3d { namespace core { namespace kernel { static inline int64_t DivUp(int64_t a, int64_t b) { return (a + b - 1) / b; } // Returns reduced fraction numerator & denominator OPEN3D_HOST_DEVICE static void ReduceFraction(int64_t& numerator, int64_t& denominator) { // Get GCD of num and denom using Euclid's algorithm. // Can replace this with std::gcd if we ever support c++17. int64_t a = denominator; int64_t b = numerator; while (b != 0) { a %= b; int64_t tmp = a; a = b; b = tmp; } // a is now the GCD numerator /= a; denominator /= a; } class ReduceConfig { public: static constexpr int BLOCK_X = 0; static constexpr int BLOCK_Y = 1; static constexpr int CTA = 2; static constexpr int MAX_NUM_THREADS = 512; int num_inputs_per_output_; int num_outputs_; int step_input_ = 1; int step_output_ = 1; int ctas_per_output_ = 1; private: int element_size_bytes_; int input_mult_[3] = {0, 0, 0}; int output_mult_[2] = {0, 0}; int block_width_; int block_height_; int num_threads_; public: ReduceConfig(int element_size_bytes, const Indexer& indexer) : element_size_bytes_(element_size_bytes) { num_outputs_ = indexer.NumOutputElements(); num_inputs_per_output_ = indexer.NumWorkloads() / num_outputs_; // Adjust block size to map block width to fastest changing dimension of // input tensor. This grants the best possible memory accessing pattern, // given that for non-contiguous tensor with space in between, we cannot // have perfect memory coalescing. bool reduction_on_fastest_striding_dimension = (indexer.NumReductionDims() == indexer.NumDims()) || (indexer.GetInput(0).byte_strides_[0] < indexer.GetInput(0).byte_strides_[indexer.NumReductionDims()]); // Notice that dim0 & dim1 does NOT guarantee any launch configuration // here! dim0 & dim1 are more like the upper bound of the block // dimension. The actual launch config and reduction scheme is // determined by setting values to `input_mult_` and // `output_mult_`. We try to max out dim1 so that we have enough // threads per CTA to deliver performance for larger problem size. int64_t dim0; int64_t dim1; if (reduction_on_fastest_striding_dimension) { // Map block.x to the fastest reducing dimension. It implies: // 1. BlockXReduce is required. // 2. block.y now max out to num_outputs. dim0 = indexer.GetMasterShape()[0]; dim1 = num_outputs_; } else { // Map block.x to the fastest non reducing dimension. It implies: // 1. BlockXReduce is turned off. // 2. block.y now max out to num_inputs_per_output_. dim0 = indexer.GetMasterShape()[indexer.NumReductionDims()]; dim1 = num_inputs_per_output_; } // Adjust block_width and block_height SetBlockDimension(dim0, dim1); int block_width = block_width_; int block_height = block_height_; if (indexer.NumDims() == 0 || reduction_on_fastest_striding_dimension) { // Split the input across lanes if the input is contiguous in the // reduced dimension. 
This will require reduction between threads // using warp shuffle instructions and shared memory (if // block_width > warpSize). input_mult_[0] = SplitInput(block_width); } else { // Otherwise split the output across lanes in a warp. output_mult_[0] = SplitOutput(block_width); } if (ValuesPerThread() >= block_height * 16 || ValuesPerThread() >= 256) { // Divide the input across warps in a thread-block, if that leaves // at least 16 elements to be summed by each thread. This will // require inter-warp reduction using shared memory. input_mult_[1] = SplitInput(block_height); } else { // Otherwise, each warp handles a separate output. output_mult_[1] = SplitOutput(block_height); } if (input_mult_[1] != 0 && ValuesPerThread() >= 256 && num_outputs_ <= 4096) { // Divide the input across thread-blocks if the amount of work // per-thread is large enough and the size of the output is small // enough. This will require a reduction using global memory. ctas_per_output_ = DivUp(ValuesPerThread(), 16); if (ctas_per_output_ > 65535) { ctas_per_output_ = 65535; } input_mult_[2] = SplitInput(ctas_per_output_); } } /// Returns floor(log2(n)) static inline int LastPow2(int n) { // Dtype.h asserts sizeof(int) == 4. n |= (n >> 1); n |= (n >> 2); n |= (n >> 4); n |= (n >> 8); n |= (n >> 16); return ::max(1, n - (n >> 1)); } void SetBlockDimension(int64_t dim0, int64_t dim1) { int dim0_pow2 = dim0 < MAX_NUM_THREADS ? static_cast<int>(LastPow2(dim0)) : MAX_NUM_THREADS; int dim1_pow2 = dim1 < MAX_NUM_THREADS ? static_cast<int>(LastPow2(dim1)) : MAX_NUM_THREADS; block_width_ = ::min(dim0_pow2, CUDAState::GetInstance()->GetWarpSize()); block_height_ = ::min(dim1_pow2, int(MAX_NUM_THREADS / block_width_)); block_width_ = ::min(dim0_pow2, int(MAX_NUM_THREADS / block_height_)); num_threads_ = block_width_ * block_height_; } int SplitInput(int parallelism) { int step = step_input_; step_input_ *= parallelism; return step; } int SplitOutput(int parallelism) { int step = step_output_; step_output_ *= parallelism; return step; } dim3 BlockDim() const { return dim3(block_width_, block_height_); } dim3 GridDim() const { return dim3(DivUp(num_outputs_, step_output_), ctas_per_output_); } OPEN3D_HOST_DEVICE bool ShouldBlockXReduce() const { return input_mult_[BLOCK_X] != 0; } OPEN3D_HOST_DEVICE bool ShouldBlockYReduce() const { return input_mult_[BLOCK_Y] != 0; } OPEN3D_HOST_DEVICE bool ShouldGlobalReduce() const { return input_mult_[CTA] != 0; } OPEN3D_DEVICE bool ShouldStore(int output_idx) const { return output_idx < num_outputs_ && (!ShouldBlockXReduce() || threadIdx.x == 0) && (!ShouldBlockYReduce() || threadIdx.y == 0); } OPEN3D_HOST_DEVICE int InputIdx() const { int lane = threadIdx.x; int warp = threadIdx.y; int cta2 = blockIdx.y; return (lane * input_mult_[BLOCK_X] + warp * input_mult_[BLOCK_Y] + cta2 * input_mult_[CTA]); } OPEN3D_HOST_DEVICE int OutputIdx() const { int lane = threadIdx.x; int warp = threadIdx.y; int cta1 = blockIdx.x; return (lane * output_mult_[BLOCK_X] + warp * output_mult_[BLOCK_Y] + cta1 * step_output_); } OPEN3D_DEVICE int SharedMemoryOffset(int offset) const { return threadIdx.x + (threadIdx.y + offset) * blockDim.x; } OPEN3D_DEVICE int StagingMemoryOffset(int cta2) const { int offset = cta2 + blockIdx.x * gridDim.y; if (!ShouldBlockXReduce()) { offset = threadIdx.x + offset * blockDim.x; } return offset; } int SharedMemorySize() const { if (!ShouldBlockYReduce() && (!ShouldBlockXReduce() || block_width_ <= CUDAState::GetInstance()->GetWarpSize())) { return 0; } return element_size_bytes_ * 
num_threads_; } int64_t GlobalMemorySize() const { if (!ShouldGlobalReduce()) { return 0; } auto size = (int64_t)element_size_bytes_ * num_outputs_ * ctas_per_output_; if (!ShouldBlockXReduce()) { size *= BlockDim().x; } return size; } int SemaphoreSize() const { if (!ShouldGlobalReduce()) { return 0; } return sizeof(int) * GridDim().x; } int ValuesPerThread() const { return DivUp(num_inputs_per_output_, step_input_); } std::string ToString() const { std::string input_mult_str = fmt::format( "[{},{},{}]", input_mult_[0], input_mult_[1], input_mult_[2]); std::string output_mult_str = fmt::format("[{},{}]", output_mult_[0], output_mult_[1]); std::string block_str = fmt::format("[{},{},{}]", BlockDim().x, BlockDim().y, BlockDim().z); std::string grid_str = fmt::format("[{},{},{}]", GridDim().x, GridDim().y, GridDim().z); std::string str = fmt::format( "REDUCEConfig(element_size_bytes_={}, " "num_inputs_per_output_={}, num_outputs_={}, " "step_input_={}, step_output_={}, ctas_per_output_={}, " "input_mult_={}, output_mult_={}, values_per_thread={}, " "block={}, grid={}, global_memory_size={})", element_size_bytes_, num_inputs_per_output_, num_outputs_, step_input_, step_output_, ctas_per_output_, input_mult_str, output_mult_str, ValuesPerThread(), block_str, grid_str, GlobalMemorySize()); return str; } }; template <int nt, typename R> OPEN3D_LAUNCH_BOUNDS_2(nt, 4) __global__ void ReduceKernel(R reduction) { reduction.Run(); } template <typename index_t> static OffsetCalculator<2, index_t> MakeOutputCalculator( const Indexer& indexer) { int num_reduction_dims = indexer.NumReductionDims(); int num_output_dims = indexer.NumDims() - num_reduction_dims; std::array<const int64_t*, 2> strides = { indexer.GetOutput().byte_strides_ + num_reduction_dims, indexer.GetInput(0).byte_strides_ + num_reduction_dims, }; const int64_t* shape = indexer.GetMasterShape() + num_reduction_dims; return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data()); } template <typename index_t> static OffsetCalculator<1, index_t> MakeInputCalculator( const Indexer& indexer) { int num_reduction_dims = indexer.NumReductionDims(); std::array<const int64_t*, 1> strides = { indexer.GetInput(0).byte_strides_, }; return OffsetCalculator<1, index_t>( num_reduction_dims, indexer.GetMasterShape(), strides.data()); } template <int vt, typename index_t, typename func_t> OPEN3D_DEVICE void StridedIterate(func_t f, index_t begin, index_t end, index_t stride) { if (begin + (vt - 1) * stride < end) { #pragma unroll for (index_t i = 0; i < vt; i++) { f(i, begin + i * stride); } } else { #pragma unroll for (index_t i = 0; i < vt; i++) { index_t idx = begin + i * stride; if (idx < end) { f(i, idx); } } } } /// Combime() and Reduce() are the same for regular reduction ops. template <typename out_scalar_t, typename func_t> class RegularReduceOps { using arg_t = typename BinaryFunctionTraits<func_t>::arg0_t; using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t; public: RegularReduceOps(const func_t& op) : reduce_func_(op) {} static inline OPEN3D_DEVICE out_scalar_t Project(arg_t arg) { return (out_scalar_t)arg; } static inline OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) { return WARP_SHFL_DOWN(arg, offset); } OPEN3D_DEVICE inline arg_t Combine(arg_t acc, scalar_t val) const { return reduce_func_(acc, val); } /// Idx is ignored for RegularReduceOps. 
OPEN3D_DEVICE inline arg_t Reduce(arg_t acc, scalar_t val, int64_t idx) const { return reduce_func_(acc, val); } private: func_t reduce_func_ = nullptr; }; template <typename scalar_t, typename func_t> RegularReduceOps<scalar_t, func_t> WrapRegularReduceOps(const func_t& op) { return RegularReduceOps<scalar_t, func_t>{op}; } template <typename func_t> class ArgReduceOps { using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t; using index_t = int64_t; using arg_t = thrust::pair<scalar_t, index_t>; public: ArgReduceOps(const func_t comp_func) : comp_func_(comp_func) {} static OPEN3D_DEVICE index_t Project(arg_t arg) { return arg.second; } static OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) { return arg_t(WARP_SHFL_DOWN(arg.first, offset), WARP_SHFL_DOWN(arg.second, offset)); } /// Combine(pair<val_t, idx_t>, pair<val_t, idx_t>) -> pair<val_t, idx_t>. /// Called at subsequent rounds of reduction, when values are already /// associated with indices. OPEN3D_DEVICE inline arg_t Combine(arg_t a, arg_t b) const { return comp_func_(a.first, b.first) ? a : b; } /// Reduce(pair<val_t, idx_t>, val_t, idx_t) -> pair<val_t, idx_t>. /// Called at the first round of reduction, when values are not yet /// associated with indices. OPEN3D_DEVICE inline arg_t Reduce(arg_t arg, scalar_t val, int64_t idx) const { return comp_func_(arg.first, val) ? arg : arg_t(val, idx); } private: func_t comp_func_ = nullptr; }; template <typename func_t> ArgReduceOps<func_t> WrapArgReduceOps(const func_t& comp_func) { return ArgReduceOps<func_t>{comp_func}; } template <typename scalar_t, typename ops_t, typename index_t, typename out_scalar_t = scalar_t, int vt0 = 4> class ReduceOp { using traits = FunctionTraits<decltype(&ops_t::Reduce)>; using arg_t = typename std::decay<typename traits::template arg<0>::type>::type; using InputCalculator = OffsetCalculator<1, index_t>; using OutputCalculator = OffsetCalculator<2, index_t>; public: ReduceOp(ops_t ops, ReduceConfig config, InputCalculator input_calc, OutputCalculator output_calc, const void* src, char* dst, void* acc_buf, void* cta_buf, int* semaphores, arg_t identity, bool accumulate, bool final_output) : ops_(ops), config_(config), input_calc_(input_calc), output_calc_(output_calc), src_(src), dst_(dst), acc_buf_(acc_buf), cta_buf_(cta_buf), semaphores_(semaphores), identity_(identity), accumulate_(accumulate), final_output_(final_output) {} OPEN3D_DEVICE void Run() const { extern __shared__ char shared_memory[]; index_t output_idx = config_.OutputIdx(); index_t input_idx = config_.InputIdx(); auto base_offsets = output_calc_.get(output_idx); arg_t value = identity_; if (output_idx < config_.num_outputs_ && input_idx < config_.num_inputs_per_output_) { auto input_slice = (const char*)src_ + base_offsets[1]; value = ThreadReduce((const scalar_t*)input_slice); } if (config_.ShouldBlockYReduce()) { value = BlockYReduce(value, shared_memory); } if (config_.ShouldBlockXReduce()) { value = BlockXReduce(value, shared_memory); } auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]); arg_t* acc = nullptr; if (acc_buf_ != nullptr) { int64_t numerator = (int64_t)sizeof(arg_t); int64_t denominator = (int64_t)sizeof(out_scalar_t); ReduceFraction(numerator, denominator); acc = (arg_t*)((char*)acc_buf_ + (base_offsets[0] * numerator / denominator)); } if (config_.ShouldGlobalReduce()) { value = GlobalReduce(value, acc, shared_memory); } else if (config_.ShouldStore(output_idx)) { if (acc == nullptr) { if (accumulate_) { value = 
AccumulateInOutput<can_accumulate_in_output>(out, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *out = GetAccumulatedOutput<can_accumulate_in_output>( out, value); } } else { if (accumulate_) { value = ops_.Combine(*acc, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *acc = value; } } } } OPEN3D_DEVICE arg_t ThreadReduce(const scalar_t* data) const { index_t idx = config_.InputIdx(); // Multiple accumulators to remove dependency between unrolled loops. arg_t value_list[vt0]; #pragma unroll for (int i = 0; i < vt0; i++) { value_list[i] = identity_; } index_t end = config_.num_inputs_per_output_; index_t stride = config_.step_input_; index_t element_stride = input_calc_.strides_[0][0] / sizeof(scalar_t); // Reducing layers of function calls so compiler could do proper loop // unroll that exposes instruction level parallelism. while (idx < config_.num_inputs_per_output_) { // load input SmallArray<scalar_t, vt0> values; if (input_calc_.dims_ == 1) { StridedIterate<vt0>( [&](index_t i, index_t idx) { values[i] = data[idx * element_stride]; }, idx, end, stride); } else { StridedIterate<vt0>( [&](index_t i, index_t idx) { values[i] = data[input_calc_.get(idx)[0] / sizeof(scalar_t)]; }, idx, end, stride); } // compute StridedIterate<vt0, index_t>( [&](index_t i, index_t idx) { value_list[i] = ops_.Reduce(value_list[i], values[i], idx); }, idx, config_.num_inputs_per_output_, config_.step_input_); // step offset idx += config_.step_input_ * vt0; } #pragma unroll for (int i = 1; i < vt0; i++) { value_list[0] = ops_.Combine(value_list[0], value_list[i]); } return value_list[0]; } OPEN3D_DEVICE arg_t BlockXReduce(arg_t value, char* shared_memory) const { int dim_x = blockDim.x; arg_t* shared = (arg_t*)shared_memory; if (dim_x > warpSize) { int address_base = threadIdx.x + threadIdx.y * blockDim.x; shared[address_base] = value; for (int offset = dim_x / 2; offset >= warpSize; offset >>= 1) { __syncthreads(); if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) { arg_t other = shared[address_base + offset]; value = ops_.Combine(value, other); shared[address_base] = value; } } dim_x = warpSize; } __syncthreads(); for (int offset = 1; offset < dim_x; offset <<= 1) { arg_t other = ops_.WarpShflDown(value, offset); value = ops_.Combine(value, other); } return value; } OPEN3D_DEVICE arg_t BlockYReduce(arg_t value, char* shared_memory) const { arg_t* shared = (arg_t*)shared_memory; shared[config_.SharedMemoryOffset(0)] = value; for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { __syncthreads(); if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { arg_t other = shared[config_.SharedMemoryOffset(offset)]; value = ops_.Combine(value, other); shared[config_.SharedMemoryOffset(0)] = value; } } return value; } OPEN3D_DEVICE bool MarkBlockFinished() const { __shared__ bool is_last_block_done_shared; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int prev_blocks_finished = atomicAdd(&semaphores_[blockIdx.x], 1); is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1); } __syncthreads(); return is_last_block_done_shared; } template <bool can_acc> OPEN3D_DEVICE arg_t AccumulateInOutput( out_scalar_t* out, arg_t value, typename std::enable_if<can_acc>::type* = nullptr) const { return ops_.Combine(*out, value); } // This function should never be called -- // it's the version of `AccumulateInOutput` // when accumulation in the output is not possible. 
template <bool can_acc> OPEN3D_DEVICE arg_t AccumulateInOutput( out_scalar_t*, arg_t, typename std::enable_if<!can_acc>::type* = nullptr) const { OPEN3D_ASSERT(false); return arg_t{}; } template <bool can_acc> OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput( out_scalar_t* out, arg_t value, typename std::enable_if<can_acc>::type* = nullptr) const { OPEN3D_ASSERT(!final_output_); return (out_scalar_t)value; } // This function should never be called -- // it's the version of `GetAccumulatedOutput` // when accumulation in the output is not possible. template <bool can_acc> OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput( out_scalar_t* out, arg_t value, typename std::enable_if<!can_acc>::type* = nullptr) const { OPEN3D_ASSERT(false); return *out; } template <class T> OPEN3D_DEVICE void SetResults(const T x, const index_t base_offset) const { auto res = (out_scalar_t*)((char*)dst_ + base_offset); *res = x; } OPEN3D_DEVICE void SetResultsToOutput(arg_t value, index_t base_offset) const { OPEN3D_ASSERT(final_output_); SetResults(ops_.Project(value), base_offset); } OPEN3D_DEVICE arg_t GlobalReduce(arg_t value, arg_t* acc, char* shared_memory) const { arg_t* reduce_buffer = (arg_t*)cta_buf_; index_t output_idx = config_.OutputIdx(); auto base_offsets = output_calc_.get(output_idx); auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]); bool should_store = config_.ShouldStore(config_.OutputIdx()); if (should_store) { index_t offset = config_.StagingMemoryOffset(blockIdx.y); reduce_buffer[offset] = value; } __threadfence(); // make sure writes are globally visible __syncthreads(); // if multiple warps in this block wrote to staging, // make sure they're all done bool is_last_block_done = MarkBlockFinished(); if (is_last_block_done) { value = identity_; if (config_.ShouldBlockXReduce()) { index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x; index_t step = blockDim.x * blockDim.y; for (; input_offset < config_.ctas_per_output_; input_offset += step) { index_t idx = config_.StagingMemoryOffset(input_offset); arg_t next = reduce_buffer[idx]; value = ops_.Combine(value, next); } } else { index_t input_offset = threadIdx.y; index_t step = blockDim.y; for (; input_offset < config_.ctas_per_output_; input_offset += step) { index_t idx = config_.StagingMemoryOffset(input_offset); arg_t next = reduce_buffer[idx]; value = ops_.Combine(value, next); } } value = BlockYReduce(value, shared_memory); if (config_.ShouldBlockXReduce()) { value = BlockXReduce(value, shared_memory); } if (should_store) { if (acc == nullptr) { if (accumulate_) { value = AccumulateInOutput<can_accumulate_in_output>( out, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *out = GetAccumulatedOutput<can_accumulate_in_output>( out, value); } } else { if (accumulate_) { value = ops_.Combine(*acc, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *acc = value; } } } } return value; } private: static constexpr bool can_accumulate_in_output = std::is_convertible<arg_t, out_scalar_t>::value && std::is_convertible<out_scalar_t, arg_t>::value; static constexpr float acc_buffer_multiplier = (float)sizeof(arg_t) / sizeof(out_scalar_t); ops_t ops_; ReduceConfig config_; InputCalculator input_calc_; OutputCalculator output_calc_; const void* src_; const char* dst_; // acc_buf_ used for accumulation among sub Tensor Iterator when // accumulation on output is not permissible void* acc_buf_; // cta_buf_ used for accumulation between blocks during global reduction void* 
cta_buf_; int* semaphores_; arg_t identity_; bool accumulate_; bool final_output_; }; class AccumulationBuffer { public: AccumulationBuffer() {} AccumulationBuffer(int64_t acc_t_size, int64_t out_t_size, char* out_ptr, int64_t size) { out_ptr_ = (char*)out_ptr; if (out_t_size >= acc_t_size) { // reusing output buffer for accumulation. acc_ptr_ = (char*)out_ptr; numerator_ = 1; denominator_ = 1; } else { int device_id = CUDAState::GetInstance()->GetCurrentDeviceID(); Device device(Device::DeviceType::CUDA, device_id); buffer_ = std::make_unique<Blob>(size, device); acc_ptr_ = (char*)buffer_->GetDataPtr(); numerator_ = acc_t_size; denominator_ = out_t_size; ReduceFraction(numerator_, denominator_); } } char* GetAccSlice(char* out_ptr) { if (numerator_ == -1 || acc_ptr_ == nullptr) { return nullptr; } return acc_ptr_ + ((out_ptr - out_ptr_) * numerator_ / denominator_); } private: std::unique_ptr<Blob> buffer_; char* acc_ptr_ = nullptr; char* out_ptr_ = nullptr; float size_factor_ = -1; int64_t numerator_ = -1; int64_t denominator_ = -1; }; class CUDAReductionEngine { public: CUDAReductionEngine(const CUDAReductionEngine&) = delete; CUDAReductionEngine& operator=(const CUDAReductionEngine&) = delete; CUDAReductionEngine(const Indexer& indexer) : indexer_(indexer) {} template <typename func_t, typename scalar_t> void Run(const func_t& reduce_func, scalar_t identity) { if (indexer_.NumWorkloads() == 0) { utility::LogError( "0-sized input should be handled outside of the reudction " "engine."); } if (indexer_.NumInputs() != 1) { utility::LogError("Reduction op must have exactly one input."); } OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t); using arg0_t = typename BinaryFunctionTraits<func_t>::arg0_t; using arg1_t = typename BinaryFunctionTraits<func_t>::arg1_t; if (!std::is_same<scalar_t, arg0_t>::value || !std::is_same<scalar_t, arg1_t>::value) { utility::LogError( "Function input type must match with the identity's type."); } using res_t = typename BinaryFunctionTraits<func_t>::res_t; if (std::is_same<res_t, bool>::value) { // func_t is a comparison function (for arg-reduction). // Signature: (scalar_t, scalar_t) -> bool. RunReduce<scalar_t, int64_t>( indexer_, WrapArgReduceOps(reduce_func), thrust::pair<scalar_t, int64_t>(identity, 0)); } else { // func_t is a regular reduction function. // Signature: (scalar_t, scalar_t) -> scalar_t. RunReduce<scalar_t, scalar_t>( indexer_, WrapRegularReduceOps<scalar_t>(reduce_func), identity); } } private: /// If the index cannot be represented in 32 bits, RunReduce calls itself /// recursively. template <typename scalar_t, typename out_scalar_t, int vt0 = 4, typename ops_t, typename ident_t> static void RunReduce(Indexer& indexer, const ops_t& ops, ident_t identity, AccumulationBuffer* acc_buf_ptr = nullptr) { using traits = FunctionTraits<decltype(&ops_t::Reduce)>; using arg_t = typename traits::template arg<0>::type; static constexpr bool can_accumulate_in_output = std::is_convertible<arg_t, out_scalar_t>::value; bool can_use_32bit_indexing = indexer.CanUse32BitIndexing(); std::unique_ptr<AccumulationBuffer> owned_buf_ptr; // The acc_buf_ptr is a shared pointer. It is create at the first // entrance reused by all recursive function calls. if (acc_buf_ptr == nullptr) { // acc_buf_ptr holds buffer used for accumulation among multiple // sub_iter when accumulation in output is not possible. 
if (!can_accumulate_in_output && !can_use_32bit_indexing) { int64_t output_memory_size = 1; for (int dim = 0; dim < indexer.NumDims(); dim++) { output_memory_size = ::max( output_memory_size, indexer.GetMasterShape()[dim] * indexer.GetOutput().byte_strides_[dim]); } owned_buf_ptr.reset(new AccumulationBuffer( sizeof(arg_t), sizeof(out_scalar_t), (char*)indexer.GetOutput().data_ptr_, output_memory_size * sizeof(arg_t))); } else { owned_buf_ptr.reset(new AccumulationBuffer()); } acc_buf_ptr = owned_buf_ptr.get(); } if (!can_use_32bit_indexing) { for (auto& sub_indexer : indexer.SplitTo32BitIndexing()) { RunReduce<scalar_t, out_scalar_t, vt0>(sub_indexer, ops, identity, acc_buf_ptr); } return; } ReduceConfig config(sizeof(arg_t), indexer); std::unique_ptr<Blob> buffer_blob; std::unique_ptr<Blob> semaphores_blob; void* buffer = nullptr; void* semaphores = nullptr; if (config.ShouldGlobalReduce()) { int device_id = CUDAState::GetInstance()->GetCurrentDeviceID(); Device device(Device::DeviceType::CUDA, device_id); buffer_blob = std::make_unique<Blob>(config.GlobalMemorySize(), device); semaphores_blob = std::make_unique<Blob>(config.SemaphoreSize(), device); buffer = buffer_blob->GetDataPtr(); semaphores = semaphores_blob->GetDataPtr(); OPEN3D_CUDA_CHECK( hipMemset(semaphores, 0, config.SemaphoreSize())); } OPEN3D_ASSERT(can_use_32bit_indexing); const char* in_data = (char*)indexer.GetInput(0).data_ptr_; char* out_data = (char*)indexer.GetOutput().data_ptr_; char* acc_data = acc_buf_ptr->GetAccSlice(out_data); auto output_calc = MakeOutputCalculator<uint32_t>(indexer); auto input_calc = MakeInputCalculator<uint32_t>(indexer); auto reduce_op = ReduceOp<scalar_t, ops_t, uint32_t, out_scalar_t, vt0>( ops, config, input_calc, output_calc, in_data, out_data, acc_data, buffer, (int*)semaphores, identity, indexer.ShouldAccumulate(), indexer.IsFinalOutput()); // Launch reduce kernel int shared_memory = config.SharedMemorySize(); hipLaunchKernelGGL(( ReduceKernel<ReduceConfig::MAX_NUM_THREADS>) , dim3(config.GridDim()), dim3(config.BlockDim()), shared_memory, core::cuda::GetStream(), reduce_op); cuda::Synchronize(); OPEN3D_CUDA_CHECK(hipGetLastError()); } private: Indexer indexer_; }; void ReductionCUDA(const Tensor& src, Tensor& dst, const SizeVector& dims, bool keepdim, ReductionOpCode op_code) { if (s_regular_reduce_ops.find(op_code) != s_regular_reduce_ops.end()) { Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims); CUDAReductionEngine re(indexer); Dtype dtype = src.GetDtype(); CUDAScopedDevice scoped_device(src.GetDevice()); DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() { switch (op_code) { case ReductionOpCode::Sum: if (indexer.NumWorkloads() == 0) { // 0-sized input can be reduced to non-0-sized outputs, // where identity elements should be filled. // E.g. np.sum(np.ones((0, 5)), axis=0).shape == (5,). dst.Fill(0); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a + b; }, static_cast<scalar_t>(0)); } break; case ReductionOpCode::Prod: if (indexer.NumWorkloads() == 0) { dst.Fill(1); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a * b; }, static_cast<scalar_t>(1)); } break; case ReductionOpCode::Min: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport Min."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a < b ? 
a : b; }, static_cast<scalar_t>( std::numeric_limits<scalar_t>::max())); } break; case ReductionOpCode::Max: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport Max."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a > b ? a : b; }, static_cast<scalar_t>(std::numeric_limits< scalar_t>::lowest())); } break; default: utility::LogError("Unsupported op code."); break; } }); } else if (s_arg_reduce_ops.find(op_code) != s_arg_reduce_ops.end()) { if (dst.GetDtype() != core::Int64) { utility::LogError("Arg-reduction must have int64 output dtype."); } Indexer indexer({src}, dst, DtypePolicy::INPUT_SAME, dims); CUDAReductionEngine re(indexer); Dtype dtype = src.GetDtype(); CUDAScopedDevice scoped_device(src.GetDevice()); DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() { switch (op_code) { case ReductionOpCode::ArgMin: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport ArgMin."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> bool { return a < b; }, static_cast<scalar_t>( std::numeric_limits<scalar_t>::max())); } break; case ReductionOpCode::ArgMax: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport ArgMax."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> bool { return a > b; }, static_cast<scalar_t>(std::numeric_limits< scalar_t>::lowest())); } break; default: utility::LogError("Unsupported op code."); break; } }); } else if (s_boolean_reduce_ops.find(op_code) != s_boolean_reduce_ops.end()) { if (src.GetDtype() != core::Bool) { utility::LogError( "Boolean reduction only supports boolean input tensor."); } if (dst.GetDtype() != core::Bool) { utility::LogError( "Boolean reduction only supports boolean output tensor."); } Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims); CUDAReductionEngine re(indexer); CUDAScopedDevice scoped_device(src.GetDevice()); switch (op_code) { case ReductionOpCode::All: if (indexer.NumWorkloads() == 0) { dst.Fill(true); } else { re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b) -> uint8_t { return a && b; }, static_cast<uint8_t>(true)); } break; case ReductionOpCode::Any: if (indexer.NumWorkloads() == 0) { dst.Fill(false); } else { re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b) -> uint8_t { return a || b; }, static_cast<uint8_t>(false)); } break; default: utility::LogError("Unsupported op code."); break; } } else { utility::LogError("Unsupported op code."); } } } // namespace kernel } // namespace core } // namespace open3d
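// ---------------------------------------------------------------------------
// Hedged sketch (not part of the Open3D sources above): a self-contained CUDA
// block-sum reduction that mirrors the WARP_SHFL_DOWN / BlockXReduce pattern
// used by the reduction engine. Kernel and variable names here are
// illustrative only; the sketch assumes blockDim.x is a multiple of warpSize
// and that *out has been zeroed before launch.
#include <cuda_runtime.h>

__global__ void BlockSumSketch(const float* in, float* out, int n) {
    __shared__ float warp_sums[32];                  // one partial per warp
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    float v = (tid < n) ? in[tid] : 0.0f;

    // Intra-warp tree reduction via shuffle-down, as in WARP_SHFL_DOWN.
    for (int offset = warpSize / 2; offset > 0; offset >>= 1) {
        v += __shfl_down_sync(0xffffffff, v, offset);
    }

    int lane = threadIdx.x % warpSize;
    int warp = threadIdx.x / warpSize;
    if (lane == 0) warp_sums[warp] = v;              // lane 0 holds the warp sum
    __syncthreads();

    // The first warp combines the per-warp partials.
    if (warp == 0) {
        int num_warps = blockDim.x / warpSize;
        v = (lane < num_warps) ? warp_sums[lane] : 0.0f;
        for (int offset = warpSize / 2; offset > 0; offset >>= 1) {
            v += __shfl_down_sync(0xffffffff, v, offset);
        }
        if (lane == 0) atomicAdd(out, v);            // one atomic per block
    }
}
// Possible launch (sizes assumed):
// BlockSumSketch<<<(n + 255) / 256, 256>>>(d_in, d_out, n);
// ---------------------------------------------------------------------------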
364a819bafb30bf048b15dff6f515c9e7efdd9a5.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #include <cuda.h> #include <cuda_runtime.h> #include <thrust/pair.h> #include <thrust/tuple.h> #include <algorithm> #include <array> #include <limits> #include <sstream> #include <tuple> #include <type_traits> #include "open3d/core/Blob.h" #include "open3d/core/CUDAUtils.h" #include "open3d/core/Device.h" #include "open3d/core/Dispatch.h" #include "open3d/core/FunctionTraits.h" #include "open3d/core/Indexer.h" #include "open3d/core/MemoryManager.h" #include "open3d/core/ParallelFor.h" #include "open3d/core/SizeVector.h" #include "open3d/core/Tensor.h" #include "open3d/core/kernel/Reduction.h" #include "open3d/utility/Logging.h" // CUDA reduction is based on PyTorch's CUDA reduction implementation. // See: aten/src/ATen/native/cuda/Reduce.cuh #if __CUDA_ARCH__ >= 750 constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 1024; #else constexpr uint32_t CUDA_MAX_THREADS_PER_SM = 2048; #endif constexpr uint32_t CUDA_MAX_THREADS_PER_BLOCK = 1024; constexpr uint32_t CUDA_THREADS_PER_BLOCK_FALLBACK = 256; #define OPEN3D_MAX_THREADS_PER_BLOCK(val) \ (((val) <= CUDA_MAX_THREADS_PER_BLOCK) ? (val) \ : CUDA_THREADS_PER_BLOCK_FALLBACK) #define OPEN3D_MIN_BLOCKS_PER_SM(threads_per_block, blocks_per_sm) \ ((((threads_per_block) * (blocks_per_sm) <= CUDA_MAX_THREADS_PER_SM) \ ? 
(blocks_per_sm) \ : ((CUDA_MAX_THREADS_PER_SM + (threads_per_block)-1) / \ (threads_per_block)))) #define OPEN3D_LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \ __launch_bounds__((OPEN3D_MAX_THREADS_PER_BLOCK((max_threads_per_block))), \ (OPEN3D_MIN_BLOCKS_PER_SM((max_threads_per_block), \ (min_blocks_per_sm)))) template <typename T> OPEN3D_DEVICE __forceinline__ T WARP_SHFL_DOWN(T value, unsigned int delta, int width = warpSize, unsigned int mask = 0xffffffff) { #if CUDA_VERSION >= 9000 return __shfl_down_sync(mask, value, delta, width); #else return __shfl_down(value, delta, width); #endif } namespace open3d { namespace core { namespace kernel { static inline int64_t DivUp(int64_t a, int64_t b) { return (a + b - 1) / b; } // Returns reduced fraction numerator & denominator OPEN3D_HOST_DEVICE static void ReduceFraction(int64_t& numerator, int64_t& denominator) { // Get GCD of num and denom using Euclid's algorithm. // Can replace this with std::gcd if we ever support c++17. int64_t a = denominator; int64_t b = numerator; while (b != 0) { a %= b; int64_t tmp = a; a = b; b = tmp; } // a is now the GCD numerator /= a; denominator /= a; } class ReduceConfig { public: static constexpr int BLOCK_X = 0; static constexpr int BLOCK_Y = 1; static constexpr int CTA = 2; static constexpr int MAX_NUM_THREADS = 512; int num_inputs_per_output_; int num_outputs_; int step_input_ = 1; int step_output_ = 1; int ctas_per_output_ = 1; private: int element_size_bytes_; int input_mult_[3] = {0, 0, 0}; int output_mult_[2] = {0, 0}; int block_width_; int block_height_; int num_threads_; public: ReduceConfig(int element_size_bytes, const Indexer& indexer) : element_size_bytes_(element_size_bytes) { num_outputs_ = indexer.NumOutputElements(); num_inputs_per_output_ = indexer.NumWorkloads() / num_outputs_; // Adjust block size to map block width to fastest changing dimension of // input tensor. This grants the best possible memory accessing pattern, // given that for non-contiguous tensor with space in between, we cannot // have perfect memory coalescing. bool reduction_on_fastest_striding_dimension = (indexer.NumReductionDims() == indexer.NumDims()) || (indexer.GetInput(0).byte_strides_[0] < indexer.GetInput(0).byte_strides_[indexer.NumReductionDims()]); // Notice that dim0 & dim1 does NOT guarantee any launch configuration // here! dim0 & dim1 are more like the upper bound of the block // dimension. The actual launch config and reduction scheme is // determined by setting values to `input_mult_` and // `output_mult_`. We try to max out dim1 so that we have enough // threads per CTA to deliver performance for larger problem size. int64_t dim0; int64_t dim1; if (reduction_on_fastest_striding_dimension) { // Map block.x to the fastest reducing dimension. It implies: // 1. BlockXReduce is required. // 2. block.y now max out to num_outputs. dim0 = indexer.GetMasterShape()[0]; dim1 = num_outputs_; } else { // Map block.x to the fastest non reducing dimension. It implies: // 1. BlockXReduce is turned off. // 2. block.y now max out to num_inputs_per_output_. dim0 = indexer.GetMasterShape()[indexer.NumReductionDims()]; dim1 = num_inputs_per_output_; } // Adjust block_width and block_height SetBlockDimension(dim0, dim1); int block_width = block_width_; int block_height = block_height_; if (indexer.NumDims() == 0 || reduction_on_fastest_striding_dimension) { // Split the input across lanes if the input is contiguous in the // reduced dimension. 
This will require reduction between threads // using warp shuffle instructions and shared memory (if // block_width > warpSize). input_mult_[0] = SplitInput(block_width); } else { // Otherwise split the output across lanes in a warp. output_mult_[0] = SplitOutput(block_width); } if (ValuesPerThread() >= block_height * 16 || ValuesPerThread() >= 256) { // Divide the input across warps in a thread-block, if that leaves // at least 16 elements to be summed by each thread. This will // require inter-warp reduction using shared memory. input_mult_[1] = SplitInput(block_height); } else { // Otherwise, each warp handles a separate output. output_mult_[1] = SplitOutput(block_height); } if (input_mult_[1] != 0 && ValuesPerThread() >= 256 && num_outputs_ <= 4096) { // Divide the input across thread-blocks if the amount of work // per-thread is large enough and the size of the output is small // enough. This will require a reduction using global memory. ctas_per_output_ = DivUp(ValuesPerThread(), 16); if (ctas_per_output_ > 65535) { ctas_per_output_ = 65535; } input_mult_[2] = SplitInput(ctas_per_output_); } } /// Returns floor(log2(n)) static inline int LastPow2(int n) { // Dtype.h asserts sizeof(int) == 4. n |= (n >> 1); n |= (n >> 2); n |= (n >> 4); n |= (n >> 8); n |= (n >> 16); return std::max(1, n - (n >> 1)); } void SetBlockDimension(int64_t dim0, int64_t dim1) { int dim0_pow2 = dim0 < MAX_NUM_THREADS ? static_cast<int>(LastPow2(dim0)) : MAX_NUM_THREADS; int dim1_pow2 = dim1 < MAX_NUM_THREADS ? static_cast<int>(LastPow2(dim1)) : MAX_NUM_THREADS; block_width_ = std::min(dim0_pow2, CUDAState::GetInstance()->GetWarpSize()); block_height_ = std::min(dim1_pow2, int(MAX_NUM_THREADS / block_width_)); block_width_ = std::min(dim0_pow2, int(MAX_NUM_THREADS / block_height_)); num_threads_ = block_width_ * block_height_; } int SplitInput(int parallelism) { int step = step_input_; step_input_ *= parallelism; return step; } int SplitOutput(int parallelism) { int step = step_output_; step_output_ *= parallelism; return step; } dim3 BlockDim() const { return dim3(block_width_, block_height_); } dim3 GridDim() const { return dim3(DivUp(num_outputs_, step_output_), ctas_per_output_); } OPEN3D_HOST_DEVICE bool ShouldBlockXReduce() const { return input_mult_[BLOCK_X] != 0; } OPEN3D_HOST_DEVICE bool ShouldBlockYReduce() const { return input_mult_[BLOCK_Y] != 0; } OPEN3D_HOST_DEVICE bool ShouldGlobalReduce() const { return input_mult_[CTA] != 0; } OPEN3D_DEVICE bool ShouldStore(int output_idx) const { return output_idx < num_outputs_ && (!ShouldBlockXReduce() || threadIdx.x == 0) && (!ShouldBlockYReduce() || threadIdx.y == 0); } OPEN3D_HOST_DEVICE int InputIdx() const { int lane = threadIdx.x; int warp = threadIdx.y; int cta2 = blockIdx.y; return (lane * input_mult_[BLOCK_X] + warp * input_mult_[BLOCK_Y] + cta2 * input_mult_[CTA]); } OPEN3D_HOST_DEVICE int OutputIdx() const { int lane = threadIdx.x; int warp = threadIdx.y; int cta1 = blockIdx.x; return (lane * output_mult_[BLOCK_X] + warp * output_mult_[BLOCK_Y] + cta1 * step_output_); } OPEN3D_DEVICE int SharedMemoryOffset(int offset) const { return threadIdx.x + (threadIdx.y + offset) * blockDim.x; } OPEN3D_DEVICE int StagingMemoryOffset(int cta2) const { int offset = cta2 + blockIdx.x * gridDim.y; if (!ShouldBlockXReduce()) { offset = threadIdx.x + offset * blockDim.x; } return offset; } int SharedMemorySize() const { if (!ShouldBlockYReduce() && (!ShouldBlockXReduce() || block_width_ <= CUDAState::GetInstance()->GetWarpSize())) { return 0; } return 
element_size_bytes_ * num_threads_; } int64_t GlobalMemorySize() const { if (!ShouldGlobalReduce()) { return 0; } auto size = (int64_t)element_size_bytes_ * num_outputs_ * ctas_per_output_; if (!ShouldBlockXReduce()) { size *= BlockDim().x; } return size; } int SemaphoreSize() const { if (!ShouldGlobalReduce()) { return 0; } return sizeof(int) * GridDim().x; } int ValuesPerThread() const { return DivUp(num_inputs_per_output_, step_input_); } std::string ToString() const { std::string input_mult_str = fmt::format( "[{},{},{}]", input_mult_[0], input_mult_[1], input_mult_[2]); std::string output_mult_str = fmt::format("[{},{}]", output_mult_[0], output_mult_[1]); std::string block_str = fmt::format("[{},{},{}]", BlockDim().x, BlockDim().y, BlockDim().z); std::string grid_str = fmt::format("[{},{},{}]", GridDim().x, GridDim().y, GridDim().z); std::string str = fmt::format( "REDUCEConfig(element_size_bytes_={}, " "num_inputs_per_output_={}, num_outputs_={}, " "step_input_={}, step_output_={}, ctas_per_output_={}, " "input_mult_={}, output_mult_={}, values_per_thread={}, " "block={}, grid={}, global_memory_size={})", element_size_bytes_, num_inputs_per_output_, num_outputs_, step_input_, step_output_, ctas_per_output_, input_mult_str, output_mult_str, ValuesPerThread(), block_str, grid_str, GlobalMemorySize()); return str; } }; template <int nt, typename R> OPEN3D_LAUNCH_BOUNDS_2(nt, 4) __global__ void ReduceKernel(R reduction) { reduction.Run(); } template <typename index_t> static OffsetCalculator<2, index_t> MakeOutputCalculator( const Indexer& indexer) { int num_reduction_dims = indexer.NumReductionDims(); int num_output_dims = indexer.NumDims() - num_reduction_dims; std::array<const int64_t*, 2> strides = { indexer.GetOutput().byte_strides_ + num_reduction_dims, indexer.GetInput(0).byte_strides_ + num_reduction_dims, }; const int64_t* shape = indexer.GetMasterShape() + num_reduction_dims; return OffsetCalculator<2, index_t>(num_output_dims, shape, strides.data()); } template <typename index_t> static OffsetCalculator<1, index_t> MakeInputCalculator( const Indexer& indexer) { int num_reduction_dims = indexer.NumReductionDims(); std::array<const int64_t*, 1> strides = { indexer.GetInput(0).byte_strides_, }; return OffsetCalculator<1, index_t>( num_reduction_dims, indexer.GetMasterShape(), strides.data()); } template <int vt, typename index_t, typename func_t> OPEN3D_DEVICE void StridedIterate(func_t f, index_t begin, index_t end, index_t stride) { if (begin + (vt - 1) * stride < end) { #pragma unroll for (index_t i = 0; i < vt; i++) { f(i, begin + i * stride); } } else { #pragma unroll for (index_t i = 0; i < vt; i++) { index_t idx = begin + i * stride; if (idx < end) { f(i, idx); } } } } /// Combime() and Reduce() are the same for regular reduction ops. template <typename out_scalar_t, typename func_t> class RegularReduceOps { using arg_t = typename BinaryFunctionTraits<func_t>::arg0_t; using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t; public: RegularReduceOps(const func_t& op) : reduce_func_(op) {} static inline OPEN3D_DEVICE out_scalar_t Project(arg_t arg) { return (out_scalar_t)arg; } static inline OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) { return WARP_SHFL_DOWN(arg, offset); } OPEN3D_DEVICE inline arg_t Combine(arg_t acc, scalar_t val) const { return reduce_func_(acc, val); } /// Idx is ignored for RegularReduceOps. 
OPEN3D_DEVICE inline arg_t Reduce(arg_t acc, scalar_t val, int64_t idx) const { return reduce_func_(acc, val); } private: func_t reduce_func_ = nullptr; }; template <typename scalar_t, typename func_t> RegularReduceOps<scalar_t, func_t> WrapRegularReduceOps(const func_t& op) { return RegularReduceOps<scalar_t, func_t>{op}; } template <typename func_t> class ArgReduceOps { using scalar_t = typename BinaryFunctionTraits<func_t>::arg1_t; using index_t = int64_t; using arg_t = thrust::pair<scalar_t, index_t>; public: ArgReduceOps(const func_t comp_func) : comp_func_(comp_func) {} static OPEN3D_DEVICE index_t Project(arg_t arg) { return arg.second; } static OPEN3D_DEVICE arg_t WarpShflDown(arg_t arg, int offset) { return arg_t(WARP_SHFL_DOWN(arg.first, offset), WARP_SHFL_DOWN(arg.second, offset)); } /// Combine(pair<val_t, idx_t>, pair<val_t, idx_t>) -> pair<val_t, idx_t>. /// Called at subsequent rounds of reduction, when values are already /// associated with indices. OPEN3D_DEVICE inline arg_t Combine(arg_t a, arg_t b) const { return comp_func_(a.first, b.first) ? a : b; } /// Reduce(pair<val_t, idx_t>, val_t, idx_t) -> pair<val_t, idx_t>. /// Called at the first round of reduction, when values are not yet /// associated with indices. OPEN3D_DEVICE inline arg_t Reduce(arg_t arg, scalar_t val, int64_t idx) const { return comp_func_(arg.first, val) ? arg : arg_t(val, idx); } private: func_t comp_func_ = nullptr; }; template <typename func_t> ArgReduceOps<func_t> WrapArgReduceOps(const func_t& comp_func) { return ArgReduceOps<func_t>{comp_func}; } template <typename scalar_t, typename ops_t, typename index_t, typename out_scalar_t = scalar_t, int vt0 = 4> class ReduceOp { using traits = FunctionTraits<decltype(&ops_t::Reduce)>; using arg_t = typename std::decay<typename traits::template arg<0>::type>::type; using InputCalculator = OffsetCalculator<1, index_t>; using OutputCalculator = OffsetCalculator<2, index_t>; public: ReduceOp(ops_t ops, ReduceConfig config, InputCalculator input_calc, OutputCalculator output_calc, const void* src, char* dst, void* acc_buf, void* cta_buf, int* semaphores, arg_t identity, bool accumulate, bool final_output) : ops_(ops), config_(config), input_calc_(input_calc), output_calc_(output_calc), src_(src), dst_(dst), acc_buf_(acc_buf), cta_buf_(cta_buf), semaphores_(semaphores), identity_(identity), accumulate_(accumulate), final_output_(final_output) {} OPEN3D_DEVICE void Run() const { extern __shared__ char shared_memory[]; index_t output_idx = config_.OutputIdx(); index_t input_idx = config_.InputIdx(); auto base_offsets = output_calc_.get(output_idx); arg_t value = identity_; if (output_idx < config_.num_outputs_ && input_idx < config_.num_inputs_per_output_) { auto input_slice = (const char*)src_ + base_offsets[1]; value = ThreadReduce((const scalar_t*)input_slice); } if (config_.ShouldBlockYReduce()) { value = BlockYReduce(value, shared_memory); } if (config_.ShouldBlockXReduce()) { value = BlockXReduce(value, shared_memory); } auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]); arg_t* acc = nullptr; if (acc_buf_ != nullptr) { int64_t numerator = (int64_t)sizeof(arg_t); int64_t denominator = (int64_t)sizeof(out_scalar_t); ReduceFraction(numerator, denominator); acc = (arg_t*)((char*)acc_buf_ + (base_offsets[0] * numerator / denominator)); } if (config_.ShouldGlobalReduce()) { value = GlobalReduce(value, acc, shared_memory); } else if (config_.ShouldStore(output_idx)) { if (acc == nullptr) { if (accumulate_) { value = 
AccumulateInOutput<can_accumulate_in_output>(out, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *out = GetAccumulatedOutput<can_accumulate_in_output>( out, value); } } else { if (accumulate_) { value = ops_.Combine(*acc, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *acc = value; } } } } OPEN3D_DEVICE arg_t ThreadReduce(const scalar_t* data) const { index_t idx = config_.InputIdx(); // Multiple accumulators to remove dependency between unrolled loops. arg_t value_list[vt0]; #pragma unroll for (int i = 0; i < vt0; i++) { value_list[i] = identity_; } index_t end = config_.num_inputs_per_output_; index_t stride = config_.step_input_; index_t element_stride = input_calc_.strides_[0][0] / sizeof(scalar_t); // Reducing layers of function calls so compiler could do proper loop // unroll that exposes instruction level parallelism. while (idx < config_.num_inputs_per_output_) { // load input SmallArray<scalar_t, vt0> values; if (input_calc_.dims_ == 1) { StridedIterate<vt0>( [&](index_t i, index_t idx) { values[i] = data[idx * element_stride]; }, idx, end, stride); } else { StridedIterate<vt0>( [&](index_t i, index_t idx) { values[i] = data[input_calc_.get(idx)[0] / sizeof(scalar_t)]; }, idx, end, stride); } // compute StridedIterate<vt0, index_t>( [&](index_t i, index_t idx) { value_list[i] = ops_.Reduce(value_list[i], values[i], idx); }, idx, config_.num_inputs_per_output_, config_.step_input_); // step offset idx += config_.step_input_ * vt0; } #pragma unroll for (int i = 1; i < vt0; i++) { value_list[0] = ops_.Combine(value_list[0], value_list[i]); } return value_list[0]; } OPEN3D_DEVICE arg_t BlockXReduce(arg_t value, char* shared_memory) const { int dim_x = blockDim.x; arg_t* shared = (arg_t*)shared_memory; if (dim_x > warpSize) { int address_base = threadIdx.x + threadIdx.y * blockDim.x; shared[address_base] = value; for (int offset = dim_x / 2; offset >= warpSize; offset >>= 1) { __syncthreads(); if (threadIdx.x < offset && threadIdx.x + offset < blockDim.x) { arg_t other = shared[address_base + offset]; value = ops_.Combine(value, other); shared[address_base] = value; } } dim_x = warpSize; } __syncthreads(); for (int offset = 1; offset < dim_x; offset <<= 1) { arg_t other = ops_.WarpShflDown(value, offset); value = ops_.Combine(value, other); } return value; } OPEN3D_DEVICE arg_t BlockYReduce(arg_t value, char* shared_memory) const { arg_t* shared = (arg_t*)shared_memory; shared[config_.SharedMemoryOffset(0)] = value; for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { __syncthreads(); if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) { arg_t other = shared[config_.SharedMemoryOffset(offset)]; value = ops_.Combine(value, other); shared[config_.SharedMemoryOffset(0)] = value; } } return value; } OPEN3D_DEVICE bool MarkBlockFinished() const { __shared__ bool is_last_block_done_shared; __syncthreads(); if (threadIdx.x == 0 && threadIdx.y == 0) { int prev_blocks_finished = atomicAdd(&semaphores_[blockIdx.x], 1); is_last_block_done_shared = (prev_blocks_finished == gridDim.y - 1); } __syncthreads(); return is_last_block_done_shared; } template <bool can_acc> OPEN3D_DEVICE arg_t AccumulateInOutput( out_scalar_t* out, arg_t value, typename std::enable_if<can_acc>::type* = nullptr) const { return ops_.Combine(*out, value); } // This function should never be called -- // it's the version of `AccumulateInOutput` // when accumulation in the output is not possible. 
template <bool can_acc> OPEN3D_DEVICE arg_t AccumulateInOutput( out_scalar_t*, arg_t, typename std::enable_if<!can_acc>::type* = nullptr) const { OPEN3D_ASSERT(false); return arg_t{}; } template <bool can_acc> OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput( out_scalar_t* out, arg_t value, typename std::enable_if<can_acc>::type* = nullptr) const { OPEN3D_ASSERT(!final_output_); return (out_scalar_t)value; } // This function should never be called -- // it's the version of `GetAccumulatedOutput` // when accumulation in the output is not possible. template <bool can_acc> OPEN3D_DEVICE out_scalar_t GetAccumulatedOutput( out_scalar_t* out, arg_t value, typename std::enable_if<!can_acc>::type* = nullptr) const { OPEN3D_ASSERT(false); return *out; } template <class T> OPEN3D_DEVICE void SetResults(const T x, const index_t base_offset) const { auto res = (out_scalar_t*)((char*)dst_ + base_offset); *res = x; } OPEN3D_DEVICE void SetResultsToOutput(arg_t value, index_t base_offset) const { OPEN3D_ASSERT(final_output_); SetResults(ops_.Project(value), base_offset); } OPEN3D_DEVICE arg_t GlobalReduce(arg_t value, arg_t* acc, char* shared_memory) const { arg_t* reduce_buffer = (arg_t*)cta_buf_; index_t output_idx = config_.OutputIdx(); auto base_offsets = output_calc_.get(output_idx); auto out = (out_scalar_t*)((char*)dst_ + base_offsets[0]); bool should_store = config_.ShouldStore(config_.OutputIdx()); if (should_store) { index_t offset = config_.StagingMemoryOffset(blockIdx.y); reduce_buffer[offset] = value; } __threadfence(); // make sure writes are globally visible __syncthreads(); // if multiple warps in this block wrote to staging, // make sure they're all done bool is_last_block_done = MarkBlockFinished(); if (is_last_block_done) { value = identity_; if (config_.ShouldBlockXReduce()) { index_t input_offset = threadIdx.x + threadIdx.y * blockDim.x; index_t step = blockDim.x * blockDim.y; for (; input_offset < config_.ctas_per_output_; input_offset += step) { index_t idx = config_.StagingMemoryOffset(input_offset); arg_t next = reduce_buffer[idx]; value = ops_.Combine(value, next); } } else { index_t input_offset = threadIdx.y; index_t step = blockDim.y; for (; input_offset < config_.ctas_per_output_; input_offset += step) { index_t idx = config_.StagingMemoryOffset(input_offset); arg_t next = reduce_buffer[idx]; value = ops_.Combine(value, next); } } value = BlockYReduce(value, shared_memory); if (config_.ShouldBlockXReduce()) { value = BlockXReduce(value, shared_memory); } if (should_store) { if (acc == nullptr) { if (accumulate_) { value = AccumulateInOutput<can_accumulate_in_output>( out, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *out = GetAccumulatedOutput<can_accumulate_in_output>( out, value); } } else { if (accumulate_) { value = ops_.Combine(*acc, value); } if (final_output_) { SetResultsToOutput(value, base_offsets[0]); } else { *acc = value; } } } } return value; } private: static constexpr bool can_accumulate_in_output = std::is_convertible<arg_t, out_scalar_t>::value && std::is_convertible<out_scalar_t, arg_t>::value; static constexpr float acc_buffer_multiplier = (float)sizeof(arg_t) / sizeof(out_scalar_t); ops_t ops_; ReduceConfig config_; InputCalculator input_calc_; OutputCalculator output_calc_; const void* src_; const char* dst_; // acc_buf_ used for accumulation among sub Tensor Iterator when // accumulation on output is not permissible void* acc_buf_; // cta_buf_ used for accumulation between blocks during global reduction void* 
cta_buf_; int* semaphores_; arg_t identity_; bool accumulate_; bool final_output_; }; class AccumulationBuffer { public: AccumulationBuffer() {} AccumulationBuffer(int64_t acc_t_size, int64_t out_t_size, char* out_ptr, int64_t size) { out_ptr_ = (char*)out_ptr; if (out_t_size >= acc_t_size) { // reusing output buffer for accumulation. acc_ptr_ = (char*)out_ptr; numerator_ = 1; denominator_ = 1; } else { int device_id = CUDAState::GetInstance()->GetCurrentDeviceID(); Device device(Device::DeviceType::CUDA, device_id); buffer_ = std::make_unique<Blob>(size, device); acc_ptr_ = (char*)buffer_->GetDataPtr(); numerator_ = acc_t_size; denominator_ = out_t_size; ReduceFraction(numerator_, denominator_); } } char* GetAccSlice(char* out_ptr) { if (numerator_ == -1 || acc_ptr_ == nullptr) { return nullptr; } return acc_ptr_ + ((out_ptr - out_ptr_) * numerator_ / denominator_); } private: std::unique_ptr<Blob> buffer_; char* acc_ptr_ = nullptr; char* out_ptr_ = nullptr; float size_factor_ = -1; int64_t numerator_ = -1; int64_t denominator_ = -1; }; class CUDAReductionEngine { public: CUDAReductionEngine(const CUDAReductionEngine&) = delete; CUDAReductionEngine& operator=(const CUDAReductionEngine&) = delete; CUDAReductionEngine(const Indexer& indexer) : indexer_(indexer) {} template <typename func_t, typename scalar_t> void Run(const func_t& reduce_func, scalar_t identity) { if (indexer_.NumWorkloads() == 0) { utility::LogError( "0-sized input should be handled outside of the reudction " "engine."); } if (indexer_.NumInputs() != 1) { utility::LogError("Reduction op must have exactly one input."); } OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t); using arg0_t = typename BinaryFunctionTraits<func_t>::arg0_t; using arg1_t = typename BinaryFunctionTraits<func_t>::arg1_t; if (!std::is_same<scalar_t, arg0_t>::value || !std::is_same<scalar_t, arg1_t>::value) { utility::LogError( "Function input type must match with the identity's type."); } using res_t = typename BinaryFunctionTraits<func_t>::res_t; if (std::is_same<res_t, bool>::value) { // func_t is a comparison function (for arg-reduction). // Signature: (scalar_t, scalar_t) -> bool. RunReduce<scalar_t, int64_t>( indexer_, WrapArgReduceOps(reduce_func), thrust::pair<scalar_t, int64_t>(identity, 0)); } else { // func_t is a regular reduction function. // Signature: (scalar_t, scalar_t) -> scalar_t. RunReduce<scalar_t, scalar_t>( indexer_, WrapRegularReduceOps<scalar_t>(reduce_func), identity); } } private: /// If the index cannot be represented in 32 bits, RunReduce calls itself /// recursively. template <typename scalar_t, typename out_scalar_t, int vt0 = 4, typename ops_t, typename ident_t> static void RunReduce(Indexer& indexer, const ops_t& ops, ident_t identity, AccumulationBuffer* acc_buf_ptr = nullptr) { using traits = FunctionTraits<decltype(&ops_t::Reduce)>; using arg_t = typename traits::template arg<0>::type; static constexpr bool can_accumulate_in_output = std::is_convertible<arg_t, out_scalar_t>::value; bool can_use_32bit_indexing = indexer.CanUse32BitIndexing(); std::unique_ptr<AccumulationBuffer> owned_buf_ptr; // The acc_buf_ptr is a shared pointer. It is create at the first // entrance reused by all recursive function calls. if (acc_buf_ptr == nullptr) { // acc_buf_ptr holds buffer used for accumulation among multiple // sub_iter when accumulation in output is not possible. 
if (!can_accumulate_in_output && !can_use_32bit_indexing) { int64_t output_memory_size = 1; for (int dim = 0; dim < indexer.NumDims(); dim++) { output_memory_size = std::max( output_memory_size, indexer.GetMasterShape()[dim] * indexer.GetOutput().byte_strides_[dim]); } owned_buf_ptr.reset(new AccumulationBuffer( sizeof(arg_t), sizeof(out_scalar_t), (char*)indexer.GetOutput().data_ptr_, output_memory_size * sizeof(arg_t))); } else { owned_buf_ptr.reset(new AccumulationBuffer()); } acc_buf_ptr = owned_buf_ptr.get(); } if (!can_use_32bit_indexing) { for (auto& sub_indexer : indexer.SplitTo32BitIndexing()) { RunReduce<scalar_t, out_scalar_t, vt0>(sub_indexer, ops, identity, acc_buf_ptr); } return; } ReduceConfig config(sizeof(arg_t), indexer); std::unique_ptr<Blob> buffer_blob; std::unique_ptr<Blob> semaphores_blob; void* buffer = nullptr; void* semaphores = nullptr; if (config.ShouldGlobalReduce()) { int device_id = CUDAState::GetInstance()->GetCurrentDeviceID(); Device device(Device::DeviceType::CUDA, device_id); buffer_blob = std::make_unique<Blob>(config.GlobalMemorySize(), device); semaphores_blob = std::make_unique<Blob>(config.SemaphoreSize(), device); buffer = buffer_blob->GetDataPtr(); semaphores = semaphores_blob->GetDataPtr(); OPEN3D_CUDA_CHECK( cudaMemset(semaphores, 0, config.SemaphoreSize())); } OPEN3D_ASSERT(can_use_32bit_indexing); const char* in_data = (char*)indexer.GetInput(0).data_ptr_; char* out_data = (char*)indexer.GetOutput().data_ptr_; char* acc_data = acc_buf_ptr->GetAccSlice(out_data); auto output_calc = MakeOutputCalculator<uint32_t>(indexer); auto input_calc = MakeInputCalculator<uint32_t>(indexer); auto reduce_op = ReduceOp<scalar_t, ops_t, uint32_t, out_scalar_t, vt0>( ops, config, input_calc, output_calc, in_data, out_data, acc_data, buffer, (int*)semaphores, identity, indexer.ShouldAccumulate(), indexer.IsFinalOutput()); // Launch reduce kernel int shared_memory = config.SharedMemorySize(); ReduceKernel<ReduceConfig::MAX_NUM_THREADS> <<<config.GridDim(), config.BlockDim(), shared_memory, core::cuda::GetStream()>>>(reduce_op); cuda::Synchronize(); OPEN3D_CUDA_CHECK(cudaGetLastError()); } private: Indexer indexer_; }; void ReductionCUDA(const Tensor& src, Tensor& dst, const SizeVector& dims, bool keepdim, ReductionOpCode op_code) { if (s_regular_reduce_ops.find(op_code) != s_regular_reduce_ops.end()) { Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims); CUDAReductionEngine re(indexer); Dtype dtype = src.GetDtype(); CUDAScopedDevice scoped_device(src.GetDevice()); DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() { switch (op_code) { case ReductionOpCode::Sum: if (indexer.NumWorkloads() == 0) { // 0-sized input can be reduced to non-0-sized outputs, // where identity elements should be filled. // E.g. np.sum(np.ones((0, 5)), axis=0).shape == (5,). dst.Fill(0); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a + b; }, static_cast<scalar_t>(0)); } break; case ReductionOpCode::Prod: if (indexer.NumWorkloads() == 0) { dst.Fill(1); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a * b; }, static_cast<scalar_t>(1)); } break; case ReductionOpCode::Min: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport Min."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a < b ? 
a : b; }, static_cast<scalar_t>( std::numeric_limits<scalar_t>::max())); } break; case ReductionOpCode::Max: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport Max."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> scalar_t { return a > b ? a : b; }, static_cast<scalar_t>(std::numeric_limits< scalar_t>::lowest())); } break; default: utility::LogError("Unsupported op code."); break; } }); } else if (s_arg_reduce_ops.find(op_code) != s_arg_reduce_ops.end()) { if (dst.GetDtype() != core::Int64) { utility::LogError("Arg-reduction must have int64 output dtype."); } Indexer indexer({src}, dst, DtypePolicy::INPUT_SAME, dims); CUDAReductionEngine re(indexer); Dtype dtype = src.GetDtype(); CUDAScopedDevice scoped_device(src.GetDevice()); DISPATCH_DTYPE_TO_TEMPLATE(dtype, [&]() { switch (op_code) { case ReductionOpCode::ArgMin: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport ArgMin."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> bool { return a < b; }, static_cast<scalar_t>( std::numeric_limits<scalar_t>::max())); } break; case ReductionOpCode::ArgMax: if (indexer.NumWorkloads() == 0) { utility::LogError( "Zero-size Tensor does not suport ArgMax."); } else { re.Run([] OPEN3D_HOST_DEVICE(scalar_t a, scalar_t b) -> bool { return a > b; }, static_cast<scalar_t>(std::numeric_limits< scalar_t>::lowest())); } break; default: utility::LogError("Unsupported op code."); break; } }); } else if (s_boolean_reduce_ops.find(op_code) != s_boolean_reduce_ops.end()) { if (src.GetDtype() != core::Bool) { utility::LogError( "Boolean reduction only supports boolean input tensor."); } if (dst.GetDtype() != core::Bool) { utility::LogError( "Boolean reduction only supports boolean output tensor."); } Indexer indexer({src}, dst, DtypePolicy::ALL_SAME, dims); CUDAReductionEngine re(indexer); CUDAScopedDevice scoped_device(src.GetDevice()); switch (op_code) { case ReductionOpCode::All: if (indexer.NumWorkloads() == 0) { dst.Fill(true); } else { re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b) -> uint8_t { return a && b; }, static_cast<uint8_t>(true)); } break; case ReductionOpCode::Any: if (indexer.NumWorkloads() == 0) { dst.Fill(false); } else { re.Run([] OPEN3D_HOST_DEVICE(uint8_t a, uint8_t b) -> uint8_t { return a || b; }, static_cast<uint8_t>(false)); } break; default: utility::LogError("Unsupported op code."); break; } } else { utility::LogError("Unsupported op code."); } } } // namespace kernel } // namespace core } // namespace open3d
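// ---------------------------------------------------------------------------
// Hedged sketch of the MarkBlockFinished / GlobalReduce handshake used above:
// each block stores its partial sum to a staging buffer, a __threadfence()
// makes that write globally visible, and the last block to increment the
// semaphore folds all partials into the final result. Names are illustrative;
// this assumes a power-of-two blockDim.x, `staging` sized to gridDim.x floats,
// a single-int semaphore zeroed before each launch (as the code above does
// with cudaMemset), and dynamic shared memory of blockDim.x * sizeof(float).
#include <cuda_runtime.h>

__global__ void TwoPassSumSketch(const float* in, float* staging,
                                 int* semaphore, float* out, int n) {
    extern __shared__ float smem[];

    // Per-thread partial over a grid-stride range.
    float v = 0.0f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x) {
        v += in[i];
    }

    // Block-level tree reduction in shared memory.
    smem[threadIdx.x] = v;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) smem[threadIdx.x] += smem[threadIdx.x + s];
        __syncthreads();
    }

    __shared__ bool is_last_block;
    if (threadIdx.x == 0) {
        staging[blockIdx.x] = smem[0];
        __threadfence();                       // partial visible before the atomic
        int done = atomicAdd(semaphore, 1);    // count blocks that finished
        is_last_block = (done == gridDim.x - 1);
    }
    __syncthreads();

    if (is_last_block) {
        // The last block alone folds the per-block partials.
        float total = 0.0f;
        for (int b = threadIdx.x; b < gridDim.x; b += blockDim.x) {
            total += staging[b];
        }
        smem[threadIdx.x] = total;
        __syncthreads();
        for (int s = blockDim.x / 2; s > 0; s >>= 1) {
            if (threadIdx.x < s) smem[threadIdx.x] += smem[threadIdx.x + s];
            __syncthreads();
        }
        if (threadIdx.x == 0) *out = smem[0];
    }
}
// Possible launch (assumed sizes):
// TwoPassSumSketch<<<grid, 256, 256 * sizeof(float)>>>(d_in, d_staging, d_sem, d_out, n);
// ---------------------------------------------------------------------------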
f8c73bb59bf647c51ca09cbf1f1c91b0348cf994.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <math.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef PAPI # include <papi.h> #endif #include "timemory-interface/interface.h" __global__ void MatAdd(float* A, float* B, float* C, int n, int taskperItem) { // Get our global thread ID int global_id = blockIdx.x * blockDim.x * taskperItem + threadIdx.x; for(int t = 0; t < taskperItem; t++) { int i = t * blockDim.x + global_id; // Make sure we do not go out of bounds if(i < n) C[i] = A[i] + B[i]; } } int matrix_vt_create(int nlin, int ncol, float* m, int rank) { for(int i = 0; i < nlin; i++) for(int j = 0; j < ncol; j++) m[j + i * ncol] = i + (nlin * rank) + j * 2; return 0; } #ifdef PROFILING void matrix_vt_print(int nlin, int ncol, float* m) { for(int i = 0; i < nlin; i++) { for(int j = 0; j < ncol; j++) fprintf(stderr, "%.3f ", m[j + i * ncol]); fprintf(stderr, "\n"); } } #endif int main(int argc, char** argv) { int event_count = 9; const char* events[] = { "infiniband:::mlx5_0_1_ext:port_xmit_data", "infiniband:::mlx5_0_1_ext:port_rcv_data", "infiniband:::mlx5_2_1_ext:port_xmit_data", "infiniband:::mlx5_2_1_ext:port_rcv_data", "infiniband:::mlx5_4_1_ext:port_xmit_data", "infiniband:::mlx5_4_1_ext:port_rcv_data", "infiniband:::mlx5_6_1_ext:port_xmit_data", "infiniband:::mlx5_6_1_ext:port_rcv_data" }; #ifdef PAPI int event_set = PAPI_NULL; long long values[event_count]; #else set_papi_events(event_count, events); #endif int size, rank; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); initialize(&argc, &argv); push_region("main"); int nlin, ncol; if(argc < 3) { nlin = 8; ncol = 8; } else { nlin = atoi(argv[1]); ncol = atoi(argv[2]); } #ifdef PAPI int nodes, cpn; if(argc < 5) { // Run the test on this many nodes. nodes = 2; // Run this many ranks per node in the test. 
cpn = 40; } else { nodes = atoi(argv[3]); cpn = atoi(argv[4]); } #endif float* matrix_C = (float*) malloc(nlin * ncol * sizeof(float)); if(matrix_C == NULL) { fprintf(stderr, "Error in Matrix A allocation.\n"); return 1; } // Host input and output vectors // Allocate memory for each vector on host float *vec_A, *vec_B, *vec_C; #ifdef PINNED hipError_t status = hipHostMalloc((void**) &vec_A, (nlin / size) * ncol * sizeof(float)); if(status != hipSuccess) { fprintf(stderr, "Error in pinned vector A allocation.\n"); return 1; } status = hipHostMalloc((void**) &vec_B, (nlin / size) * ncol * sizeof(float)); if(status != hipSuccess) { fprintf(stderr, "Error in pinned vector B allocation.\n"); return 1; } status = hipHostMalloc((void**) &vec_C, (nlin / size) * ncol * sizeof(float)); if(status != hipSuccess) { fprintf(stderr, "Error in pinned vector C allocation.\n"); return 1; } #else vec_A = (float*) malloc((nlin / size) * ncol * sizeof(float)); if(vec_A == NULL) { fprintf(stderr, "Error in vector A allocation.\n"); return 1; } vec_B = (float*) malloc((nlin / size) * ncol * sizeof(float)); if(vec_B == NULL) { fprintf(stderr, "Error in vector B allocation.\n"); return 1; } vec_C = (float*) malloc((nlin / size) * ncol * sizeof(float)); if(vec_C == NULL) { fprintf(stderr, "Error in vector C allocation.\n"); return 1; } #endif if(matrix_vt_create(nlin / size, ncol, vec_A, rank)) { fprintf(stderr, "Error in vector A creation.\n"); return 1; } if(matrix_vt_create(nlin / size, ncol, vec_B, rank)) { fprintf(stderr, "Error in vector B creation.\n"); return 1; } #ifdef PAPI PAPI_library_init(PAPI_VER_CURRENT); PAPI_create_eventset(&event_set); int code = 0; for(int i = 0; i < event_count; i++) { PAPI_event_name_to_code(events[i], &code); PAPI_add_event(event_set, code); } #endif // Device input and output vectors float *pA, *pB, *pC; // Allocate memory for each vector on device hipMalloc((void**) &pA, ((nlin / size) * ncol) * sizeof(float)); hipMalloc((void**) &pB, ((nlin / size) * ncol) * sizeof(float)); hipMalloc((void**) &pC, ((nlin / size) * ncol) * sizeof(float)); MPI_Datatype rowtype; MPI_Type_contiguous(ncol, MPI_FLOAT, &rowtype); MPI_Type_commit(&rowtype); int niter = 100; int nblocks, blockSize, taskperItem; taskperItem = 1; // Number of threads in each block if(argc < 6) { blockSize = 1024; } else { blockSize = min(1024, (int) (pow(2.0f, (int) ceil(log2((float) atoi(argv[5])))))); } // Number of blocks; number max is 65535 nblocks = (int) ceil((float) ((nlin / size) * ncol) / blockSize); if(nblocks > 65535) { fprintf(stderr, "Number of blocks is higher than 65535!\n"); } #ifdef PROFILING double t_start = 0.0, t_end = 0.0, t = 0.0; t_start = MPI_Wtime(); #endif push_region("profiling"); #ifdef PAPI PAPI_start(event_set); MPI_Barrier(MPI_COMM_WORLD); #endif // Copy host vectors to device hipMemcpy(pA, vec_A, ((nlin / size) * ncol) * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(pB, vec_B, ((nlin / size) * ncol) * sizeof(float), hipMemcpyHostToDevice); for(int i = 0; i < niter; i++) { // Execute the kernel hipLaunchKernelGGL(( MatAdd), dim3(nblocks), dim3(blockSize / taskperItem), 0, 0, pA, pB, pC, (nlin / size) * ncol, taskperItem); } // Copy array back to host hipMemcpy(vec_C, pC, ((nlin / size) * ncol) * sizeof(float), hipMemcpyDeviceToHost); // Non-collective MPI routines // MPI_Status recv_status; // MPI_Request send_status; // for(int r = 0; r < size; r++) { // if(r != rank) { // MPI_Isend(vec_C,(nlin/size),rowtype,r,rank*100+r,MPI_COMM_WORLD,&send_status); // 
MPI_Recv(matrix_C+r*(nlin/size)*ncol,(nlin/size),rowtype,r,r*100+rank,MPI_COMM_WORLD,&recv_status); // MPI_Wait(&send_status, &recv_status); // } //} // for(int j = rank*(nlin/size)*ncol; j < (nlin/size)*ncol; j++) { // matrix_C[j] = vec_C[j]; //} // Collective MPI routine MPI_Allgather(vec_C, (nlin / size), rowtype, matrix_C, (nlin / size), rowtype, MPI_COMM_WORLD); #ifdef PAPI MPI_Barrier(MPI_COMM_WORLD); PAPI_stop(event_set, values); #endif pop_region("profiling"); #ifdef PROFILING MPI_Barrier(MPI_COMM_WORLD); t_end = MPI_Wtime(); t = t_end - t_start; #endif #ifdef PAPI for(int id = 0; id < nodes; id++) { if(rank == id * cpn) { printf("\n"); long long xmit = 0, rcv = 0; for(int i = 0; i < event_count; i++) { if(strstr(events[i], "xmit")) { xmit = xmit + values[i]; } else if(strstr(events[i], "rcv")) { rcv = rcv + values[i]; } } printf("node %d -> %lld sent bytes\n", id, xmit); printf("node %d -> %lld received bytes\n", id, rcv); } } #endif #ifdef PROFILING if(rank == 0) { // long double sum = 0.; // for(int i=0; i < nlin; i++) // for(int j=0; j < ncol; j++) // sum = sum + matrix_C[j+i*ncol]; // fprintf(stderr,"Sum of all elements of the matrix: %Lf\n",sum); fprintf(stderr, "Time: %lf\n", t); // matrix_vt_print(nlin,ncol,matrix_C); } #endif // Release host memory free(matrix_C); // Release host memory MPI_Type_free(&rowtype); #ifdef PINNED hipHostFree(vec_A); hipHostFree(vec_B); hipHostFree(vec_C); #else free(vec_A); free(vec_B); free(vec_C); #endif // Release device memory hipFree(pA); hipFree(pB); hipFree(pC); #ifdef PAPI PAPI_cleanup_eventset(event_set); PAPI_destroy_eventset(&event_set); PAPI_shutdown(); #endif pop_region("main"); finalize(); MPI_Finalize(); return 0; }
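// ---------------------------------------------------------------------------
// Hedged sketch, not part of the file above: MatAdd gives each thread
// `taskperItem` elements spaced blockDim.x apart inside one block's tile and
// caps the grid at 65535 blocks. A grid-stride loop is a common alternative
// that keeps coalesced access while letting a fixed-size grid cover any n.
// The kernel name and the launch sizes below are illustrative only.
#include <cuda_runtime.h>

__global__ void MatAddGridStride(const float* A, const float* B, float* C, int n) {
    // Each thread strides over the array by the whole grid, so any grid size
    // produces correct results and the work stays evenly distributed.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x) {
        C[i] = A[i] + B[i];
    }
}
// Possible launch for the per-rank slice used above (sizes assumed):
// MatAddGridStride<<<256, 256>>>(pA, pB, pC, (nlin / size) * ncol);
// ---------------------------------------------------------------------------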
f8c73bb59bf647c51ca09cbf1f1c91b0348cf994.cu
#include <cuda.h> #include <cuda_runtime.h> #include <math.h> #include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef PAPI # include <papi.h> #endif #include "timemory-interface/interface.h" __global__ void MatAdd(float* A, float* B, float* C, int n, int taskperItem) { // Get our global thread ID int global_id = blockIdx.x * blockDim.x * taskperItem + threadIdx.x; for(int t = 0; t < taskperItem; t++) { int i = t * blockDim.x + global_id; // Make sure we do not go out of bounds if(i < n) C[i] = A[i] + B[i]; } } int matrix_vt_create(int nlin, int ncol, float* m, int rank) { for(int i = 0; i < nlin; i++) for(int j = 0; j < ncol; j++) m[j + i * ncol] = i + (nlin * rank) + j * 2; return 0; } #ifdef PROFILING void matrix_vt_print(int nlin, int ncol, float* m) { for(int i = 0; i < nlin; i++) { for(int j = 0; j < ncol; j++) fprintf(stderr, "%.3f ", m[j + i * ncol]); fprintf(stderr, "\n"); } } #endif int main(int argc, char** argv) { int event_count = 9; const char* events[] = { "infiniband:::mlx5_0_1_ext:port_xmit_data", "infiniband:::mlx5_0_1_ext:port_rcv_data", "infiniband:::mlx5_2_1_ext:port_xmit_data", "infiniband:::mlx5_2_1_ext:port_rcv_data", "infiniband:::mlx5_4_1_ext:port_xmit_data", "infiniband:::mlx5_4_1_ext:port_rcv_data", "infiniband:::mlx5_6_1_ext:port_xmit_data", "infiniband:::mlx5_6_1_ext:port_rcv_data" }; #ifdef PAPI int event_set = PAPI_NULL; long long values[event_count]; #else set_papi_events(event_count, events); #endif int size, rank; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); initialize(&argc, &argv); push_region("main"); int nlin, ncol; if(argc < 3) { nlin = 8; ncol = 8; } else { nlin = atoi(argv[1]); ncol = atoi(argv[2]); } #ifdef PAPI int nodes, cpn; if(argc < 5) { // Run the test on this many nodes. nodes = 2; // Run this many ranks per node in the test. 
cpn = 40; } else { nodes = atoi(argv[3]); cpn = atoi(argv[4]); } #endif float* matrix_C = (float*) malloc(nlin * ncol * sizeof(float)); if(matrix_C == NULL) { fprintf(stderr, "Error in Matrix A allocation.\n"); return 1; } // Host input and output vectors // Allocate memory for each vector on host float *vec_A, *vec_B, *vec_C; #ifdef PINNED cudaError_t status = cudaMallocHost((void**) &vec_A, (nlin / size) * ncol * sizeof(float)); if(status != cudaSuccess) { fprintf(stderr, "Error in pinned vector A allocation.\n"); return 1; } status = cudaMallocHost((void**) &vec_B, (nlin / size) * ncol * sizeof(float)); if(status != cudaSuccess) { fprintf(stderr, "Error in pinned vector B allocation.\n"); return 1; } status = cudaMallocHost((void**) &vec_C, (nlin / size) * ncol * sizeof(float)); if(status != cudaSuccess) { fprintf(stderr, "Error in pinned vector C allocation.\n"); return 1; } #else vec_A = (float*) malloc((nlin / size) * ncol * sizeof(float)); if(vec_A == NULL) { fprintf(stderr, "Error in vector A allocation.\n"); return 1; } vec_B = (float*) malloc((nlin / size) * ncol * sizeof(float)); if(vec_B == NULL) { fprintf(stderr, "Error in vector B allocation.\n"); return 1; } vec_C = (float*) malloc((nlin / size) * ncol * sizeof(float)); if(vec_C == NULL) { fprintf(stderr, "Error in vector C allocation.\n"); return 1; } #endif if(matrix_vt_create(nlin / size, ncol, vec_A, rank)) { fprintf(stderr, "Error in vector A creation.\n"); return 1; } if(matrix_vt_create(nlin / size, ncol, vec_B, rank)) { fprintf(stderr, "Error in vector B creation.\n"); return 1; } #ifdef PAPI PAPI_library_init(PAPI_VER_CURRENT); PAPI_create_eventset(&event_set); int code = 0; for(int i = 0; i < event_count; i++) { PAPI_event_name_to_code(events[i], &code); PAPI_add_event(event_set, code); } #endif // Device input and output vectors float *pA, *pB, *pC; // Allocate memory for each vector on device cudaMalloc((void**) &pA, ((nlin / size) * ncol) * sizeof(float)); cudaMalloc((void**) &pB, ((nlin / size) * ncol) * sizeof(float)); cudaMalloc((void**) &pC, ((nlin / size) * ncol) * sizeof(float)); MPI_Datatype rowtype; MPI_Type_contiguous(ncol, MPI_FLOAT, &rowtype); MPI_Type_commit(&rowtype); int niter = 100; int nblocks, blockSize, taskperItem; taskperItem = 1; // Number of threads in each block if(argc < 6) { blockSize = 1024; } else { blockSize = min(1024, (int) (pow(2.0f, (int) ceil(log2((float) atoi(argv[5])))))); } // Number of blocks; number max is 65535 nblocks = (int) ceil((float) ((nlin / size) * ncol) / blockSize); if(nblocks > 65535) { fprintf(stderr, "Number of blocks is higher than 65535!\n"); } #ifdef PROFILING double t_start = 0.0, t_end = 0.0, t = 0.0; t_start = MPI_Wtime(); #endif push_region("profiling"); #ifdef PAPI PAPI_start(event_set); MPI_Barrier(MPI_COMM_WORLD); #endif // Copy host vectors to device cudaMemcpy(pA, vec_A, ((nlin / size) * ncol) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(pB, vec_B, ((nlin / size) * ncol) * sizeof(float), cudaMemcpyHostToDevice); for(int i = 0; i < niter; i++) { // Execute the kernel MatAdd<<<nblocks, blockSize / taskperItem>>>(pA, pB, pC, (nlin / size) * ncol, taskperItem); } // Copy array back to host cudaMemcpy(vec_C, pC, ((nlin / size) * ncol) * sizeof(float), cudaMemcpyDeviceToHost); // Non-collective MPI routines // MPI_Status recv_status; // MPI_Request send_status; // for(int r = 0; r < size; r++) { // if(r != rank) { // MPI_Isend(vec_C,(nlin/size),rowtype,r,rank*100+r,MPI_COMM_WORLD,&send_status); // 
MPI_Recv(matrix_C+r*(nlin/size)*ncol,(nlin/size),rowtype,r,r*100+rank,MPI_COMM_WORLD,&recv_status); // MPI_Wait(&send_status, &recv_status); // } //} // for(int j = rank*(nlin/size)*ncol; j < (nlin/size)*ncol; j++) { // matrix_C[j] = vec_C[j]; //} // Collective MPI routine MPI_Allgather(vec_C, (nlin / size), rowtype, matrix_C, (nlin / size), rowtype, MPI_COMM_WORLD); #ifdef PAPI MPI_Barrier(MPI_COMM_WORLD); PAPI_stop(event_set, values); #endif pop_region("profiling"); #ifdef PROFILING MPI_Barrier(MPI_COMM_WORLD); t_end = MPI_Wtime(); t = t_end - t_start; #endif #ifdef PAPI for(int id = 0; id < nodes; id++) { if(rank == id * cpn) { printf("\n"); long long xmit = 0, rcv = 0; for(int i = 0; i < event_count; i++) { if(strstr(events[i], "xmit")) { xmit = xmit + values[i]; } else if(strstr(events[i], "rcv")) { rcv = rcv + values[i]; } } printf("node %d -> %lld sent bytes\n", id, xmit); printf("node %d -> %lld received bytes\n", id, rcv); } } #endif #ifdef PROFILING if(rank == 0) { // long double sum = 0.; // for(int i=0; i < nlin; i++) // for(int j=0; j < ncol; j++) // sum = sum + matrix_C[j+i*ncol]; // fprintf(stderr,"Sum of all elements of the matrix: %Lf\n",sum); fprintf(stderr, "Time: %lf\n", t); // matrix_vt_print(nlin,ncol,matrix_C); } #endif // Release host memory free(matrix_C); // Release host memory MPI_Type_free(&rowtype); #ifdef PINNED cudaFreeHost(vec_A); cudaFreeHost(vec_B); cudaFreeHost(vec_C); #else free(vec_A); free(vec_B); free(vec_C); #endif // Release device memory cudaFree(pA); cudaFree(pB); cudaFree(pC); #ifdef PAPI PAPI_cleanup_eventset(event_set); PAPI_destroy_eventset(&event_set); PAPI_shutdown(); #endif pop_region("main"); finalize(); MPI_Finalize(); return 0; }
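// --- Editor's sketch (not from the original program) -------------------------
// None of the cudaMalloc/cudaMemcpy calls or the MatAdd launch above are
// error-checked. A minimal checking pattern is sketched below; the CHECK_CUDA
// macro name is an illustrative assumption, not something defined in the file.
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define CHECK_CUDA(call)                                                     \
    do {                                                                     \
        cudaError_t err__ = (call);                                          \
        if (err__ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                    \
                    cudaGetErrorString(err__), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Hypothetical usage with the names from the program above:
//   CHECK_CUDA(cudaMemcpy(pA, vec_A, bytes, cudaMemcpyHostToDevice));
//   MatAdd<<<nblocks, blockSize / taskperItem>>>(pA, pB, pC, (nlin / size) * ncol, taskperItem);
//   CHECK_CUDA(cudaGetLastError());
//   CHECK_CUDA(cudaDeviceSynchronize());
// ------------------------------------------------------------------------------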
35f5c829fe5bd06bde4198036a0131435884dc20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper // #include <cutil.h> #include <iostream> #include <fstream> #include "cudacommon.h" #include "ResultDatabase.h" #include "OptionParser.h" #define SEED 7 #if CUDART_VERSION < 3000 struct double3 { double x, y, z; }; #endif /* * Options * */ #define GAMMA 1.4 #define iterations 2000 #ifndef block_length #define block_length 128 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2 #define deg_angle_of_attack 0.0 /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) float kernelTime = 0.0f; float transferTime = 0.0f; hipEvent_t start, stop; float elapsed; /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; CUDA_SAFE_CALL(hipMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { CUDA_SAFE_CALL(hipFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { hipEventRecord(start, 0); CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } template <typename T> void upload(T* dst, T* src, int N) { hipEventRecord(start, 0); CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } template <typename T> void download(T* dst, T* src, int N) { hipEventRecord(start, 0); CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } void dump(double* variables, int nel, int nelr) { double* h_variables = new double[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ double ff_variable[NVAR]; __constant__ double3 ff_flux_contribution_momentum_x[1]; __constant__ double3 ff_flux_contribution_momentum_y[1]; __constant__ double3 ff_flux_contribution_momentum_z[1]; __constant__ double3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, double* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, double* variables) { dim3 Dg(nelr / block_length), Db(block_length); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_initialize_variables), 
dim3(Dg), dim3(Db), 0, 0, nelr, variables); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } __device__ __host__ inline void compute_flux_contribution(double& density, double3& momentum, double& density_energy, double& pressure, double3& velocity, double3& fc_momentum_x, double3& fc_momentum_y, double3& fc_momentum_z, double3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; double de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(double& density, double3& momentum, double3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline double compute_speed_sqd(double3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline double compute_pressure(double& density, double& density_energy, double& speed_sqd) { return (double(GAMMA)-double(1.0))*(density_energy - double(0.5)*density*speed_sqd); } __device__ inline double compute_speed_of_sound(double& density, double& pressure) { return sqrt(double(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, double* variables, double* areas, double* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); double density = variables[i + VAR_DENSITY*nelr]; double3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; double density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; double3 velocity; compute_velocity(density, momentum, velocity); double speed_sqd = compute_speed_sqd(velocity); double pressure = compute_pressure(density, density_energy, speed_sqd); double speed_of_sound = compute_speed_of_sound(density, pressure); // dt = double(0.5) * sqrt(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = double(0.5) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, double* variables, double* areas, double* step_factors) { dim3 Dg(nelr / block_length), Db(block_length); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } /* * * */ __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes) { const double smoothing_coefficient = double(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; double3 normal; double normal_len; double factor; double density_i = variables[i + VAR_DENSITY*nelr]; double3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; double3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); double speed_sqd_i = compute_speed_sqd(velocity_i); double speed_i = sqrt(speed_sqd_i); double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); double3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; double3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); double flux_i_density = double(0.0); double3 flux_i_momentum; flux_i_momentum.x = double(0.0); flux_i_momentum.y = double(0.0); flux_i_momentum.z = double(0.0); double flux_i_density_energy = double(0.0); double3 velocity_nb; double density_nb, density_energy_nb; double3 momentum_nb; double3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; double3 flux_contribution_nb_density_energy; double speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = 
-normal_len*smoothing_coefficient*double(0.5)*(speed_i + sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = double(0.5)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = double(0.5)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = double(0.5)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = double(0.5)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = double(0.5)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = double(0.5)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z 
+ flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } __global__ void cuda_time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); double factor = step_factors[i]/double(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); hipEventRecord(start, 0); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } void addBenchmarkSpecOptions(OptionParser &op) { } void cfd(ResultDatabase &resultDB, OptionParser &op); void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { printf("Running CFDSolver (double)\n"); bool quiet = op.getOptionBool("quiet"); if(!quiet) { printf("WG size of %d\n", block_length); } hipEventCreate(&start); hipEventCreate(&stop); int passes = op.getOptionInt("passes"); for(int i = 0; i < passes; i++) { kernelTime = 0.0f; transferTime = 0.0f; if(!quiet) { printf("Pass %d:\n", i); } cfd(resultDB, op); if(!quiet) { printf("Done.\n"); } } } void cfd(ResultDatabase &resultDB, OptionParser &op) { // set far field conditions and load them into constant memory on the gpu { double h_ff_variable[NVAR]; const double angle_of_attack = double(3.1415926535897931 / 180.0) * double(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = double(1.4); double ff_pressure = double(1.0); double ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); double ff_speed = double(ff_mach)*ff_speed_of_sound; double3 ff_velocity; ff_velocity.x = ff_speed*double(cos((double)angle_of_attack)); ff_velocity.y = ff_speed*double(sin((double)angle_of_attack)); ff_velocity.z = 0.0; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; 
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(double(0.5)*(ff_speed*ff_speed)) + (ff_pressure / double(GAMMA-1.0)); double3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); double3 h_ff_flux_contribution_momentum_x; double3 h_ff_flux_contribution_momentum_y; double3 h_ff_flux_contribution_momentum_z; double3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu hipEventRecord(start, 0); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(double)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(double3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(double3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(double3)) ); CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(double3)) ); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } int nel; int nelr; // read in domain geometry double* areas; int* elements_surrounding_elements; double* normals; { string inputFile = op.getOptionString("inputFile"); std::ifstream file(inputFile.c_str()); if(inputFile != "") { file >> nel; } else { int problemSizes[4] = {97000, 200000, 1000000, 4000000}; nel = problemSizes[op.getOptionInt("size") - 1]; } nelr = block_length*((nel / block_length )+ ::min(1, nel % block_length)); double* h_areas = new double[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; double* h_normals = new double[nelr*NDIM*NNB]; srand(SEED); // read in data for(int i = 0; i < nel; i++) { if(inputFile != "") { file >> h_areas[i]; } else { h_areas[i] = 1.0 * rand() / RAND_MAX; } for(int j = 0; j < NNB; j++) { if(inputFile != "") { file >> h_elements_surrounding_elements[i + j*nelr]; } else { int val = i + (rand() % 20) - 10; h_elements_surrounding_elements[i + j * nelr] = val; } if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { if(inputFile != "") { file >> h_normals[i + (j + k*NNB)*nelr]; } else { h_normals[i + (j + k*NNB)*nelr] = 1.0 * rand() / RAND_MAX - 0.5; } h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<double>(nelr); upload<double>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, 
nelr*NNB); normals = alloc<double>(nelr*NDIM*NNB); upload<double>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions double* variables = alloc<double>(nelr*NVAR); initialize_variables(nelr, variables); double* old_variables = alloc<double>(nelr*NVAR); double* fluxes = alloc<double>(nelr*NVAR); double* step_factors = alloc<double>(nelr); // make sure all memory is doublely allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); hipMemset( (void*) step_factors, 0, sizeof(double)*nelr ); // make sure CUDA isn't still doing something before we start timing hipDeviceSynchronize(); // these need to be computed the first time in order to compute time step // Begin iterations for(int i = 0; i < iterations; i++) { copy<double>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); CHECK_CUDA_ERROR(); for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); CHECK_CUDA_ERROR(); time_step(j, nelr, old_variables, variables, step_factors, fluxes); CHECK_CUDA_ERROR(); } } hipDeviceSynchronize(); if(op.getOptionBool("verbose")) { dump(variables, nel, nelr); } dealloc<double>(areas); dealloc<int>(elements_surrounding_elements); dealloc<double>(normals); dealloc<double>(variables); dealloc<double>(old_variables); dealloc<double>(fluxes); dealloc<double>(step_factors); char atts[1024]; sprintf(atts, "numelements:%d", nel); resultDB.AddResult("cfd_double_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("cfd_double_transfer_time", atts, "sec", transferTime); resultDB.AddResult("cfd_double_parity", atts, "N", transferTime / kernelTime); resultDB.AddOverall("Time", "sec", kernelTime+transferTime); }
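// --- Editor's sketch (not from the original benchmark) -----------------------
// Every kernel above is launched with dim3 Dg(nelr / block_length) full blocks,
// which only works because nel is first rounded up to a multiple of
// block_length. A standalone illustration of that rounding; pad_to_multiple is
// an illustrative name, not a function in the benchmark.
#include <cstdio>

static int pad_to_multiple(int nel, int block_length)
{
    // Same arithmetic as: block_length*((nel / block_length) + min(1, nel % block_length))
    return block_length * ((nel / block_length) + (nel % block_length ? 1 : 0));
}

static void pad_to_multiple_demo()
{
    // Smallest built-in problem size with the default block_length of 128:
    printf("%d -> %d\n", 97000, pad_to_multiple(97000, 128)); // 97000 -> 97024
    printf("%d -> %d\n", 97024, pad_to_multiple(97024, 128)); // already padded: 97024
}
// ------------------------------------------------------------------------------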
35f5c829fe5bd06bde4198036a0131435884dc20.cu
// Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper // #include <cutil.h> #include <iostream> #include <fstream> #include "cudacommon.h" #include "ResultDatabase.h" #include "OptionParser.h" #define SEED 7 #if CUDART_VERSION < 3000 struct double3 { double x, y, z; }; #endif /* * Options * */ #define GAMMA 1.4 #define iterations 2000 #ifndef block_length #define block_length 128 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2 #define deg_angle_of_attack 0.0 /* * not options */ #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) float kernelTime = 0.0f; float transferTime = 0.0f; cudaEvent_t start, stop; float elapsed; /* * Generic functions */ template <typename T> T* alloc(int N) { T* t; CUDA_SAFE_CALL(cudaMalloc((void**)&t, sizeof(T)*N)); return t; } template <typename T> void dealloc(T* array) { CUDA_SAFE_CALL(cudaFree((void*)array)); } template <typename T> void copy(T* dst, T* src, int N) { cudaEventRecord(start, 0); CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } template <typename T> void upload(T* dst, T* src, int N) { cudaEventRecord(start, 0); CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } template <typename T> void download(T* dst, T* src, int N) { cudaEventRecord(start, 0); CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } void dump(double* variables, int nel, int nelr) { double* h_variables = new double[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ double ff_variable[NVAR]; __constant__ double3 ff_flux_contribution_momentum_x[1]; __constant__ double3 ff_flux_contribution_momentum_y[1]; __constant__ double3 ff_flux_contribution_momentum_z[1]; __constant__ double3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, double* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, double* variables) { dim3 Dg(nelr / block_length), Db(block_length); cudaEventRecord(start, 0); cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } __device__ __host__ inline void compute_flux_contribution(double& density, double3& momentum, double& density_energy, double& pressure, double3& velocity, double3& fc_momentum_x, double3& fc_momentum_y, double3& fc_momentum_z, double3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; double de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(double& density, double3& momentum, double3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline double compute_speed_sqd(double3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline double compute_pressure(double& density, double& density_energy, double& speed_sqd) { return (double(GAMMA)-double(1.0))*(density_energy - double(0.5)*density*speed_sqd); } __device__ inline double compute_speed_of_sound(double& density, double& pressure) { return sqrt(double(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, double* variables, double* areas, double* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); double density = variables[i + VAR_DENSITY*nelr]; double3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; double density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; double3 velocity; compute_velocity(density, momentum, velocity); double speed_sqd = compute_speed_sqd(velocity); double pressure = compute_pressure(density, density_energy, speed_sqd); double speed_of_sound = compute_speed_of_sound(density, pressure); // dt = double(0.5) * sqrt(areas[i]) / (||v|| + c).... 
but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = double(0.5) / (sqrt(areas[i]) * (sqrt(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, double* variables, double* areas, double* step_factors) { dim3 Dg(nelr / block_length), Db(block_length); cudaEventRecord(start, 0); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } /* * * */ __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes) { const double smoothing_coefficient = double(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; double3 normal; double normal_len; double factor; double density_i = variables[i + VAR_DENSITY*nelr]; double3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; double density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; double3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); double speed_sqd_i = compute_speed_sqd(velocity_i); double speed_i = sqrt(speed_sqd_i); double pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); double speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); double3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; double3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); double flux_i_density = double(0.0); double3 flux_i_momentum; flux_i_momentum.x = double(0.0); flux_i_momentum.y = double(0.0); flux_i_momentum.z = double(0.0); double flux_i_density_energy = double(0.0); double3 velocity_nb; double density_nb, density_energy_nb; double3 momentum_nb; double3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; double3 flux_contribution_nb_density_energy; double speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrt(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*double(0.5)*(speed_i + 
sqrt(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = double(0.5)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = double(0.5)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = double(0.5)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = double(0.5)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = double(0.5)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = double(0.5)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += 
factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, double* normals, double* variables, double* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); cudaEventRecord(start, 0); cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } __global__ void cuda_time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); double factor = step_factors[i]/double(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, double* old_variables, double* variables, double* step_factors, double* fluxes) { dim3 Dg(nelr / block_length), Db(block_length); cudaEventRecord(start, 0); cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); kernelTime += elapsed * 1.e-3; CHECK_CUDA_ERROR(); } void addBenchmarkSpecOptions(OptionParser &op) { } void cfd(ResultDatabase &resultDB, OptionParser &op); void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { printf("Running CFDSolver (double)\n"); bool quiet = op.getOptionBool("quiet"); if(!quiet) { printf("WG size of %d\n", block_length); } cudaEventCreate(&start); cudaEventCreate(&stop); int passes = op.getOptionInt("passes"); for(int i = 0; i < passes; i++) { kernelTime = 0.0f; transferTime = 0.0f; if(!quiet) { printf("Pass %d:\n", i); } cfd(resultDB, op); if(!quiet) { printf("Done.\n"); } } } void cfd(ResultDatabase &resultDB, OptionParser &op) { // set far field conditions and load them into constant memory on the gpu { double h_ff_variable[NVAR]; const double angle_of_attack = double(3.1415926535897931 / 180.0) * double(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = double(1.4); double ff_pressure = double(1.0); double ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); double ff_speed = double(ff_mach)*ff_speed_of_sound; double3 ff_velocity; ff_velocity.x = ff_speed*double(cos((double)angle_of_attack)); ff_velocity.y = ff_speed*double(sin((double)angle_of_attack)); ff_velocity.z = 0.0; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = 
h_ff_variable[VAR_DENSITY]*(double(0.5)*(ff_speed*ff_speed)) + (ff_pressure / double(GAMMA-1.0)); double3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); double3 h_ff_flux_contribution_momentum_x; double3 h_ff_flux_contribution_momentum_y; double3 h_ff_flux_contribution_momentum_z; double3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu cudaEventRecord(start, 0); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(double)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(double3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(double3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(double3)) ); CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(double3)) ); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); transferTime += elapsed * 1.e-3; } int nel; int nelr; // read in domain geometry double* areas; int* elements_surrounding_elements; double* normals; { string inputFile = op.getOptionString("inputFile"); std::ifstream file(inputFile.c_str()); if(inputFile != "") { file >> nel; } else { int problemSizes[4] = {97000, 200000, 1000000, 4000000}; nel = problemSizes[op.getOptionInt("size") - 1]; } nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length)); double* h_areas = new double[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; double* h_normals = new double[nelr*NDIM*NNB]; srand(SEED); // read in data for(int i = 0; i < nel; i++) { if(inputFile != "") { file >> h_areas[i]; } else { h_areas[i] = 1.0 * rand() / RAND_MAX; } for(int j = 0; j < NNB; j++) { if(inputFile != "") { file >> h_elements_surrounding_elements[i + j*nelr]; } else { int val = i + (rand() % 20) - 10; h_elements_surrounding_elements[i + j * nelr] = val; } if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; //it's coming in with Fortran numbering for(int k = 0; k < NDIM; k++) { if(inputFile != "") { file >> h_normals[i + (j + k*NNB)*nelr]; } else { h_normals[i + (j + k*NNB)*nelr] = 1.0 * rand() / RAND_MAX - 0.5; } h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<double>(nelr); upload<double>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<double>(nelr*NDIM*NNB); upload<double>(normals, h_normals, nelr*NDIM*NNB); 
delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions double* variables = alloc<double>(nelr*NVAR); initialize_variables(nelr, variables); double* old_variables = alloc<double>(nelr*NVAR); double* fluxes = alloc<double>(nelr*NVAR); double* step_factors = alloc<double>(nelr); // make sure all memory is doublely allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); cudaMemset( (void*) step_factors, 0, sizeof(double)*nelr ); // make sure CUDA isn't still doing something before we start timing cudaThreadSynchronize(); // these need to be computed the first time in order to compute time step // Begin iterations for(int i = 0; i < iterations; i++) { copy<double>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); CHECK_CUDA_ERROR(); for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); CHECK_CUDA_ERROR(); time_step(j, nelr, old_variables, variables, step_factors, fluxes); CHECK_CUDA_ERROR(); } } cudaThreadSynchronize(); if(op.getOptionBool("verbose")) { dump(variables, nel, nelr); } dealloc<double>(areas); dealloc<int>(elements_surrounding_elements); dealloc<double>(normals); dealloc<double>(variables); dealloc<double>(old_variables); dealloc<double>(fluxes); dealloc<double>(step_factors); char atts[1024]; sprintf(atts, "numelements:%d", nel); resultDB.AddResult("cfd_double_kernel_time", atts, "sec", kernelTime); resultDB.AddResult("cfd_double_transfer_time", atts, "sec", transferTime); resultDB.AddResult("cfd_double_parity", atts, "N", transferTime / kernelTime); resultDB.AddOverall("Time", "sec", kernelTime+transferTime); }
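// --- Editor's sketch (not from the original benchmark) -----------------------
// The "fill in remaining data" loop above duplicates the last real element into
// the padded slots, but its innermost line assigns h_normals[last + ...] to
// itself. The copy it presumably intends (writing slot i) is sketched here as a
// standalone helper; fill_padded_element is an illustrative name, and the
// NNB_/NDIM_ constants match the benchmark's NNB and NDIM values.
static void fill_padded_element(double* h_areas,
                                int* h_elements_surrounding_elements,
                                double* h_normals,
                                int nelr, int i, int last)
{
    const int NNB_  = 4;  // neighbours per element, as in the benchmark
    const int NDIM_ = 3;  // spatial dimensions, as in the benchmark
    h_areas[i] = h_areas[last];
    for (int j = 0; j < NNB_; j++)
    {
        h_elements_surrounding_elements[i + j * nelr] =
            h_elements_surrounding_elements[last + j * nelr];
        for (int k = 0; k < NDIM_; k++)
            h_normals[i + (j + k * NNB_) * nelr] =          // writes the padded slot i
                h_normals[last + (j + k * NNB_) * nelr];
    }
}
// ------------------------------------------------------------------------------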
a557a910a3298f9f83e79f3dc36b62629948e478.hip
// !!! This is a file automatically generated by hipify!!!
// Related CUDA libraries
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"

#include <iostream>
#include <stdio.h>

using namespace std;

const int N = 100;

// Number of blocks
const int BLOCK_data = 3;
// Number of threads in each block
const int THREAD_data = 10;

// CUDA initialization function
bool InitCUDA()
{
    int deviceCount;
    // Get the number of devices
    hipGetDeviceCount (&deviceCount);
    if (deviceCount == 0)
    {
        cout << "No device found" << endl;
        return EXIT_FAILURE;
    }
    int i;
    for (i=0; i<deviceCount; i++)
    {
        hipDeviceProp_t prop;
        if (hipGetDeviceProperties(&prop,i)==hipSuccess) // get device properties
        {
            if (prop.major>=1) // CUDA compute capability
            {
                break;
            }
        }
    }
    if (i==deviceCount)
    {
        cout << "No device supporting CUDA computation found" << endl;
        return EXIT_FAILURE;
    }
    hipSetDevice(i); // select the device to use
    return EXIT_SUCCESS;
}

// This function is called from the host and executed on the device.
__global__ static void Sum (int *data,int *result)
{
    // Get the thread index
    const int tid = threadIdx.x;
    // Get the block index
    const int bid = blockIdx.x;
    int sum = 0;
    // Strided loop over the input, grid-computing style
    for (int i=bid*THREAD_data+tid; i<N; i+=BLOCK_data*THREAD_data)
    {
        sum += data[i];
    }
    // The result array stores each thread's partial sum
    result[bid*THREAD_data+tid] = sum;
}

int main ()
{
    // Initialize the CUDA environment
    if (InitCUDA()) {
        return EXIT_FAILURE;
    }
    cout << "CUDA compute environment set up successfully" << endl << endl;

    // Create, initialize, and print the test array
    int *data = new int [N];
    cout << "Test matrix: " << endl;
    for (int i=0; i<N; i++)
    {
        data[i] = rand()%10;
        cout << data[i] << " ";
        if ((i+1)%10 == 0) cout << endl;
    }
    cout << endl;

    int *gpudata, *result;
    // Allocate device memory for the input data
    hipMalloc ((void**)&gpudata, sizeof(int)*N);
    // Allocate device memory for the results
    hipMalloc ((void**)&result, sizeof(int)*BLOCK_data*THREAD_data);
    // Copy the array data to device memory
    hipMemcpy (gpudata, data, sizeof(int)*N, hipMemcpyHostToDevice);
    // Launch the kernel - it processes the data based on the device addresses and its own block/thread indices.
    hipLaunchKernelGGL(( Sum), dim3(BLOCK_data),dim3(THREAD_data),0, 0, gpudata,result);
    // Allocate host memory for the partial results
    int *sumArray = new int[THREAD_data*BLOCK_data];
    // Copy the results back from device memory
    hipMemcpy (sumArray, result, sizeof(int)*THREAD_data*BLOCK_data, hipMemcpyDeviceToHost);
    // Free device memory
    hipFree (gpudata);
    hipFree (result);
    // Sum up the per-thread partial sums computed on the GPU
    int final_sum=0;
    for (int i=0; i<THREAD_data*BLOCK_data; i++)
    {
        final_sum += sumArray[i];
    }
    cout << "GPU sum result: " << final_sum << endl;
    // Sum the array on the CPU for comparison
    final_sum = 0;
    for (int i=0; i<N; i++)
    {
        final_sum += data[i];
    }
    cout << "CPU sum result: " << final_sum << endl;
    return 0;
}
a557a910a3298f9f83e79f3dc36b62629948e478.cu
// Related CUDA libraries
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"

#include <iostream>
#include <stdio.h>

using namespace std;

const int N = 100;

// Number of blocks
const int BLOCK_data = 3;
// Number of threads in each block
const int THREAD_data = 10;

// CUDA initialization function
bool InitCUDA()
{
    int deviceCount;
    // Get the number of devices
    cudaGetDeviceCount (&deviceCount);
    if (deviceCount == 0)
    {
        cout << "No device found" << endl;
        return EXIT_FAILURE;
    }
    int i;
    for (i=0; i<deviceCount; i++)
    {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop,i)==cudaSuccess) // get device properties
        {
            if (prop.major>=1) // CUDA compute capability
            {
                break;
            }
        }
    }
    if (i==deviceCount)
    {
        cout << "No device supporting CUDA computation found" << endl;
        return EXIT_FAILURE;
    }
    cudaSetDevice(i); // select the device to use
    return EXIT_SUCCESS;
}

// This function is called from the host and executed on the device.
__global__ static void Sum (int *data,int *result)
{
    // Get the thread index
    const int tid = threadIdx.x;
    // Get the block index
    const int bid = blockIdx.x;
    int sum = 0;
    // Strided loop over the input, grid-computing style
    for (int i=bid*THREAD_data+tid; i<N; i+=BLOCK_data*THREAD_data)
    {
        sum += data[i];
    }
    // The result array stores each thread's partial sum
    result[bid*THREAD_data+tid] = sum;
}

int main ()
{
    // Initialize the CUDA environment
    if (InitCUDA()) {
        return EXIT_FAILURE;
    }
    cout << "CUDA compute environment set up successfully" << endl << endl;

    // Create, initialize, and print the test array
    int *data = new int [N];
    cout << "Test matrix: " << endl;
    for (int i=0; i<N; i++)
    {
        data[i] = rand()%10;
        cout << data[i] << " ";
        if ((i+1)%10 == 0) cout << endl;
    }
    cout << endl;

    int *gpudata, *result;
    // Allocate device memory for the input data
    cudaMalloc ((void**)&gpudata, sizeof(int)*N);
    // Allocate device memory for the results
    cudaMalloc ((void**)&result, sizeof(int)*BLOCK_data*THREAD_data);
    // Copy the array data to device memory
    cudaMemcpy (gpudata, data, sizeof(int)*N, cudaMemcpyHostToDevice);
    // Launch the kernel - it processes the data based on the device addresses and its own block/thread indices.
    Sum<<<BLOCK_data,THREAD_data,0>>> (gpudata,result);
    // Allocate host memory for the partial results
    int *sumArray = new int[THREAD_data*BLOCK_data];
    // Copy the results back from device memory
    cudaMemcpy (sumArray, result, sizeof(int)*THREAD_data*BLOCK_data, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree (gpudata);
    cudaFree (result);
    // Sum up the per-thread partial sums computed on the GPU
    int final_sum=0;
    for (int i=0; i<THREAD_data*BLOCK_data; i++)
    {
        final_sum += sumArray[i];
    }
    cout << "GPU sum result: " << final_sum << endl;
    // Sum the array on the CPU for comparison
    final_sum = 0;
    for (int i=0; i<N; i++)
    {
        final_sum += data[i];
    }
    cout << "CPU sum result: " << final_sum << endl;
    return 0;
}
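// --- Editor's sketch (not from the original program) -------------------------
// The program above copies BLOCK_data*THREAD_data partial sums back and adds
// them on the CPU. A minimal alternative that finishes the reduction on the GPU
// with atomicAdd is sketched here; SumAtomic and d_total are illustrative names.
__global__ static void SumAtomic(const int *data, int *total, int n, int stride)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    // Same strided loop as the original Sum kernel.
    for (int i = idx; i < n; i += stride)
        sum += data[i];
    // Fold each thread's partial sum into one device-side counter.
    atomicAdd(total, sum);
}
// Hypothetical usage: cudaMemset(d_total, 0, sizeof(int)); then
//   SumAtomic<<<BLOCK_data, THREAD_data>>>(gpudata, d_total, N, BLOCK_data*THREAD_data);
// and copy back a single int instead of THREAD_data*BLOCK_data partial sums.
// ------------------------------------------------------------------------------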
a5bdf657355691a15c5e4ca1930a42c651ef0636.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2012-2017 VideoStitch SAS // Copyright (c) 2018 stitchEm #include "gpu/image/imgExtract.hpp" #include "../deviceBuffer.hpp" #include "../deviceStream.hpp" #include "cuda/util.hpp" #include <hip/hip_runtime.h> #include <cassert> namespace VideoStitch { namespace Image { /** * This kernel extract a part of the content of the image @src at * offset (@offsetX, offsetY) with size (dstWidth x dstHeight) and writes it in into a packed buffer @dst. * @dst must be large enough to hold the dstWidth * dstHeight pixels. * On overflow, the source image wraps if hWrap is true. Else pixels are filled with 0. * 2D version: We assume that the @dst (but not the @src) image is divisible * by the block size on each dimension. */ __global__ void imgExtractFromKernel(uint32_t* __restrict__ dst, unsigned dstWidth, unsigned dstHeight, const uint32_t* __restrict__ src, unsigned srcWidth, unsigned srcHeight, int offsetX, int offsetY, bool hWrap) { int dstX = blockIdx.x * blockDim.x + threadIdx.x; int dstY = blockIdx.y * blockDim.y + threadIdx.y; int srcX = offsetX + dstX; int srcY = offsetY + dstY; uint32_t res = 0; if (dstX < dstWidth && dstY < dstHeight) { if (0 <= srcY && srcY < srcHeight) { if (hWrap) { if (0 <= srcX) { if (srcX < srcWidth) { res = src[srcWidth * srcY + srcX]; } else { res = src[srcWidth * srcY + (srcX % srcWidth)]; } } else { res = src[srcWidth * srcY + srcWidth + (srcX % srcWidth)]; // modulo has sign of dividend } } else if (0 <= srcX & srcX < srcWidth) { res = src[srcWidth * srcY + srcX]; } } dst[dstWidth * dstY + dstX] = res; } } Status imgExtractFrom(GPU::Buffer<uint32_t> dst, std::size_t dstWidth, std::size_t dstHeight, GPU::Buffer<const uint32_t> src, std::size_t srcWidth, std::size_t srcHeight, std::size_t offsetX, std::size_t offsetY, bool hWrap, GPU::Stream stream) { dim3 dimBlock(16, 16, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(dstWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(dstHeight, dimBlock.y), 1); hipLaunchKernelGGL(( imgExtractFromKernel), dim3(dimGrid), dim3(dimBlock), 0, stream.get(), dst.get(), (unsigned)dstWidth, (unsigned)dstHeight, src.get(), (unsigned)srcWidth, (unsigned)srcHeight, (unsigned)offsetX, (unsigned)offsetY, hWrap); // wraps return CUDA_STATUS; } } // namespace Image } // namespace VideoStitch
a5bdf657355691a15c5e4ca1930a42c651ef0636.cu
// Copyright (c) 2012-2017 VideoStitch SAS // Copyright (c) 2018 stitchEm #include "gpu/image/imgExtract.hpp" #include "../deviceBuffer.hpp" #include "../deviceStream.hpp" #include "cuda/util.hpp" #include <cuda_runtime.h> #include <cassert> namespace VideoStitch { namespace Image { /** * This kernel extract a part of the content of the image @src at * offset (@offsetX, offsetY) with size (dstWidth x dstHeight) and writes it in into a packed buffer @dst. * @dst must be large enough to hold the dstWidth * dstHeight pixels. * On overflow, the source image wraps if hWrap is true. Else pixels are filled with 0. * 2D version: We assume that the @dst (but not the @src) image is divisible * by the block size on each dimension. */ __global__ void imgExtractFromKernel(uint32_t* __restrict__ dst, unsigned dstWidth, unsigned dstHeight, const uint32_t* __restrict__ src, unsigned srcWidth, unsigned srcHeight, int offsetX, int offsetY, bool hWrap) { int dstX = blockIdx.x * blockDim.x + threadIdx.x; int dstY = blockIdx.y * blockDim.y + threadIdx.y; int srcX = offsetX + dstX; int srcY = offsetY + dstY; uint32_t res = 0; if (dstX < dstWidth && dstY < dstHeight) { if (0 <= srcY && srcY < srcHeight) { if (hWrap) { if (0 <= srcX) { if (srcX < srcWidth) { res = src[srcWidth * srcY + srcX]; } else { res = src[srcWidth * srcY + (srcX % srcWidth)]; } } else { res = src[srcWidth * srcY + srcWidth + (srcX % srcWidth)]; // modulo has sign of dividend } } else if (0 <= srcX & srcX < srcWidth) { res = src[srcWidth * srcY + srcX]; } } dst[dstWidth * dstY + dstX] = res; } } Status imgExtractFrom(GPU::Buffer<uint32_t> dst, std::size_t dstWidth, std::size_t dstHeight, GPU::Buffer<const uint32_t> src, std::size_t srcWidth, std::size_t srcHeight, std::size_t offsetX, std::size_t offsetY, bool hWrap, GPU::Stream stream) { dim3 dimBlock(16, 16, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(dstWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(dstHeight, dimBlock.y), 1); imgExtractFromKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), (unsigned)dstWidth, (unsigned)dstHeight, src.get(), (unsigned)srcWidth, (unsigned)srcHeight, (unsigned)offsetX, (unsigned)offsetY, hWrap); // wraps return CUDA_STATUS; } } // namespace Image } // namespace VideoStitch
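// --- Editor's sketch (not from the original kernel) --------------------------
// The horizontal-wrap branch of imgExtractFromKernel relies on C++ integer
// modulo taking the sign of the dividend. A host-side mirror of that index
// arithmetic, for reference; wrap_x is an illustrative name.
#include <cassert>

static int wrap_x(int srcX, int srcWidth)
{
    if (srcX >= srcWidth) return srcX % srcWidth;        // e.g. srcX=13, width=10 -> 3
    if (srcX < 0) return srcWidth + (srcX % srcWidth);   // e.g. srcX=-3, width=10 -> 10 + (-3) = 7
    return srcX;                                         // already in range
}

static void wrap_x_selftest()
{
    assert(wrap_x(13, 10) == 3);
    assert(wrap_x(-3, 10) == 7);
    assert(wrap_x(0, 10)  == 0);
}
// ------------------------------------------------------------------------------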
c493e24f4caef6294b6e3f879b02fa2ddef99e29.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/OpMathType.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <limits> namespace at { namespace native { const char acos_name[] = "acos"; void acos_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { #if AT_USE_JITERATOR static const auto acos_string = jiterator_stringify( template <typename T> T acos(T a) { return std::acos(a); }); AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "acos_name", [&]() { jitted_gpu_kernel< /*name=*/acos_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, acos_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "acos_name", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return ::acos(static_cast<opmath_t>(a)); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, common_dtype, "acos_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::acos(a); }); }); } } REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda); } // namespace native } // namespace at
c493e24f4caef6294b6e3f879b02fa2ddef99e29.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/OpMathType.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/UnaryOps.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <limits> namespace at { namespace native { const char acos_name[] = "acos"; void acos_kernel_cuda(TensorIteratorBase& iter) { auto common_dtype = iter.common_dtype(); if (at::isComplexType(common_dtype)) { #if AT_USE_JITERATOR static const auto acos_string = jiterator_stringify( template <typename T> T acos(T a) { return std::acos(a); }); AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "acos_name", [&]() { jitted_gpu_kernel< /*name=*/acos_name, /*return_dtype=*/scalar_t, /*common_dtype=*/scalar_t, /*arity=*/1>(iter, acos_string); }); #else AT_DISPATCH_COMPLEX_TYPES_AND( kComplexHalf, common_dtype, "acos_name", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { using opmath_t = at::opmath_type<scalar_t>; return ::acos(static_cast<opmath_t>(a)); }); }); #endif } else { AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, common_dtype, "acos_cuda", [&]() { gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::acos(a); }); }); } } REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda); } // namespace native } // namespace at
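// --- Editor's sketch (not from the ATen source above) ------------------------
// The complex branch of acos_kernel_cuda promotes each value to
// at::opmath_type<scalar_t> before calling acos, i.e. low-precision inputs are
// widened for the actual math. A standalone CUDA illustration of the same idea
// for half inputs; the kernel name and signature are assumptions, not ATen API.
#include <cuda_fp16.h>

__global__ void acos_half_opmath(const __half* in, __half* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float x = __half2float(in[i]);     // promote to the wider "op math" type
        out[i] = __float2half(acosf(x));   // compute in float, store back as half
    }
}
// ------------------------------------------------------------------------------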
0f0e89566c01df0aa3b2e21e605ac589bb4bb40e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void res_calc_gpu( const float *x1, const float *x2, const float *q1, const float *q2, const float *adt1, const float *adt2, float *res1, float *res2) { float dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0] - x2[0]; dy = x1[1] - x2[1]; ri = 1.0f/q1[0]; p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2])); vol1 = ri*(q1[1]*dy - q1[2]*dx); ri = 1.0f/q2[0]; p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2])); vol2 = ri*(q2[1]*dy - q2[2]*dx); mu = 0.5f*((*adt1)+(*adt2))*eps; f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]); res1[0] += f; res2[0] -= f; f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]); res1[1] += f; res2[1] -= f; f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]); res1[2] += f; res2[2] -= f; f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const float *__restrict ind_arg2, float *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int *ind_map, short *arg_map, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg6_l[4]; float arg7_l[4]; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ float *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg3_size = ind_arg_sizes[0+blockId*1]; ind_arg3_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; //set shared memory pointers int nbytes = 0; ind_arg3_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3_s[n] = ZERO_float; } __syncthreads(); for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; int map3idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_float; } for ( int d=0; d<4; d++ ){ arg7_l[d] = ZERO_float; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; map3idx = opDat2Map[n + offset_b + set_size * 1]; //user-supplied kernel call res_calc_gpu(ind_arg0+map0idx*2, ind_arg0+map1idx*2, ind_arg1+map2idx*4, ind_arg1+map3idx*4, ind_arg2+map2idx*1, ind_arg2+map3idx*1, arg6_l, arg7_l); col2 = colors[n+offset_b]; } //store local variables int arg6_map; int arg7_map; if (col2>=0) { arg6_map = arg_map[0*set_size+n+offset_b]; arg7_map = arg_map[1*set_size+n+offset_b]; } for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg6_l[0] += ind_arg3_s[0+arg6_map*4]; arg6_l[1] += ind_arg3_s[1+arg6_map*4]; arg6_l[2] += ind_arg3_s[2+arg6_map*4]; arg6_l[3] += ind_arg3_s[3+arg6_map*4]; arg7_l[0] += ind_arg3_s[0+arg7_map*4]; arg7_l[1] += ind_arg3_s[1+arg7_map*4]; arg7_l[2] += ind_arg3_s[2+arg7_map*4]; arg7_l[3] += ind_arg3_s[3+arg7_map*4]; 
ind_arg3_s[0+arg6_map*4] = arg6_l[0]; ind_arg3_s[1+arg6_map*4] = arg6_l[1]; ind_arg3_s[2+arg6_map*4] = arg6_l[2]; ind_arg3_s[3+arg6_map*4] = arg6_l[3]; ind_arg3_s[0+arg7_map*4] = arg7_l[0]; ind_arg3_s[1+arg7_map*4] = arg7_l[1]; ind_arg3_s[2+arg7_map*4] = arg7_l[2]; ind_arg3_s[3+arg7_map*4] = arg7_l[3]; } __syncthreads(); } } for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } //get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0, (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg4.data_d, (float *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, Plan->ind_map, Plan->loc_map, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
0f0e89566c01df0aa3b2e21e605ac589bb4bb40e.cu
// // auto-generated by op2.py // //user function __device__ void res_calc_gpu( const float *x1, const float *x2, const float *q1, const float *q2, const float *adt1, const float *adt2, float *res1, float *res2) { float dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0] - x2[0]; dy = x1[1] - x2[1]; ri = 1.0f/q1[0]; p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2])); vol1 = ri*(q1[1]*dy - q1[2]*dx); ri = 1.0f/q2[0]; p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2])); vol2 = ri*(q2[1]*dy - q2[2]*dx); mu = 0.5f*((*adt1)+(*adt2))*eps; f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]); res1[0] += f; res2[0] -= f; f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]); res1[1] += f; res2[1] -= f; f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]); res1[2] += f; res2[2] -= f; f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const float *__restrict ind_arg0, const float *__restrict ind_arg1, const float *__restrict ind_arg2, float *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int *ind_map, short *arg_map, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { float arg6_l[4]; float arg7_l[4]; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ float *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg3_size = ind_arg_sizes[0+blockId*1]; ind_arg3_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; //set shared memory pointers int nbytes = 0; ind_arg3_s = (float *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3_s[n] = ZERO_float; } __syncthreads(); for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; int map3idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_float; } for ( int d=0; d<4; d++ ){ arg7_l[d] = ZERO_float; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; map3idx = opDat2Map[n + offset_b + set_size * 1]; //user-supplied kernel call res_calc_gpu(ind_arg0+map0idx*2, ind_arg0+map1idx*2, ind_arg1+map2idx*4, ind_arg1+map3idx*4, ind_arg2+map2idx*1, ind_arg2+map3idx*1, arg6_l, arg7_l); col2 = colors[n+offset_b]; } //store local variables int arg6_map; int arg7_map; if (col2>=0) { arg6_map = arg_map[0*set_size+n+offset_b]; arg7_map = arg_map[1*set_size+n+offset_b]; } for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg6_l[0] += ind_arg3_s[0+arg6_map*4]; arg6_l[1] += ind_arg3_s[1+arg6_map*4]; arg6_l[2] += ind_arg3_s[2+arg6_map*4]; arg6_l[3] += ind_arg3_s[3+arg6_map*4]; arg7_l[0] += ind_arg3_s[0+arg7_map*4]; arg7_l[1] += ind_arg3_s[1+arg7_map*4]; arg7_l[2] += ind_arg3_s[2+arg7_map*4]; arg7_l[3] += ind_arg3_s[3+arg7_map*4]; ind_arg3_s[0+arg6_map*4] = arg6_l[0]; ind_arg3_s[1+arg6_map*4] = arg6_l[1]; ind_arg3_s[2+arg6_map*4] = 
arg6_l[2]; ind_arg3_s[3+arg6_map*4] = arg6_l[3]; ind_arg3_s[0+arg7_map*4] = arg7_l[0]; ind_arg3_s[1+arg7_map*4] = arg7_l[1]; ind_arg3_s[2+arg7_map*4] = arg7_l[2]; ind_arg3_s[3+arg7_map*4] = arg7_l[3]; } __syncthreads(); } } for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } //get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; op_cuda_res_calc<<<nblocks,nthread,nshared>>>( (float *)arg0.data_d, (float *)arg2.data_d, (float *)arg4.data_d, (float *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, Plan->ind_map, Plan->loc_map, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
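The host stub in the record above splits a large colour block over a 2-D grid (65535 in x, remainder in y) because gridDim.x was capped at 65535 on older compute capabilities, and the kernel then rebuilds a linear block id as blockIdx.x + blockIdx.y * gridDim.x. A minimal standalone sketch of that split and guard, with our own names (not OP2 code):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void linear_block_id_kernel(int nblocks) {
  // Recover the linear block index the same way op_cuda_res_calc does.
  int blockId = blockIdx.x + blockIdx.y * gridDim.x;
  if (blockId >= nblocks) return;  // guard the padded tail of the 2-D grid
  if (threadIdx.x == 0 && blockId == nblocks - 1)
    printf("last logical block %d reached\n", blockId);
}

int main() {
  int nblocks = 70000;  // more logical blocks than the 65535 limit on gridDim.x
  dim3 grid(nblocks >= (1 << 16) ? 65535 : nblocks,
            nblocks >= (1 << 16) ? (nblocks - 1) / 65535 + 1 : 1, 1);
  linear_block_id_kernel<<<grid, 128>>>(nblocks);
  cudaDeviceSynchronize();
  return 0;
}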
d88d80f15be1d88e0d3542f9e365d1f01372e050.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "update_bins.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *vec = NULL; hipMalloc(&vec, XSIZE*YSIZE); int *bin = NULL; hipMalloc(&bin, XSIZE*YSIZE); int *bin_counters = NULL; hipMalloc(&bin_counters, XSIZE*YSIZE); const int num_bins = 1; const int n = 1; const float slope = 1; const float intercept = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( update_bins), dim3(gridBlock),dim3(threadBlock), 0, 0, vec,bin,bin_counters,num_bins,n,slope,intercept); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( update_bins), dim3(gridBlock),dim3(threadBlock), 0, 0, vec,bin,bin_counters,num_bins,n,slope,intercept); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( update_bins), dim3(gridBlock),dim3(threadBlock), 0, 0, vec,bin,bin_counters,num_bins,n,slope,intercept); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d88d80f15be1d88e0d3542f9e365d1f01372e050.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "update_bins.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *vec = NULL; cudaMalloc(&vec, XSIZE*YSIZE); int *bin = NULL; cudaMalloc(&bin, XSIZE*YSIZE); int *bin_counters = NULL; cudaMalloc(&bin_counters, XSIZE*YSIZE); const int num_bins = 1; const int n = 1; const float slope = 1; const float intercept = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); update_bins<<<gridBlock,threadBlock>>>(vec,bin,bin_counters,num_bins,n,slope,intercept); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { update_bins<<<gridBlock,threadBlock>>>(vec,bin,bin_counters,num_bins,n,slope,intercept); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { update_bins<<<gridBlock,threadBlock>>>(vec,bin,bin_counters,num_bins,n,slope,intercept); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
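The pair above shows the mechanical renames hipify applies to runtime calls and launches. A toy CUDA program of our own (not from the dataset) with the corresponding HIP spellings, as seen in this record, noted in comments:

#include <cuda_runtime.h>

__global__ void scale(float* v, float s, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= s;
}

int main() {
  const int n = 1 << 20;
  float* d = nullptr;
  cudaMalloc((void**)&d, n * sizeof(float));    // hipify: hipMalloc
  scale<<<(n + 255) / 256, 256>>>(d, 2.0f, n);  // hipify: hipLaunchKernelGGL(( scale), dim3(...), dim3(256), 0, 0, d, 2.0f, n)
  cudaDeviceSynchronize();                      // hipify: hipDeviceSynchronize
  cudaFree(d);                                  // hipify: hipFree
  return 0;
}

The dim3() wrapping of the launch configuration and the two extra arguments (shared-memory size and stream, both 0 here) are exactly the differences visible between the .cu and .hip sides of this record.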
9e1649c8c2927a64aa5ee566de534ace8de7ed99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "ten_tusscher_2006.h" __constant__ size_t pitch; extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { size_t pitch_h; log_info("Using ten Tusscher 2006 GPU model\n"); uint32_t num_volumes = solver->original_num_cells; // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1) / BLOCK_SIZE; size_t size = num_volumes * sizeof(real); check_cuda_error(hipMallocPitch((void **)&(solver->sv), &pitch_h, size, (size_t)NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); real *initial_conditions = NULL; real *initial_conditions_device = NULL; if(solver->ode_extra_data) { initial_conditions = (real *)solver->ode_extra_data; check_cuda_error(hipMalloc((void **)&initial_conditions_device, solver->extra_data_size)); check_cuda_error(hipMemcpy(initial_conditions_device, initial_conditions, solver->extra_data_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( kernel_set_model_inital_conditions), dim3(GRID), dim3(BLOCK_SIZE), 0, 0, solver->sv, initial_conditions_device, num_volumes); check_cuda_error(hipPeekAtLastError()); hipDeviceSynchronize(); check_cuda_error(hipFree(initial_conditions_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES(solve_model_odes_gpu) { size_t num_cells_to_solve = ode_solver->num_cells_to_solve; uint32_t * cells_to_solve = ode_solver->cells_to_solve; real *sv = ode_solver->sv; real dt = ode_solver->min_dt; uint32_t num_steps = ode_solver->num_steps; // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1) / BLOCK_SIZE; size_t stim_currents_size = sizeof(real) * num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t) * num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **)&stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); // the array cells to solve is passed when we are using and adaptive mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **)&cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu), dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error(hipPeekAtLastError()); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, real *initial_conditions, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { if(initial_conditions == NULL) { *((real *)((char *)sv + pitch * 0) + threadID) = -85.23f; // V; millivolt *((real *)((char *)sv + pitch * 1) + threadID) = 0.00621; // Xr1; dimensionless *((real *)((char *)sv + pitch * 2) + threadID) = 0.4712; // Xr2; dimensionless *((real *)((char *)sv + pitch * 3) + threadID) = 0.0095; // Xs; dimensionless *((real *)((char *)sv + pitch * 4) + threadID) = 0.00172; // m; dimensionless *((real *)((char *)sv + pitch * 5) + threadID) = 0.7444; // h; dimensionless *((real *)((char *)sv + pitch * 6) + threadID) = 0.7045; // j; dimensionless *((real *)((char *)sv + pitch * 7) + threadID) = 3.373e-5; // d; 
dimensionless *((real *)((char *)sv + pitch * 8) + threadID) = 0.7888; // f; dimensionless *((real *)((char *)sv + pitch * 9) + threadID) = 0.9755; // f2; dimensionless *((real *)((char *)sv + pitch * 10) + threadID) = 0.9953; // fCass; dimensionless *((real *)((char *)sv + pitch * 11) + threadID) = 0.999998; // s; dimensionless *((real *)((char *)sv + pitch * 12) + threadID) = 2.42e-8; // r; dimensionless *((real *)((char *)sv + pitch * 13) + threadID) = 0.000126; // Ca_i; millimolar *((real *)((char *)sv + pitch * 14) + threadID) = 3.64; // Ca_SR; millimolar *((real *)((char *)sv + pitch * 15) + threadID) = 0.00036; // Ca_ss; millimolar *((real *)((char *)sv + pitch * 16) + threadID) = 0.9073; // R_prime; dimensionless *((real *)((char *)sv + pitch * 17) + threadID) = 8.604; // Na_i; millimolar *((real *)((char *)sv + pitch * 18) + threadID) = 136.89; // K_i; millimolar } else { *((real *)((char *)sv + pitch * 0) + threadID) = initial_conditions[0]; // V; millivolt *((real *)((char *)sv + pitch * 1) + threadID) = initial_conditions[1]; // Xr1; dimensionless *((real *)((char *)sv + pitch * 2) + threadID) = initial_conditions[2]; // Xr2; dimensionless *((real *)((char *)sv + pitch * 3) + threadID) = initial_conditions[3]; // Xs; dimensionless *((real *)((char *)sv + pitch * 4) + threadID) = initial_conditions[4]; // m; dimensionless *((real *)((char *)sv + pitch * 5) + threadID) = initial_conditions[5]; // h; dimensionless *((real *)((char *)sv + pitch * 6) + threadID) = initial_conditions[6]; // j; dimensionless *((real *)((char *)sv + pitch * 7) + threadID) = initial_conditions[7]; // d; dimensionless *((real *)((char *)sv + pitch * 8) + threadID) = initial_conditions[8]; // f; dimensionless *((real *)((char *)sv + pitch * 9) + threadID) = initial_conditions[9]; // f2; dimensionless *((real *)((char *)sv + pitch * 10) + threadID) = initial_conditions[10]; // fCass; dimensionless *((real *)((char *)sv + pitch * 11) + threadID) = initial_conditions[11]; // s; dimensionless *((real *)((char *)sv + pitch * 12) + threadID) = initial_conditions[12]; // r; dimensionless *((real *)((char *)sv + pitch * 13) + threadID) = initial_conditions[13]; // Ca_i; millimolar *((real *)((char *)sv + pitch * 14) + threadID) = initial_conditions[14]; // Ca_SR; millimolar *((real *)((char *)sv + pitch * 15) + threadID) = initial_conditions[15]; // Ca_ss; millimolar *((real *)((char *)sv + pitch * 16) + threadID) = initial_conditions[16]; // R_prime; dimensionless *((real *)((char *)sv + pitch * 17) + threadID) = initial_conditions[17]; // Na_i; millimolar *((real *)((char *)sv + pitch * 18) + threadID) = initial_conditions[18]; // K_i; millimolar } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real *stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for(int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real *)((char *)sv) + sv_id) = dt * rDY[0] + *((real *)((char *)sv) + sv_id); for(int i = 1; i < 13; i++) { *((real *)((char *)sv + pitch * i) + sv_id) = rDY[i]; } for(int i = 13; i < 19; i++) { *((real *)((char *)sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *)((char *)sv + pitch * i) + sv_id); } } } } inline __device__ void RHS_gpu(real *sv_, 
real *rDY_, real stim_current, int threadID_, real dt) { // State variables const real V = *((real *)((char *)sv_ + pitch * 0) + threadID_); // Membrane variable const real Xr1 = *((real *)((char *)sv_ + pitch * 1) + threadID_); // Rapid time dependent potassium current Xr1 const real Xr2 = *((real *)((char *)sv_ + pitch * 2) + threadID_); // Rapid time dependent potassium current Xr2 const real Xs = *((real *)((char *)sv_ + pitch * 3) + threadID_); // Slow time dependent potassium current Xs const real m = *((real *)((char *)sv_ + pitch * 4) + threadID_); // Fast sodium current m const real h = *((real *)((char *)sv_ + pitch * 5) + threadID_); // Fast sodium current h gate const real j = *((real *)((char *)sv_ + pitch * 6) + threadID_); // Fast sodium current j gate const real d = *((real *)((char *)sv_ + pitch * 7) + threadID_); // L type Ca current d gate const real f = *((real *)((char *)sv_ + pitch * 8) + threadID_); ; // var_L_type_Ca_current_f_gate__f const real f2 = *((real *)((char *)sv_ + pitch * 9) + threadID_); // var_L_type_Ca_current_f2_gate__f2 const real fCass = *((real *)((char *)sv_ + pitch * 10) + threadID_); // L_type_Ca_current__fCass const real s = *((real *)((char *)sv_ + pitch * 11) + threadID_); // gating s const real r = *((real *)((char *)sv_ + pitch * 12) + threadID_); // gating r const real Ca_i = *((real *)((char *)sv_ + pitch * 13) + threadID_); // calcium_dynamics__Ca_i const real Ca_SR = *((real *)((char *)sv_ + pitch * 14) + threadID_); const real Ca_ss = *((real *)((char *)sv_ + pitch * 15) + threadID_); const real R_prime = *((real *)((char *)sv_ + pitch * 16) + threadID_); const real Na_i = *((real *)((char *)sv_ + pitch * 17) + threadID_); // var_sodium_dynamics__Na_i const real K_i = *((real *)((char *)sv_ + pitch * 18) + threadID_); // var_potassium_dynamics__K_i // Some constants const real R = 8314.472; const real T = 310.0; const real F = 96485.3415f; const real Cm = 0.185; const real V_c = 0.016404; const real Ko = 5.4; const real Nao = 140.0; const real Cao = 2.0; const real P_kna = 0.03; const real K_mk = 1.0; const real P_NaK = 2.724; const real K_mNa = 40.0; const real K_pCa = 0.0005; // Calcium dynamics const real V_rel = 0.102; const real k1_prime = 0.15; const real max_sr = 2.5; const real min_sr = 1.0; const real EC = 1.5; const real Vmax_up = 0.006375; // NCX consts const real alpha = 2.5; const real gamma = 0.35; const real K_sat = 0.1; const real Km_Ca = 1.38; const real Km_Nai = 87.5; const real K_NaCa = 1000.0; const real g_to = 0.294; const real g_Kr = 0.153; const real g_Ks = 0.098; const real g_CaL = 3.98e-05; const real g_Na = 14.838; const real g_pK = 0.0146; const real g_bca = 0.000592; const real g_pCa = 0.1238; const real g_K1 = 5.405; const real g_bna = 0.00029; // Calculations real EK = ((R * T) / F) * log(Ko / K_i); real EKs = ((R * T) / F) * log((Ko + (P_kna * Nao)) / (K_i + (P_kna * Na_i))); real ENa = ((R * T) / F) * log(Nao / Na_i); real ECa = ((0.5f * R * T) / F) * log(Cao / Ca_i); real beta_K1 = ((3.0f * exp(0.0002f * ((V - EK) + 100.0f))) + exp(0.1f * ((V - EK) - 10.0f))) / (1.0f + exp((-0.5f) * (V - EK))); real alpha_K1 = 0.1f / (1.0f + exp(0.06f * ((V - EK) - 200.0f))); real xK1_inf = alpha_K1 / (alpha_K1 + beta_K1); real IK1 = g_K1 * xK1_inf * (V - EK); real Ito = g_to * r * s * (V - EK); real IKr = g_Kr * Xr1 * Xr2 * (V - EK) * sqrt(Ko / 5.4f); real IKs = g_Ks * pow(Xs, 2.0f) * (V - EKs); real IpK = (g_pK * (V - EK)) / (1.0f + exp((25.0f - V) / 5.98f)); real ICaL = (V < 15.0f - 1.0e-5f || V > 15.0f + 1.0e-5f) ? 
((((g_CaL * d * f * f2 * fCass * 4.0f * (V - 15.0f) * pow(F, 2.0f)) / (R * T)) * ((0.25f * Ca_ss * exp((2.0f * (V - 15.0f) * F) / (R * T))) - Cao)) / (exp((2.0f * (V - 15.0f) * F) / (R * T)) - 1.0f)) : g_CaL * d * f * f2 * fCass * 2.0f * F * (0.25f * Ca_ss - Cao); real IbCa = g_bca * (V - ECa); real IpCa = (g_pCa * Ca_i) / (Ca_i + K_pCa); real INaK = ((((P_NaK * Ko) / (Ko + K_mk)) * Na_i) / (Na_i + K_mNa)) / (1.0f + (0.1245f * exp(((-0.1f) * V * F) / (R * T))) + (0.0353f * exp(((-V) * F) / (R * T)))); real INa = g_Na * pow(m, 3.0f) * h * j * (V - ENa); real IbNa = g_bna * (V - ENa); real INaCa = (K_NaCa * ((exp((gamma * V * F) / (R * T)) * pow(Na_i, 3.0f) * Cao) - (exp(((gamma - 1.0f) * V * F) / (R * T)) * pow(Nao, 3) * Ca_i * alpha))) / ((pow(Km_Nai, 3.0f) + pow(Nao, 3.0f)) * (Km_Ca + Cao) * (1.0f + (K_sat * exp(((gamma - 1.0f) * V * F) / (R * T))))); // Stimulus // real var_membrane__i_Stim = ((time>=stim_start)&&(time<=stim_start+stim_dur)) ? stim_current: 0.0f; real var_membrane__i_Stim = stim_current; real xr1_inf = 1.0f / (1.0f + exp(((-26.0f) - V) / 7.0f)); real alpha_xr1 = 450.0f / (1.0f + exp(((-45.0f) - V) / 10.0f)); real beta_xr1 = 6.0f / (1.0f + exp((V + 30.0f) / 11.5f)); real tau_xr1 = 1.0f * alpha_xr1 * beta_xr1; real xr2_inf = 1.0f / (1.0f + exp((V + 88.0f) / 24.0f)); real alpha_xr2 = 3.0f / (1.0f + exp(((-60.0f) - V) / 20.0f)); real beta_xr2 = 1.12f / (1.0f + exp((V - 60.0f) / 20.0f)); real tau_xr2 = 1.0f * alpha_xr2 * beta_xr2; real xs_inf = 1.0f / (1.0f + exp(((-5.0f) - V) / 14.0f)); real alpha_xs = 1400.0f / sqrt(1.0f + exp((5.0f - V) / 6.0f)); real beta_xs = 1.0f / (1.0f + exp((V - 35.0f) / 15.0f)); real tau_xs = (1.0f * alpha_xs * beta_xs) + 80.0f; real m_inf = 1.0f / pow(1.0f + exp(((-56.86f) - V) / 9.03f), 2.0f); real alpha_m = 1.0f / (1.0f + exp(((-60.0f) - V) / 5.0f)); real beta_m = (0.1f / (1.0f + exp((V + 35.0f) / 5.0f))) + (0.1f / (1.0f + exp((V - 50.0f) / 200.0f))); real tau_m = 1.0f * alpha_m * beta_m; real h_inf = 1.0f / pow(1.0f + exp((V + 71.55f) / 7.43f), 2.0f); real alpha_h = (V < (-40.0f)) ? (0.057f * exp((-(V + 80.0f)) / 6.8f)) : 0.0f; real beta_h = (V < (-40.0f)) ? ((2.7f * exp(0.079f * V)) + (310000.0f * exp(0.3485f * V))) : (0.77f / (0.13f * (1.0f + exp((V + 10.66f) / (-11.1f))))); real tau_h = 1.0f / (alpha_h + beta_h); real j_inf = 1.0f / pow(1.0f + exp((V + 71.55f) / 7.43f), 2.0f); real alpha_j = (V < (-40.0f)) ? ((((((-25428.0f) * exp(0.2444f * V)) - (6.948e-06f * exp((-0.04391f) * V))) * (V + 37.78f)) / 1.0f) / (1.0f + exp(0.311f * (V + 79.23f)))) : 0.0f; real beta_j = (V < (-40.0f)) ? 
((0.02424f * exp((-0.01052f) * V)) / (1.0f + exp((-0.1378f) * (V + 40.14f)))) : ((0.6f * exp(0.057f * V)) / (1.0f + exp((-0.1f) * (V + 32.0f)))); real tau_j = 1.0f / (alpha_j + beta_j); real d_inf = 1.0f / (1.0f + exp(((-8.0f) - V) / 7.5f)); real alpha_d = (1.4f / (1.0f + exp(((-35.0f) - V) / 13.0f))) + 0.25f; real beta_d = 1.4f / (1.0f + exp((V + 5.0f) / 5.0f)); real gamma_d = 1.0f / (1.0f + exp((50.0f - V) / 20.0f)); real tau_d = (1.0f * alpha_d * beta_d) + gamma_d; real f_inf = 1.0f / (1.0f + exp((V + 20.0f) / 7.0f)); real tau_f = (1102.5f * exp((-pow(V + 27.0f, 2.0f)) / 225.0f)) + (200.0f / (1.0f + exp((13.0f - V) / 10.0f))) + (180.0f / (1.0f + exp((V + 30.0f) / 10.0f))) + 20.0f; real f2_inf = (0.67f / (1.0f + exp((V + 35.0f) / 7.0f))) + 0.33f; real tau_f2 = (562.0f * exp((-pow(V + 27.0f, 2.0f)) / 240.0f)) + (31.0f / (1.0f + exp((25.0f - V) / 10.0f))) + (80.0f / (1.0f + exp((V + 30.0f) / 10.0f))); real fCass_inf = (0.6f / (1.0f + pow(Ca_ss / 0.05f, 2.0f))) + 0.4f; real tau_fCass = (80.0f / (1.0f + pow(Ca_ss / 0.05f, 2.0f))) + 2.0f; real s_inf = 1.0f / (1.0f + exp((V + 20.0f) / 5.0f)); real tau_s = (85.0f * exp((-pow(V + 45.0f, 2.0f)) / 320.0f)) + (5.0f / (1.0f + exp((V - 20.0f) / 5.0f))) + 3.0f; real r_inf = 1.0f / (1.0f + exp((20.0f - V) / 6.0f)); real tau_r = (9.5f * exp((-pow(V + 40.0f, 2.0f)) / 1800.0f)) + 0.8f; real kcasr = max_sr - ((max_sr - min_sr) / (1.0f + pow(EC / Ca_SR, 2.0f))); real k1 = k1_prime / kcasr; const real k3 = 0.06; real var_calcium_dynamics__O = (k1 * pow(Ca_ss, 2.0f) * R_prime) / (k3 + (k1 * pow(Ca_ss, 2.0f))); real Irel = V_rel * var_calcium_dynamics__O * (Ca_SR - Ca_ss); const real var_calcium_dynamics__K_up = 0.00025; real var_calcium_dynamics__i_up = Vmax_up / (1.0f + (pow(var_calcium_dynamics__K_up, 2.0f) / pow(Ca_i, 2.0f))); real var_calcium_dynamics__V_leak = 0.00036f; real var_calcium_dynamics__i_leak = var_calcium_dynamics__V_leak * (Ca_SR - Ca_i); const real var_calcium_dynamics__V_xfer = 0.0038f; real var_calcium_dynamics__i_xfer = var_calcium_dynamics__V_xfer * (Ca_ss - Ca_i); const real var_calcium_dynamics__k2_prime = 0.045f; real var_calcium_dynamics__k2 = var_calcium_dynamics__k2_prime * kcasr; const real var_calcium_dynamics__k4 = 0.005f; const real var_calcium_dynamics__Buf_c = 0.2f; const real var_calcium_dynamics__K_buf_c = 0.001f; real Ca_i_bufc = 1.0f / (1.0f + ((var_calcium_dynamics__Buf_c * var_calcium_dynamics__K_buf_c) / pow(Ca_i + var_calcium_dynamics__K_buf_c, 2))); const real var_calcium_dynamics__K_buf_sr = 0.3f; const real var_calcium_dynamics__Buf_sr = 10.0f; real var_calcium_dynamics__Ca_sr_bufsr = 1.0f / (1.0f + ((var_calcium_dynamics__Buf_sr * var_calcium_dynamics__K_buf_sr) / pow(Ca_SR + var_calcium_dynamics__K_buf_sr, 2))); const real var_calcium_dynamics__Buf_ss = 0.4f; const real var_calcium_dynamics__K_buf_ss = 0.00025f; real Ca_ss_bufss = 1.0f / (1.0f + ((var_calcium_dynamics__Buf_ss * var_calcium_dynamics__K_buf_ss) / pow(Ca_ss + var_calcium_dynamics__K_buf_ss, 2.0f))); const real var_calcium_dynamics__V_sr = 0.001094f; const real var_calcium_dynamics__V_ss = 5.468e-05f; real var_calcium_dynamics__V_c = V_c; real var_calcium_dynamics__F = F; real var_calcium_dynamics__Cm = Cm; real var_calcium_dynamics__ICaL = ICaL; real var_calcium_dynamics__INaCa = INaCa; real var_calcium_dynamics__IpCa = IpCa; real var_calcium_dynamics__IbCa = IbCa; real d_dt_V = -(IK1 + Ito + IKr + IKs + ICaL + INaK + INa + IbNa + INaCa + IbCa + IpK + IpCa + var_membrane__i_Stim); real d_dt_R_prime = ((-var_calcium_dynamics__k2) * Ca_ss * R_prime) 
+ (var_calcium_dynamics__k4 * (1.0f - R_prime)); real d_dt_Ca_i = Ca_i_bufc * (((((var_calcium_dynamics__i_leak - var_calcium_dynamics__i_up) * var_calcium_dynamics__V_sr) / var_calcium_dynamics__V_c) + var_calcium_dynamics__i_xfer) - ((((var_calcium_dynamics__IbCa + var_calcium_dynamics__IpCa) - (2.0f * var_calcium_dynamics__INaCa)) * var_calcium_dynamics__Cm) / (2.0f * var_calcium_dynamics__V_c * var_calcium_dynamics__F))); real d_dt_Ca_SR = var_calcium_dynamics__Ca_sr_bufsr * (var_calcium_dynamics__i_up - (Irel + var_calcium_dynamics__i_leak)); real d_dt_Ca_ss = Ca_ss_bufss * (((((-var_calcium_dynamics__ICaL) * var_calcium_dynamics__Cm) / (2.0f * var_calcium_dynamics__V_ss * var_calcium_dynamics__F)) + ((Irel * var_calcium_dynamics__V_sr) / var_calcium_dynamics__V_ss)) - ((var_calcium_dynamics__i_xfer * var_calcium_dynamics__V_c) / var_calcium_dynamics__V_ss)); real d_dt_Na_i = ((-(INa + IbNa + (3.0f * INaK) + (3.0f * INaCa))) / (V_c * F)) * Cm; real d_dt_K_i = ((-((IK1 + Ito + IKr + IKs + IpK + var_membrane__i_Stim) - (2.0f * INaK))) / (V_c * F)) * Cm; rDY_[0] = d_dt_V; rDY_[13] = d_dt_Ca_i; rDY_[14] = d_dt_Ca_SR; rDY_[15] = d_dt_Ca_ss; rDY_[16] = d_dt_R_prime; rDY_[17] = d_dt_Na_i; rDY_[18] = d_dt_K_i; // Rush Larsen rDY_[1] = xr1_inf + (Xr1 - xr1_inf) * exp(-dt / tau_xr1); rDY_[2] = xr2_inf + (Xr2 - xr2_inf) * exp(-dt / tau_xr2); rDY_[3] = xs_inf + (Xs - xs_inf) * exp(-dt / tau_xs); rDY_[4] = m_inf + (m - m_inf) * exp(-dt / tau_m); rDY_[5] = h_inf + (h - h_inf) * exp(-dt / tau_h); rDY_[6] = j_inf + (j - j_inf) * exp(-dt / tau_j); rDY_[7] = d_inf + (d - d_inf) * exp(-dt / tau_d); rDY_[8] = f_inf + (f - f_inf) * exp(-dt / tau_f); rDY_[9] = f2_inf + (f2 - f2_inf) * exp(-dt / tau_f2); rDY_[10] = fCass_inf + (fCass - fCass_inf) * exp(-dt / tau_fCass); rDY_[11] = s_inf + (s - s_inf) * exp(-dt / tau_s); rDY_[12] = r_inf + (r - r_inf) * exp(-dt / tau_r); }
9e1649c8c2927a64aa5ee566de534ace8de7ed99.cu
#include <stddef.h> #include <stdint.h> #include "ten_tusscher_2006.h" __constant__ size_t pitch; extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { size_t pitch_h; log_info("Using ten Tusscher 2006 GPU model\n"); uint32_t num_volumes = solver->original_num_cells; // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1) / BLOCK_SIZE; size_t size = num_volumes * sizeof(real); check_cuda_error(cudaMallocPitch((void **)&(solver->sv), &pitch_h, size, (size_t)NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); real *initial_conditions = NULL; real *initial_conditions_device = NULL; if(solver->ode_extra_data) { initial_conditions = (real *)solver->ode_extra_data; check_cuda_error(cudaMalloc((void **)&initial_conditions_device, solver->extra_data_size)); check_cuda_error(cudaMemcpy(initial_conditions_device, initial_conditions, solver->extra_data_size, cudaMemcpyHostToDevice)); } kernel_set_model_inital_conditions<<<GRID, BLOCK_SIZE>>>(solver->sv, initial_conditions_device, num_volumes); check_cuda_error(cudaPeekAtLastError()); cudaDeviceSynchronize(); check_cuda_error(cudaFree(initial_conditions_device)); return pitch_h; } extern "C" SOLVE_MODEL_ODES(solve_model_odes_gpu) { size_t num_cells_to_solve = ode_solver->num_cells_to_solve; uint32_t * cells_to_solve = ode_solver->cells_to_solve; real *sv = ode_solver->sv; real dt = ode_solver->min_dt; uint32_t num_steps = ode_solver->num_steps; // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1) / BLOCK_SIZE; size_t stim_currents_size = sizeof(real) * num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t) * num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **)&stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); // the array cells to solve is passed when we are using and adaptive mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **)&cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu<<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error(cudaPeekAtLastError()); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, real *initial_conditions, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { if(initial_conditions == NULL) { *((real *)((char *)sv + pitch * 0) + threadID) = -85.23f; // V; millivolt *((real *)((char *)sv + pitch * 1) + threadID) = 0.00621; // Xr1; dimensionless *((real *)((char *)sv + pitch * 2) + threadID) = 0.4712; // Xr2; dimensionless *((real *)((char *)sv + pitch * 3) + threadID) = 0.0095; // Xs; dimensionless *((real *)((char *)sv + pitch * 4) + threadID) = 0.00172; // m; dimensionless *((real *)((char *)sv + pitch * 5) + threadID) = 0.7444; // h; dimensionless *((real *)((char *)sv + pitch * 6) + threadID) = 0.7045; // j; dimensionless *((real *)((char *)sv + pitch * 7) + threadID) = 3.373e-5; // d; dimensionless *((real *)((char *)sv + pitch * 8) + threadID) = 0.7888; // f; dimensionless *((real *)((char *)sv + pitch * 9) + threadID) = 0.9755; 
// f2; dimensionless *((real *)((char *)sv + pitch * 10) + threadID) = 0.9953; // fCass; dimensionless *((real *)((char *)sv + pitch * 11) + threadID) = 0.999998; // s; dimensionless *((real *)((char *)sv + pitch * 12) + threadID) = 2.42e-8; // r; dimensionless *((real *)((char *)sv + pitch * 13) + threadID) = 0.000126; // Ca_i; millimolar *((real *)((char *)sv + pitch * 14) + threadID) = 3.64; // Ca_SR; millimolar *((real *)((char *)sv + pitch * 15) + threadID) = 0.00036; // Ca_ss; millimolar *((real *)((char *)sv + pitch * 16) + threadID) = 0.9073; // R_prime; dimensionless *((real *)((char *)sv + pitch * 17) + threadID) = 8.604; // Na_i; millimolar *((real *)((char *)sv + pitch * 18) + threadID) = 136.89; // K_i; millimolar } else { *((real *)((char *)sv + pitch * 0) + threadID) = initial_conditions[0]; // V; millivolt *((real *)((char *)sv + pitch * 1) + threadID) = initial_conditions[1]; // Xr1; dimensionless *((real *)((char *)sv + pitch * 2) + threadID) = initial_conditions[2]; // Xr2; dimensionless *((real *)((char *)sv + pitch * 3) + threadID) = initial_conditions[3]; // Xs; dimensionless *((real *)((char *)sv + pitch * 4) + threadID) = initial_conditions[4]; // m; dimensionless *((real *)((char *)sv + pitch * 5) + threadID) = initial_conditions[5]; // h; dimensionless *((real *)((char *)sv + pitch * 6) + threadID) = initial_conditions[6]; // j; dimensionless *((real *)((char *)sv + pitch * 7) + threadID) = initial_conditions[7]; // d; dimensionless *((real *)((char *)sv + pitch * 8) + threadID) = initial_conditions[8]; // f; dimensionless *((real *)((char *)sv + pitch * 9) + threadID) = initial_conditions[9]; // f2; dimensionless *((real *)((char *)sv + pitch * 10) + threadID) = initial_conditions[10]; // fCass; dimensionless *((real *)((char *)sv + pitch * 11) + threadID) = initial_conditions[11]; // s; dimensionless *((real *)((char *)sv + pitch * 12) + threadID) = initial_conditions[12]; // r; dimensionless *((real *)((char *)sv + pitch * 13) + threadID) = initial_conditions[13]; // Ca_i; millimolar *((real *)((char *)sv + pitch * 14) + threadID) = initial_conditions[14]; // Ca_SR; millimolar *((real *)((char *)sv + pitch * 15) + threadID) = initial_conditions[15]; // Ca_ss; millimolar *((real *)((char *)sv + pitch * 16) + threadID) = initial_conditions[16]; // R_prime; dimensionless *((real *)((char *)sv + pitch * 17) + threadID) = initial_conditions[17]; // Na_i; millimolar *((real *)((char *)sv + pitch * 18) + threadID) = initial_conditions[18]; // K_i; millimolar } } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real *stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for(int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real *)((char *)sv) + sv_id) = dt * rDY[0] + *((real *)((char *)sv) + sv_id); for(int i = 1; i < 13; i++) { *((real *)((char *)sv + pitch * i) + sv_id) = rDY[i]; } for(int i = 13; i < 19; i++) { *((real *)((char *)sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *)((char *)sv + pitch * i) + sv_id); } } } } inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, int threadID_, real dt) { // State variables const real V = *((real *)((char *)sv_ + pitch * 0) + threadID_); // 
Membrane variable const real Xr1 = *((real *)((char *)sv_ + pitch * 1) + threadID_); // Rapid time dependent potassium current Xr1 const real Xr2 = *((real *)((char *)sv_ + pitch * 2) + threadID_); // Rapid time dependent potassium current Xr2 const real Xs = *((real *)((char *)sv_ + pitch * 3) + threadID_); // Slow time dependent potassium current Xs const real m = *((real *)((char *)sv_ + pitch * 4) + threadID_); // Fast sodium current m const real h = *((real *)((char *)sv_ + pitch * 5) + threadID_); // Fast sodium current h gate const real j = *((real *)((char *)sv_ + pitch * 6) + threadID_); // Fast sodium current j gate const real d = *((real *)((char *)sv_ + pitch * 7) + threadID_); // L type Ca current d gate const real f = *((real *)((char *)sv_ + pitch * 8) + threadID_); ; // var_L_type_Ca_current_f_gate__f const real f2 = *((real *)((char *)sv_ + pitch * 9) + threadID_); // var_L_type_Ca_current_f2_gate__f2 const real fCass = *((real *)((char *)sv_ + pitch * 10) + threadID_); // L_type_Ca_current__fCass const real s = *((real *)((char *)sv_ + pitch * 11) + threadID_); // gating s const real r = *((real *)((char *)sv_ + pitch * 12) + threadID_); // gating r const real Ca_i = *((real *)((char *)sv_ + pitch * 13) + threadID_); // calcium_dynamics__Ca_i const real Ca_SR = *((real *)((char *)sv_ + pitch * 14) + threadID_); const real Ca_ss = *((real *)((char *)sv_ + pitch * 15) + threadID_); const real R_prime = *((real *)((char *)sv_ + pitch * 16) + threadID_); const real Na_i = *((real *)((char *)sv_ + pitch * 17) + threadID_); // var_sodium_dynamics__Na_i const real K_i = *((real *)((char *)sv_ + pitch * 18) + threadID_); // var_potassium_dynamics__K_i // Some constants const real R = 8314.472; const real T = 310.0; const real F = 96485.3415f; const real Cm = 0.185; const real V_c = 0.016404; const real Ko = 5.4; const real Nao = 140.0; const real Cao = 2.0; const real P_kna = 0.03; const real K_mk = 1.0; const real P_NaK = 2.724; const real K_mNa = 40.0; const real K_pCa = 0.0005; // Calcium dynamics const real V_rel = 0.102; const real k1_prime = 0.15; const real max_sr = 2.5; const real min_sr = 1.0; const real EC = 1.5; const real Vmax_up = 0.006375; // NCX consts const real alpha = 2.5; const real gamma = 0.35; const real K_sat = 0.1; const real Km_Ca = 1.38; const real Km_Nai = 87.5; const real K_NaCa = 1000.0; const real g_to = 0.294; const real g_Kr = 0.153; const real g_Ks = 0.098; const real g_CaL = 3.98e-05; const real g_Na = 14.838; const real g_pK = 0.0146; const real g_bca = 0.000592; const real g_pCa = 0.1238; const real g_K1 = 5.405; const real g_bna = 0.00029; // Calculations real EK = ((R * T) / F) * log(Ko / K_i); real EKs = ((R * T) / F) * log((Ko + (P_kna * Nao)) / (K_i + (P_kna * Na_i))); real ENa = ((R * T) / F) * log(Nao / Na_i); real ECa = ((0.5f * R * T) / F) * log(Cao / Ca_i); real beta_K1 = ((3.0f * exp(0.0002f * ((V - EK) + 100.0f))) + exp(0.1f * ((V - EK) - 10.0f))) / (1.0f + exp((-0.5f) * (V - EK))); real alpha_K1 = 0.1f / (1.0f + exp(0.06f * ((V - EK) - 200.0f))); real xK1_inf = alpha_K1 / (alpha_K1 + beta_K1); real IK1 = g_K1 * xK1_inf * (V - EK); real Ito = g_to * r * s * (V - EK); real IKr = g_Kr * Xr1 * Xr2 * (V - EK) * sqrt(Ko / 5.4f); real IKs = g_Ks * pow(Xs, 2.0f) * (V - EKs); real IpK = (g_pK * (V - EK)) / (1.0f + exp((25.0f - V) / 5.98f)); real ICaL = (V < 15.0f - 1.0e-5f || V > 15.0f + 1.0e-5f) ? 
((((g_CaL * d * f * f2 * fCass * 4.0f * (V - 15.0f) * pow(F, 2.0f)) / (R * T)) * ((0.25f * Ca_ss * exp((2.0f * (V - 15.0f) * F) / (R * T))) - Cao)) / (exp((2.0f * (V - 15.0f) * F) / (R * T)) - 1.0f)) : g_CaL * d * f * f2 * fCass * 2.0f * F * (0.25f * Ca_ss - Cao); real IbCa = g_bca * (V - ECa); real IpCa = (g_pCa * Ca_i) / (Ca_i + K_pCa); real INaK = ((((P_NaK * Ko) / (Ko + K_mk)) * Na_i) / (Na_i + K_mNa)) / (1.0f + (0.1245f * exp(((-0.1f) * V * F) / (R * T))) + (0.0353f * exp(((-V) * F) / (R * T)))); real INa = g_Na * pow(m, 3.0f) * h * j * (V - ENa); real IbNa = g_bna * (V - ENa); real INaCa = (K_NaCa * ((exp((gamma * V * F) / (R * T)) * pow(Na_i, 3.0f) * Cao) - (exp(((gamma - 1.0f) * V * F) / (R * T)) * pow(Nao, 3) * Ca_i * alpha))) / ((pow(Km_Nai, 3.0f) + pow(Nao, 3.0f)) * (Km_Ca + Cao) * (1.0f + (K_sat * exp(((gamma - 1.0f) * V * F) / (R * T))))); // Stimulus // real var_membrane__i_Stim = ((time>=stim_start)&&(time<=stim_start+stim_dur)) ? stim_current: 0.0f; real var_membrane__i_Stim = stim_current; real xr1_inf = 1.0f / (1.0f + exp(((-26.0f) - V) / 7.0f)); real alpha_xr1 = 450.0f / (1.0f + exp(((-45.0f) - V) / 10.0f)); real beta_xr1 = 6.0f / (1.0f + exp((V + 30.0f) / 11.5f)); real tau_xr1 = 1.0f * alpha_xr1 * beta_xr1; real xr2_inf = 1.0f / (1.0f + exp((V + 88.0f) / 24.0f)); real alpha_xr2 = 3.0f / (1.0f + exp(((-60.0f) - V) / 20.0f)); real beta_xr2 = 1.12f / (1.0f + exp((V - 60.0f) / 20.0f)); real tau_xr2 = 1.0f * alpha_xr2 * beta_xr2; real xs_inf = 1.0f / (1.0f + exp(((-5.0f) - V) / 14.0f)); real alpha_xs = 1400.0f / sqrt(1.0f + exp((5.0f - V) / 6.0f)); real beta_xs = 1.0f / (1.0f + exp((V - 35.0f) / 15.0f)); real tau_xs = (1.0f * alpha_xs * beta_xs) + 80.0f; real m_inf = 1.0f / pow(1.0f + exp(((-56.86f) - V) / 9.03f), 2.0f); real alpha_m = 1.0f / (1.0f + exp(((-60.0f) - V) / 5.0f)); real beta_m = (0.1f / (1.0f + exp((V + 35.0f) / 5.0f))) + (0.1f / (1.0f + exp((V - 50.0f) / 200.0f))); real tau_m = 1.0f * alpha_m * beta_m; real h_inf = 1.0f / pow(1.0f + exp((V + 71.55f) / 7.43f), 2.0f); real alpha_h = (V < (-40.0f)) ? (0.057f * exp((-(V + 80.0f)) / 6.8f)) : 0.0f; real beta_h = (V < (-40.0f)) ? ((2.7f * exp(0.079f * V)) + (310000.0f * exp(0.3485f * V))) : (0.77f / (0.13f * (1.0f + exp((V + 10.66f) / (-11.1f))))); real tau_h = 1.0f / (alpha_h + beta_h); real j_inf = 1.0f / pow(1.0f + exp((V + 71.55f) / 7.43f), 2.0f); real alpha_j = (V < (-40.0f)) ? ((((((-25428.0f) * exp(0.2444f * V)) - (6.948e-06f * exp((-0.04391f) * V))) * (V + 37.78f)) / 1.0f) / (1.0f + exp(0.311f * (V + 79.23f)))) : 0.0f; real beta_j = (V < (-40.0f)) ? 
((0.02424f * exp((-0.01052f) * V)) / (1.0f + exp((-0.1378f) * (V + 40.14f)))) : ((0.6f * exp(0.057f * V)) / (1.0f + exp((-0.1f) * (V + 32.0f)))); real tau_j = 1.0f / (alpha_j + beta_j); real d_inf = 1.0f / (1.0f + exp(((-8.0f) - V) / 7.5f)); real alpha_d = (1.4f / (1.0f + exp(((-35.0f) - V) / 13.0f))) + 0.25f; real beta_d = 1.4f / (1.0f + exp((V + 5.0f) / 5.0f)); real gamma_d = 1.0f / (1.0f + exp((50.0f - V) / 20.0f)); real tau_d = (1.0f * alpha_d * beta_d) + gamma_d; real f_inf = 1.0f / (1.0f + exp((V + 20.0f) / 7.0f)); real tau_f = (1102.5f * exp((-pow(V + 27.0f, 2.0f)) / 225.0f)) + (200.0f / (1.0f + exp((13.0f - V) / 10.0f))) + (180.0f / (1.0f + exp((V + 30.0f) / 10.0f))) + 20.0f; real f2_inf = (0.67f / (1.0f + exp((V + 35.0f) / 7.0f))) + 0.33f; real tau_f2 = (562.0f * exp((-pow(V + 27.0f, 2.0f)) / 240.0f)) + (31.0f / (1.0f + exp((25.0f - V) / 10.0f))) + (80.0f / (1.0f + exp((V + 30.0f) / 10.0f))); real fCass_inf = (0.6f / (1.0f + pow(Ca_ss / 0.05f, 2.0f))) + 0.4f; real tau_fCass = (80.0f / (1.0f + pow(Ca_ss / 0.05f, 2.0f))) + 2.0f; real s_inf = 1.0f / (1.0f + exp((V + 20.0f) / 5.0f)); real tau_s = (85.0f * exp((-pow(V + 45.0f, 2.0f)) / 320.0f)) + (5.0f / (1.0f + exp((V - 20.0f) / 5.0f))) + 3.0f; real r_inf = 1.0f / (1.0f + exp((20.0f - V) / 6.0f)); real tau_r = (9.5f * exp((-pow(V + 40.0f, 2.0f)) / 1800.0f)) + 0.8f; real kcasr = max_sr - ((max_sr - min_sr) / (1.0f + pow(EC / Ca_SR, 2.0f))); real k1 = k1_prime / kcasr; const real k3 = 0.06; real var_calcium_dynamics__O = (k1 * pow(Ca_ss, 2.0f) * R_prime) / (k3 + (k1 * pow(Ca_ss, 2.0f))); real Irel = V_rel * var_calcium_dynamics__O * (Ca_SR - Ca_ss); const real var_calcium_dynamics__K_up = 0.00025; real var_calcium_dynamics__i_up = Vmax_up / (1.0f + (pow(var_calcium_dynamics__K_up, 2.0f) / pow(Ca_i, 2.0f))); real var_calcium_dynamics__V_leak = 0.00036f; real var_calcium_dynamics__i_leak = var_calcium_dynamics__V_leak * (Ca_SR - Ca_i); const real var_calcium_dynamics__V_xfer = 0.0038f; real var_calcium_dynamics__i_xfer = var_calcium_dynamics__V_xfer * (Ca_ss - Ca_i); const real var_calcium_dynamics__k2_prime = 0.045f; real var_calcium_dynamics__k2 = var_calcium_dynamics__k2_prime * kcasr; const real var_calcium_dynamics__k4 = 0.005f; const real var_calcium_dynamics__Buf_c = 0.2f; const real var_calcium_dynamics__K_buf_c = 0.001f; real Ca_i_bufc = 1.0f / (1.0f + ((var_calcium_dynamics__Buf_c * var_calcium_dynamics__K_buf_c) / pow(Ca_i + var_calcium_dynamics__K_buf_c, 2))); const real var_calcium_dynamics__K_buf_sr = 0.3f; const real var_calcium_dynamics__Buf_sr = 10.0f; real var_calcium_dynamics__Ca_sr_bufsr = 1.0f / (1.0f + ((var_calcium_dynamics__Buf_sr * var_calcium_dynamics__K_buf_sr) / pow(Ca_SR + var_calcium_dynamics__K_buf_sr, 2))); const real var_calcium_dynamics__Buf_ss = 0.4f; const real var_calcium_dynamics__K_buf_ss = 0.00025f; real Ca_ss_bufss = 1.0f / (1.0f + ((var_calcium_dynamics__Buf_ss * var_calcium_dynamics__K_buf_ss) / pow(Ca_ss + var_calcium_dynamics__K_buf_ss, 2.0f))); const real var_calcium_dynamics__V_sr = 0.001094f; const real var_calcium_dynamics__V_ss = 5.468e-05f; real var_calcium_dynamics__V_c = V_c; real var_calcium_dynamics__F = F; real var_calcium_dynamics__Cm = Cm; real var_calcium_dynamics__ICaL = ICaL; real var_calcium_dynamics__INaCa = INaCa; real var_calcium_dynamics__IpCa = IpCa; real var_calcium_dynamics__IbCa = IbCa; real d_dt_V = -(IK1 + Ito + IKr + IKs + ICaL + INaK + INa + IbNa + INaCa + IbCa + IpK + IpCa + var_membrane__i_Stim); real d_dt_R_prime = ((-var_calcium_dynamics__k2) * Ca_ss * R_prime) 
+ (var_calcium_dynamics__k4 * (1.0f - R_prime)); real d_dt_Ca_i = Ca_i_bufc * (((((var_calcium_dynamics__i_leak - var_calcium_dynamics__i_up) * var_calcium_dynamics__V_sr) / var_calcium_dynamics__V_c) + var_calcium_dynamics__i_xfer) - ((((var_calcium_dynamics__IbCa + var_calcium_dynamics__IpCa) - (2.0f * var_calcium_dynamics__INaCa)) * var_calcium_dynamics__Cm) / (2.0f * var_calcium_dynamics__V_c * var_calcium_dynamics__F))); real d_dt_Ca_SR = var_calcium_dynamics__Ca_sr_bufsr * (var_calcium_dynamics__i_up - (Irel + var_calcium_dynamics__i_leak)); real d_dt_Ca_ss = Ca_ss_bufss * (((((-var_calcium_dynamics__ICaL) * var_calcium_dynamics__Cm) / (2.0f * var_calcium_dynamics__V_ss * var_calcium_dynamics__F)) + ((Irel * var_calcium_dynamics__V_sr) / var_calcium_dynamics__V_ss)) - ((var_calcium_dynamics__i_xfer * var_calcium_dynamics__V_c) / var_calcium_dynamics__V_ss)); real d_dt_Na_i = ((-(INa + IbNa + (3.0f * INaK) + (3.0f * INaCa))) / (V_c * F)) * Cm; real d_dt_K_i = ((-((IK1 + Ito + IKr + IKs + IpK + var_membrane__i_Stim) - (2.0f * INaK))) / (V_c * F)) * Cm; rDY_[0] = d_dt_V; rDY_[13] = d_dt_Ca_i; rDY_[14] = d_dt_Ca_SR; rDY_[15] = d_dt_Ca_ss; rDY_[16] = d_dt_R_prime; rDY_[17] = d_dt_Na_i; rDY_[18] = d_dt_K_i; // Rush Larsen rDY_[1] = xr1_inf + (Xr1 - xr1_inf) * exp(-dt / tau_xr1); rDY_[2] = xr2_inf + (Xr2 - xr2_inf) * exp(-dt / tau_xr2); rDY_[3] = xs_inf + (Xs - xs_inf) * exp(-dt / tau_xs); rDY_[4] = m_inf + (m - m_inf) * exp(-dt / tau_m); rDY_[5] = h_inf + (h - h_inf) * exp(-dt / tau_h); rDY_[6] = j_inf + (j - j_inf) * exp(-dt / tau_j); rDY_[7] = d_inf + (d - d_inf) * exp(-dt / tau_d); rDY_[8] = f_inf + (f - f_inf) * exp(-dt / tau_f); rDY_[9] = f2_inf + (f2 - f2_inf) * exp(-dt / tau_f2); rDY_[10] = fCass_inf + (fCass - fCass_inf) * exp(-dt / tau_fCass); rDY_[11] = s_inf + (s - s_inf) * exp(-dt / tau_s); rDY_[12] = r_inf + (r - r_inf) * exp(-dt / tau_r); }
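In solve_gpu above, entries 1..12 of rDY already hold the new gate values computed with the Rush–Larsen exponential update, while entry 0 and entries 13..18 are advanced with forward Euler (dt * rDY + old value). A one-gate host-side sketch of the Rush–Larsen step; the y_inf and tau constants are made up for illustration, only the initial m value is taken from the kernel:

#include <math.h>
#include <stdio.h>

// One Rush-Larsen step for a gating variable obeying dy/dt = (y_inf - y)/tau:
// the exact solution over a step dt, which stays bounded even for stiff gates.
static double rush_larsen_step(double y, double y_inf, double tau, double dt) {
  return y_inf + (y - y_inf) * exp(-dt / tau);
}

int main() {
  double m = 0.00172;  // initial m gate, same seed value the kernel uses
  const double m_inf = 0.9, tau_m = 0.1, dt = 0.02;  // illustrative constants
  for (int step = 0; step < 5; ++step)
    m = rush_larsen_step(m, m_inf, tau_m, dt);
  printf("m after 5 steps: %f\n", m);
  return 0;
}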
fa62d0814fca603552a990f7f92505d0401c139f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #include <iostream> #define SEGMENT_LENGTH 256 #define BLOCK_SIZE 256 __global__ void vecAdd(float * in1, float * in2, float * out, int len) { //@@ Insert code to implement vector addition here int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < len){ out[idx] = in1[idx] + in2[idx]; } } int myMin(int a, int b){ if(a < b){ return a; } return b; } int ceil(int a, int b){ return (a + b - 1) / b; } int main(int argc, char ** argv) { wbArg_t args; int inputLength; float * hostInput1; float * hostInput2; float * deviceInput1; float * deviceInput2; float * deviceOutput; float* pinnedHostInput1; float* pinnedHostInput2; float* pinnedHostOutput; hipStream_t stream0, stream1, stream2, stream3; hipStreamCreate( &stream0); hipStreamCreate( &stream1); hipStreamCreate( &stream2); hipStreamCreate( &stream3); args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength); hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength); wbTime_stop(Generic, "Importing data and creating memory on host"); // allocate page-locked memory on CPU hipHostMalloc((void **)&pinnedHostInput1, sizeof(float) * inputLength); hipHostMalloc((void **)&pinnedHostInput2, sizeof(float) * inputLength); hipHostMalloc((void **)&pinnedHostOutput, sizeof(float) * inputLength); // memcpy input memcpy(pinnedHostInput1, hostInput1, sizeof(float) * inputLength); memcpy(pinnedHostInput2, hostInput2, sizeof(float) * inputLength); // 1. Allocate memory on GPU hipMalloc((void**) &deviceInput1, sizeof(float) * 4 * SEGMENT_LENGTH); hipMalloc((void**) &deviceInput2, sizeof(float) * 4 * SEGMENT_LENGTH); hipMalloc((void**) &deviceOutput, sizeof(float) * 4 * SEGMENT_LENGTH); dim3 DimGrid(ceil(SEGMENT_LENGTH, BLOCK_SIZE), 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); // 2. 
do computation, Breadth First Kernel Issue for(int index = 0; index < inputLength; index += 4 * SEGMENT_LENGTH){ int currentPtr1 = index; int currentPtr2 = currentPtr1 + SEGMENT_LENGTH; int currentPtr3 = currentPtr2 + SEGMENT_LENGTH; int currentPtr4 = currentPtr3 + SEGMENT_LENGTH; int length1 = 0, length2 = 0, length3 = 0, length4 = 0; // copy data if(currentPtr1 < inputLength){ length1 = myMin(SEGMENT_LENGTH, inputLength - currentPtr1); hipMemcpyAsync(&deviceInput1[0], &pinnedHostInput1[currentPtr1], sizeof(float) * length1, hipMemcpyHostToDevice, stream0); hipMemcpyAsync(&deviceInput2[0], &pinnedHostInput2[currentPtr1], sizeof(float) * length1, hipMemcpyHostToDevice, stream0); } if(currentPtr2 < inputLength){ length2 = myMin(SEGMENT_LENGTH, inputLength - currentPtr2); hipMemcpyAsync(&deviceInput1[SEGMENT_LENGTH], &pinnedHostInput1[currentPtr2], sizeof(float) * length2, hipMemcpyHostToDevice, stream1); hipMemcpyAsync(&deviceInput2[SEGMENT_LENGTH], &pinnedHostInput2[currentPtr2], sizeof(float) * length2, hipMemcpyHostToDevice, stream1); } if(currentPtr3 < inputLength){ length3 = myMin(SEGMENT_LENGTH, inputLength - currentPtr3); hipMemcpyAsync(&deviceInput1[SEGMENT_LENGTH * 2], &pinnedHostInput1[currentPtr3], sizeof(float) * length3, hipMemcpyHostToDevice, stream2); hipMemcpyAsync(&deviceInput2[SEGMENT_LENGTH * 2], &pinnedHostInput2[currentPtr3], sizeof(float) * length3, hipMemcpyHostToDevice, stream2); } if(currentPtr4 < inputLength){ length4 = myMin(SEGMENT_LENGTH, inputLength - currentPtr4); hipMemcpyAsync(&deviceInput1[SEGMENT_LENGTH * 3], &pinnedHostInput1[currentPtr4], sizeof(float) * length4, hipMemcpyHostToDevice, stream3); hipMemcpyAsync(&deviceInput2[SEGMENT_LENGTH * 3], &pinnedHostInput2[currentPtr4], sizeof(float) * length4, hipMemcpyHostToDevice, stream3); } // do calculation if(currentPtr1 < inputLength){ hipLaunchKernelGGL(( vecAdd), dim3(DimGrid), dim3(DimBlock), 0, stream0, &deviceInput1[0], &deviceInput2[0], &deviceOutput[0], length1); } if(currentPtr2 < inputLength){ hipLaunchKernelGGL(( vecAdd), dim3(DimGrid), dim3(DimBlock), 0, stream1, &deviceInput1[SEGMENT_LENGTH], &deviceInput2[SEGMENT_LENGTH], &deviceOutput[SEGMENT_LENGTH], length2); } if(currentPtr3 < inputLength){ hipLaunchKernelGGL(( vecAdd), dim3(DimGrid), dim3(DimBlock), 0, stream2, &deviceInput1[SEGMENT_LENGTH * 2], &deviceInput2[SEGMENT_LENGTH * 2], &deviceOutput[SEGMENT_LENGTH * 2], length3); } if(currentPtr4 < inputLength){ hipLaunchKernelGGL(( vecAdd), dim3(DimGrid), dim3(DimBlock), 0, stream3, &deviceInput1[SEGMENT_LENGTH * 3], &deviceInput2[SEGMENT_LENGTH * 3], &deviceOutput[SEGMENT_LENGTH * 3], length4); } // do memory copy from device to host if(currentPtr1 < inputLength){ hipMemcpyAsync(&pinnedHostOutput[currentPtr1], &deviceOutput[0], sizeof(float) * length1, hipMemcpyDeviceToHost, stream0); } if(currentPtr2 < inputLength){ hipMemcpyAsync(&pinnedHostOutput[currentPtr2], &deviceOutput[SEGMENT_LENGTH], sizeof(float) * length2, hipMemcpyDeviceToHost, stream1); } if(currentPtr3 < inputLength){ hipMemcpyAsync(&pinnedHostOutput[currentPtr3], &deviceOutput[SEGMENT_LENGTH * 2], sizeof(float) * length3, hipMemcpyDeviceToHost, stream2); } if(currentPtr4 < inputLength){ hipMemcpyAsync(&pinnedHostOutput[currentPtr4], &deviceOutput[SEGMENT_LENGTH * 3], sizeof(float) * length4, hipMemcpyDeviceToHost, stream3); } } hipDeviceSynchronize(); std::cout<<"check hostoutput"<<std::endl; for(int index = 0; index < myMin(10, inputLength); index++){ std::cout<<pinnedHostOutput[index]<<", "; } std::cout<<std::endl; 
wbSolution(args, pinnedHostOutput, inputLength); // free GPU memory hipFree(deviceInput1); hipFree(deviceInput2); hipFree(deviceOutput); // free page-locked memory hipHostFree(pinnedHostInput1); hipHostFree(pinnedHostInput2); hipHostFree(pinnedHostOutput); // free pageable memory free(hostInput1); free(hostInput2); return 0; }
fa62d0814fca603552a990f7f92505d0401c139f.cu
#include <wb.h> #include <iostream> #define SEGMENT_LENGTH 256 #define BLOCK_SIZE 256 __global__ void vecAdd(float * in1, float * in2, float * out, int len) { //@@ Insert code to implement vector addition here int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < len){ out[idx] = in1[idx] + in2[idx]; } } int myMin(int a, int b){ if(a < b){ return a; } return b; } int ceil(int a, int b){ return (a + b - 1) / b; } int main(int argc, char ** argv) { wbArg_t args; int inputLength; float * hostInput1; float * hostInput2; float * deviceInput1; float * deviceInput2; float * deviceOutput; float* pinnedHostInput1; float* pinnedHostInput2; float* pinnedHostOutput; cudaStream_t stream0, stream1, stream2, stream3; cudaStreamCreate( &stream0); cudaStreamCreate( &stream1); cudaStreamCreate( &stream2); cudaStreamCreate( &stream3); args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength); hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength); wbTime_stop(Generic, "Importing data and creating memory on host"); // allocate page-locked memory on CPU cudaMallocHost((void **)&pinnedHostInput1, sizeof(float) * inputLength); cudaMallocHost((void **)&pinnedHostInput2, sizeof(float) * inputLength); cudaMallocHost((void **)&pinnedHostOutput, sizeof(float) * inputLength); // memcpy input memcpy(pinnedHostInput1, hostInput1, sizeof(float) * inputLength); memcpy(pinnedHostInput2, hostInput2, sizeof(float) * inputLength); // 1. Allocate memory on GPU cudaMalloc((void**) &deviceInput1, sizeof(float) * 4 * SEGMENT_LENGTH); cudaMalloc((void**) &deviceInput2, sizeof(float) * 4 * SEGMENT_LENGTH); cudaMalloc((void**) &deviceOutput, sizeof(float) * 4 * SEGMENT_LENGTH); dim3 DimGrid(ceil(SEGMENT_LENGTH, BLOCK_SIZE), 1, 1); dim3 DimBlock(BLOCK_SIZE, 1, 1); // 2. 
do computation, Breadth First Kernel Issue for(int index = 0; index < inputLength; index += 4 * SEGMENT_LENGTH){ int currentPtr1 = index; int currentPtr2 = currentPtr1 + SEGMENT_LENGTH; int currentPtr3 = currentPtr2 + SEGMENT_LENGTH; int currentPtr4 = currentPtr3 + SEGMENT_LENGTH; int length1 = 0, length2 = 0, length3 = 0, length4 = 0; // copy data if(currentPtr1 < inputLength){ length1 = myMin(SEGMENT_LENGTH, inputLength - currentPtr1); cudaMemcpyAsync(&deviceInput1[0], &pinnedHostInput1[currentPtr1], sizeof(float) * length1, cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(&deviceInput2[0], &pinnedHostInput2[currentPtr1], sizeof(float) * length1, cudaMemcpyHostToDevice, stream0); } if(currentPtr2 < inputLength){ length2 = myMin(SEGMENT_LENGTH, inputLength - currentPtr2); cudaMemcpyAsync(&deviceInput1[SEGMENT_LENGTH], &pinnedHostInput1[currentPtr2], sizeof(float) * length2, cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync(&deviceInput2[SEGMENT_LENGTH], &pinnedHostInput2[currentPtr2], sizeof(float) * length2, cudaMemcpyHostToDevice, stream1); } if(currentPtr3 < inputLength){ length3 = myMin(SEGMENT_LENGTH, inputLength - currentPtr3); cudaMemcpyAsync(&deviceInput1[SEGMENT_LENGTH * 2], &pinnedHostInput1[currentPtr3], sizeof(float) * length3, cudaMemcpyHostToDevice, stream2); cudaMemcpyAsync(&deviceInput2[SEGMENT_LENGTH * 2], &pinnedHostInput2[currentPtr3], sizeof(float) * length3, cudaMemcpyHostToDevice, stream2); } if(currentPtr4 < inputLength){ length4 = myMin(SEGMENT_LENGTH, inputLength - currentPtr4); cudaMemcpyAsync(&deviceInput1[SEGMENT_LENGTH * 3], &pinnedHostInput1[currentPtr4], sizeof(float) * length4, cudaMemcpyHostToDevice, stream3); cudaMemcpyAsync(&deviceInput2[SEGMENT_LENGTH * 3], &pinnedHostInput2[currentPtr4], sizeof(float) * length4, cudaMemcpyHostToDevice, stream3); } // do calculation if(currentPtr1 < inputLength){ vecAdd<<<DimGrid, DimBlock, 0, stream0>>>(&deviceInput1[0], &deviceInput2[0], &deviceOutput[0], length1); } if(currentPtr2 < inputLength){ vecAdd<<<DimGrid, DimBlock, 0, stream1>>>(&deviceInput1[SEGMENT_LENGTH], &deviceInput2[SEGMENT_LENGTH], &deviceOutput[SEGMENT_LENGTH], length2); } if(currentPtr3 < inputLength){ vecAdd<<<DimGrid, DimBlock, 0, stream2>>>(&deviceInput1[SEGMENT_LENGTH * 2], &deviceInput2[SEGMENT_LENGTH * 2], &deviceOutput[SEGMENT_LENGTH * 2], length3); } if(currentPtr4 < inputLength){ vecAdd<<<DimGrid, DimBlock, 0, stream3>>>(&deviceInput1[SEGMENT_LENGTH * 3], &deviceInput2[SEGMENT_LENGTH * 3], &deviceOutput[SEGMENT_LENGTH * 3], length4); } // do memory copy from device to host if(currentPtr1 < inputLength){ cudaMemcpyAsync(&pinnedHostOutput[currentPtr1], &deviceOutput[0], sizeof(float) * length1, cudaMemcpyDeviceToHost, stream0); } if(currentPtr2 < inputLength){ cudaMemcpyAsync(&pinnedHostOutput[currentPtr2], &deviceOutput[SEGMENT_LENGTH], sizeof(float) * length2, cudaMemcpyDeviceToHost, stream1); } if(currentPtr3 < inputLength){ cudaMemcpyAsync(&pinnedHostOutput[currentPtr3], &deviceOutput[SEGMENT_LENGTH * 2], sizeof(float) * length3, cudaMemcpyDeviceToHost, stream2); } if(currentPtr4 < inputLength){ cudaMemcpyAsync(&pinnedHostOutput[currentPtr4], &deviceOutput[SEGMENT_LENGTH * 3], sizeof(float) * length4, cudaMemcpyDeviceToHost, stream3); } } cudaDeviceSynchronize(); std::cout<<"check hostoutput"<<std::endl; for(int index = 0; index < myMin(10, inputLength); index++){ std::cout<<pinnedHostOutput[index]<<", "; } std::cout<<std::endl; wbSolution(args, pinnedHostOutput, inputLength); // free GPU memory cudaFree(deviceInput1); 
cudaFree(deviceInput2); cudaFree(deviceOutput); // free page-locked memory cudaFreeHost(pinnedHostInput1); cudaFreeHost(pinnedHostInput2); cudaFreeHost(pinnedHostOutput); // free pageable memory free(hostInput1); free(hostInput2); return 0; }
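The pair above hard-codes four streams, never destroys them, and never checks for launch errors. Purely as an illustrative sketch (not part of either file), the snippet below expresses the same breadth-first copy/compute/copy pipeline around an array of streams, with cleanup and an error check at the end; NUM_STREAMS, CHUNK, THREADS, vecAddChunk and pipelinedVecAdd are names invented here.

#include <cuda_runtime.h>
#include <cstdio>

#define NUM_STREAMS 4
#define CHUNK 256
#define THREADS 256

__global__ void vecAddChunk(const float *a, const float *b, float *c, int len) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len) c[i] = a[i] + b[i];
}

// in1/in2/out must be pinned (cudaMallocHost) for the async copies to actually overlap.
void pipelinedVecAdd(const float *in1, const float *in2, float *out, int n) {
    cudaStream_t streams[NUM_STREAMS];
    float *dA[NUM_STREAMS], *dB[NUM_STREAMS], *dC[NUM_STREAMS];
    for (int s = 0; s < NUM_STREAMS; ++s) {
        cudaStreamCreate(&streams[s]);
        cudaMalloc(&dA[s], CHUNK * sizeof(float));
        cudaMalloc(&dB[s], CHUNK * sizeof(float));
        cudaMalloc(&dC[s], CHUNK * sizeof(float));
    }
    for (int base = 0; base < n; base += NUM_STREAMS * CHUNK) {
        // breadth-first: issue all H2D copies for this round, then all kernels, then all D2H copies
        for (int s = 0; s < NUM_STREAMS; ++s) {
            int off = base + s * CHUNK;
            if (off >= n) break;
            int len = (n - off < CHUNK) ? (n - off) : CHUNK;
            cudaMemcpyAsync(dA[s], in1 + off, len * sizeof(float), cudaMemcpyHostToDevice, streams[s]);
            cudaMemcpyAsync(dB[s], in2 + off, len * sizeof(float), cudaMemcpyHostToDevice, streams[s]);
        }
        for (int s = 0; s < NUM_STREAMS; ++s) {
            int off = base + s * CHUNK;
            if (off >= n) break;
            int len = (n - off < CHUNK) ? (n - off) : CHUNK;
            vecAddChunk<<<(len + THREADS - 1) / THREADS, THREADS, 0, streams[s]>>>(dA[s], dB[s], dC[s], len);
        }
        for (int s = 0; s < NUM_STREAMS; ++s) {
            int off = base + s * CHUNK;
            if (off >= n) break;
            int len = (n - off < CHUNK) ? (n - off) : CHUNK;
            cudaMemcpyAsync(out + off, dC[s], len * sizeof(float), cudaMemcpyDeviceToHost, streams[s]);
        }
    }
    cudaDeviceSynchronize();
    if (cudaGetLastError() != cudaSuccess) fprintf(stderr, "pipelined vector add failed\n");
    for (int s = 0; s < NUM_STREAMS; ++s) {
        cudaFree(dA[s]); cudaFree(dB[s]); cudaFree(dC[s]);
        cudaStreamDestroy(streams[s]);
    }
}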
b6a8368c6c6cd65b0ea8ce64cf3da438c0beeab2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_tools.h"

__global__ void moving_average
(
    const double *input,
    double *output,
    const size_t width,
    const uint *cols_heights,
    const uint window_width
)
{
    // column
    uint gid = blockIdx.x * blockDim.x + threadIdx.x;
    uint idx = gid;

    if(gid >= width)
    {
        return;
    }

    uint col_height = cols_heights[gid];
    unsigned long end = idx + col_height * width;
    uint i = 0;
    double lastSum = 0;
    double result = 0;

    //
    // For the first window_width - 1 elements [0; window_width - 1)
    //
    // The moving-average value for the first window_width elements is computed
    // so that for the i-th element it equals the arithmetic mean of the elements
    // from index 0 up to index i + window_width
    while(i < window_width && idx < end)
    {
        lastSum += input[idx];
        idx += width;
        i++;
    }

    i = 0;
    while(i < window_width && idx < end)
    {
        result = lastSum / ((double)(window_width + i));
        output[idx - (window_width * width)] = result;

        double new_v = input[idx];
        lastSum = lastSum + new_v;
        idx += width;
        i++;
    }

    //
    // For elements with index in the range [window_width; number_of_elements - window_width]
    //
    double fwindow_width = (double)(2 * window_width);
    while(idx < end)
    {
        result = lastSum / fwindow_width;
        output[idx - (window_width * width)] = result;

        double new_v = input[idx];
        double old = input[idx - (2 * window_width * width)];
        lastSum = lastSum - old + new_v;
        idx += width;
    }

    //
    // For elements with index in the range (number_of_elements - window_width; number_of_elements]
    //
    // The moving-average value for the i-th element is the arithmetic mean of the
    // elements with indices in the range (i; number_of_elements)
    lastSum = 0.0f;
    idx -= 2 * window_width * width;
    while(idx < end)
    {
        lastSum += input[idx];
        idx += width;
    }

    idx -= window_width * width;
    i = 2 * window_width;
    while(idx < end)
    {
        result = lastSum / ((double)(i));
        output[idx] = result;

        double old = input[idx - window_width * width];
        lastSum = lastSum - old;
        idx += width;
        i--;
    }
}

extern "C"
void movingAverage( // centered moving average
    const double *h_input,
    double *h_output,
    const uint *h_cols,
    const size_t width,
    const size_t height,
    const uint window
)
{
    // malloc
    double *d_input, *d_output;
    uint *d_cols;
    const size_t matrix_size = width * height * sizeof(double);
    const size_t cols_vec_size = width * sizeof(uint);   // d_cols / h_cols hold uints

    checkCudaErrors(hipMalloc((void**)&d_input, matrix_size));
    checkCudaErrors(hipMalloc((void**)&d_output, matrix_size));
    checkCudaErrors(hipMalloc((void**)&d_cols, cols_vec_size));

    checkCudaErrors(hipMemcpy(d_input, h_input, matrix_size, hipMemcpyHostToDevice));
    checkCudaErrors(hipMemcpy(d_cols, h_cols, cols_vec_size, hipMemcpyHostToDevice));

    const uint threadsPerBlock = BLOCK_DIM;
    const uint blocksPerGrid = calculateBlockNumber(width, BLOCK_DIM);

    hipLaunchKernelGGL(( moving_average), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_input, d_output, width, d_cols, window);
    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());

    checkCudaErrors(hipMemcpy(h_output, d_output, matrix_size, hipMemcpyDeviceToHost));

    hipFree(d_input);
    hipFree(d_output);
    hipFree(d_cols);
}
b6a8368c6c6cd65b0ea8ce64cf3da438c0beeab2.cu
#include "cuda_tools.h" __global__ void moving_average ( const double *input, double *output, const size_t width, const uint *cols_heights, const uint window_width ) { // column uint gid = blockIdx.x * blockDim.x + threadIdx.x; uint idx = gid; if(gid >= width) { return; } uint col_height = cols_heights[gid]; unsigned long end = idx + col_height * width; uint i = 0; double lastSum = 0; double result = 0; // // Dla pierwszych window_width - 1 elementów [0; window_width - 1) // // Wartość średniej kroczącej dla pierwsztch window_width elementów jest // wyznaczana tak, że dla i-tego elementu jest równe śrędniej arytmetycznej // od elementu o indeksie 0 do elementu o indeksie i+window_width while(i < window_width && idx < end) { lastSum += input[idx]; idx += width; i++; } i = 0; while(i < window_width && idx < end) { result = lastSum / ((double)(window_width + i)); output[idx - (window_width * width)] = result; double new_v = input[idx]; lastSum = lastSum + new_v; idx += width; i++; } // // Dla elementów o indeksie z przedziału [window_width; ilość_elementów - window_width] // double fwindow_width = (double)(2 * window_width); while(idx < end) { result = lastSum / fwindow_width; output[idx - (window_width * width)] = result; double new_v = input[idx]; double old = input[idx - (2 * window_width * width)]; lastSum = lastSum - old + new_v; idx += width; } // // Dla elementów o indeksie z przedziału (ilość_elementów - window_width; ilość_elementów] // // Wartość średniej kroczącej dla i-tego elementu jest średnią arytmetyczną elementów // o indeksach w przedziale (i; ilość_elementów) lastSum = 0.0f; idx -= 2 * window_width * width; while(idx < end) { lastSum += input[idx]; idx += width; } idx -= window_width * width; i = 2 * window_width; while(idx < end) { result = lastSum / ((double)(i)); output[idx] = result; double old = input[idx - window_width * width]; lastSum = lastSum - old; idx += width; i--; } } extern "C" void movingAverage( // centered moving average const double *h_input, double *h_output, const uint *h_cols, const size_t width, const size_t height, const uint window ) { // malloc double *d_input, *d_output; uint *d_cols; const size_t matrix_size = width * height * sizeof(double); const size_t cols_vec_size = width * sizeof(size_t); checkCudaErrors(cudaMalloc((void**)&d_input, matrix_size)); checkCudaErrors(cudaMalloc((void**)&d_output, matrix_size)); checkCudaErrors(cudaMalloc((void**)&d_cols, cols_vec_size)); checkCudaErrors(cudaMemcpy(d_input, h_input, matrix_size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_cols, h_cols, cols_vec_size, cudaMemcpyHostToDevice)); const uint threadsPerBlock = BLOCK_DIM; const uint blocksPerGrid = calculateBlockNumber(width, BLOCK_DIM); moving_average<<<blocksPerGrid, threadsPerBlock>>>(d_input, d_output, width, d_cols, window); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(h_output, d_output, matrix_size, cudaMemcpyDeviceToHost)); cudaFree(d_input); cudaFree(d_output); cudaFree(d_cols); }
7e4a59e9e93a5eb78b62e94231632394701a09bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) // Compiler constant tile size #define TILE_WIDTH 32 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP // First, calculate my row and column int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // Allocate the shared memory tiles, one for each input matrix // NOTE: this must match the block dimensions!! __shared__ float tileA[TILE_WIDTH][TILE_WIDTH]; __shared__ float tileB[TILE_WIDTH][TILE_WIDTH]; // Accumulate result in local variable float localC = 0.; // Loop over input tiles int numTiles = (numAColumns-1) / TILE_WIDTH + 1; for(int iTile = 0; iTile < numTiles; ++iTile){ // Load my element from tile A int tileColA = iTile*TILE_WIDTH + threadIdx.x; if(row < numARows && tileColA < numAColumns) tileA[threadIdx.y][threadIdx.x] = A[row*numAColumns + tileColA]; else tileA[threadIdx.y][threadIdx.x] = 0.; // Load my element from tile B int tileRowB = iTile*TILE_WIDTH + threadIdx.y; if(tileRowB < numBRows && col < numBColumns) tileB[threadIdx.y][threadIdx.x] = B[tileRowB*numBColumns + col]; else tileB[threadIdx.y][threadIdx.x] = 0.; // Synchronize all threads in this block __syncthreads(); // Partial calculation of dot-product within shared tiles. // Loop over the tile elements for(int i = 0; i < TILE_WIDTH; ++i){ localC += tileA[threadIdx.y][i] * tileB[i][threadIdx.x]; } // Need to synchronize again before loading next tile __syncthreads(); } // All tiles are finished. Write local result to global output. 
// Check output matrix boundary if(row < numCRows && col < numCColumns) C[row*numCColumns + col] = localC; } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = new float[numCRows*numCColumns]; wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here int sizeA = numARows * numAColumns * sizeof(float); int sizeB = numBRows * numBColumns * sizeof(float); int sizeC = numCRows * numCColumns * sizeof(float); wbCheck( hipMalloc((void**)&deviceA, sizeA) ); wbCheck( hipMalloc((void**)&deviceB, sizeB) ); wbCheck( hipMalloc((void**)&deviceC, sizeC) ); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck( hipMemcpy(deviceA, hostA, sizeA, hipMemcpyHostToDevice) ); wbCheck( hipMemcpy(deviceB, hostB, sizeB, hipMemcpyHostToDevice) ); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here // Choose block size dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1); // Determine grid size to finish the problem dim3 gridSize((numCColumns-1)/TILE_WIDTH + 1, (numCRows-1)/TILE_WIDTH + 1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiplyShared), dim3(gridSize), dim3(blockSize), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck( hipMemcpy(hostC, deviceC, sizeC, hipMemcpyDeviceToHost) ); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbCheck( hipFree(deviceA) ); wbCheck( hipFree(deviceB) ); wbCheck( hipFree(deviceC) ); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
7e4a59e9e93a5eb78b62e94231632394701a09bf.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) // Compiler constant tile size #define TILE_WIDTH 32 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP // First, calculate my row and column int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // Allocate the shared memory tiles, one for each input matrix // NOTE: this must match the block dimensions!! __shared__ float tileA[TILE_WIDTH][TILE_WIDTH]; __shared__ float tileB[TILE_WIDTH][TILE_WIDTH]; // Accumulate result in local variable float localC = 0.; // Loop over input tiles int numTiles = (numAColumns-1) / TILE_WIDTH + 1; for(int iTile = 0; iTile < numTiles; ++iTile){ // Load my element from tile A int tileColA = iTile*TILE_WIDTH + threadIdx.x; if(row < numARows && tileColA < numAColumns) tileA[threadIdx.y][threadIdx.x] = A[row*numAColumns + tileColA]; else tileA[threadIdx.y][threadIdx.x] = 0.; // Load my element from tile B int tileRowB = iTile*TILE_WIDTH + threadIdx.y; if(tileRowB < numBRows && col < numBColumns) tileB[threadIdx.y][threadIdx.x] = B[tileRowB*numBColumns + col]; else tileB[threadIdx.y][threadIdx.x] = 0.; // Synchronize all threads in this block __syncthreads(); // Partial calculation of dot-product within shared tiles. // Loop over the tile elements for(int i = 0; i < TILE_WIDTH; ++i){ localC += tileA[threadIdx.y][i] * tileB[i][threadIdx.x]; } // Need to synchronize again before loading next tile __syncthreads(); } // All tiles are finished. Write local result to global output. 
// Check output matrix boundary if(row < numCRows && col < numCColumns) C[row*numCColumns + col] = localC; } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = ( float * )wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = ( float * )wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = new float[numCRows*numCColumns]; wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here int sizeA = numARows * numAColumns * sizeof(float); int sizeB = numBRows * numBColumns * sizeof(float); int sizeC = numCRows * numCColumns * sizeof(float); wbCheck( cudaMalloc((void**)&deviceA, sizeA) ); wbCheck( cudaMalloc((void**)&deviceB, sizeB) ); wbCheck( cudaMalloc((void**)&deviceC, sizeC) ); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck( cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice) ); wbCheck( cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice) ); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here // Choose block size dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1); // Determine grid size to finish the problem dim3 gridSize((numCColumns-1)/TILE_WIDTH + 1, (numCRows-1)/TILE_WIDTH + 1, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiplyShared<<<gridSize, blockSize>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck( cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost) ); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbCheck( cudaFree(deviceA) ); wbCheck( cudaFree(deviceB) ); wbCheck( cudaFree(deviceC) ); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
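A tiled kernel like the one above is easiest to trust with a host-side reference to compare against. The sketch below (cpuMatMul and nearlyEqual are names invented here, not part of the assignment files) computes C = A * B on the CPU in row-major order and compares two buffers with a relative tolerance; on small inputs it can be run against hostC after the device copy-back.

#include <cmath>
#include <cstdio>

// Straightforward CPU reference for C = A * B, row-major.
void cpuMatMul(const float *A, const float *B, float *C,
               int numARows, int numAColumns, int numBColumns) {
    for (int r = 0; r < numARows; ++r)
        for (int c = 0; c < numBColumns; ++c) {
            float acc = 0.f;
            for (int k = 0; k < numAColumns; ++k)
                acc += A[r * numAColumns + k] * B[k * numBColumns + c];
            C[r * numBColumns + c] = acc;
        }
}

// Element-wise comparison with a relative tolerance; prints the first mismatch.
bool nearlyEqual(const float *X, const float *Y, int n, float tol = 1e-3f) {
    for (int i = 0; i < n; ++i)
        if (std::fabs(X[i] - Y[i]) > tol * (1.f + std::fabs(Y[i]))) {
            std::printf("mismatch at %d: %f vs %f\n", i, X[i], Y[i]);
            return false;
        }
    return true;
}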
371575704cc086ed010d895006efedc8e1eb09cc.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 3 * of the programming guide with some additions like error checking. * */ // Includes #include <stdio.h> // includes, project #include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples #include <shrQATest.h> #include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void VecAdd(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } // Host code int main(int argc, char** argv) { shrQAStart(argc, argv); printf("Vector Addition\n"); int N = 2500; size_t size = N * sizeof(float); ParseArguments(argc, argv); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); // Invoke kernel int threadsPerBlock = 256; int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains 
the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); // Verify result int i; for (i = 0; i < N; ++i) { float sum = h_A[i] + h_B[i]; if (fabs(h_C[i] - sum) > 1e-5) break; } CleanupResources(); shrQAFinishExit(argc, (const char **)argv, (i==N) ? QA_PASSED : QA_FAILED); } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); hipDeviceReset(); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--noprompt") == 0 || strcmp(argv[i], "-noprompt") == 0) { noprompt = true; break; } } }
371575704cc086ed010d895006efedc8e1eb09cc.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 3 * of the programming guide with some additions like error checking. * */ // Includes #include <stdio.h> // includes, project #include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples #include <shrQATest.h> #include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> // Variables float* h_A; float* h_B; float* h_C; float* d_A; float* d_B; float* d_C; bool noprompt = false; // Functions void CleanupResources(void); void RandomInit(float*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void VecAdd(const float* A, const float* B, float* C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } // Host code int main(int argc, char** argv) { shrQAStart(argc, argv); printf("Vector Addition\n"); int N = 2500; size_t size = N * sizeof(float); ParseArguments(argc, argv); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (float*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (float*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); // Invoke kernel int threadsPerBlock = 256; int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock; VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, 
cudaMemcpyDeviceToHost) ); // Verify result int i; for (i = 0; i < N; ++i) { float sum = h_A[i] + h_B[i]; if (fabs(h_C[i] - sum) > 1e-5) break; } CleanupResources(); shrQAFinishExit(argc, (const char **)argv, (i==N) ? QA_PASSED : QA_FAILED); } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); cudaDeviceReset(); } // Allocates an array with random float entries. void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; } // Parse program arguments void ParseArguments(int argc, char** argv) { for (int i = 0; i < argc; ++i) { if (strcmp(argv[i], "--noprompt") == 0 || strcmp(argv[i], "-noprompt") == 0) { noprompt = true; break; } } }
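The sample above checks launch errors but does not time the kernel. If timing were wanted, CUDA events are the usual tool; the helper below is a small generic sketch (timeKernelMs is a name invented here) that records an event before and after a launch passed in as a callable and returns the elapsed milliseconds.

#include <cuda_runtime.h>

// Times whatever launch the callable performs; e.g.
//   float ms = timeKernelMs([&]{ VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); });
template <typename Launch>
float timeKernelMs(Launch launch) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    launch();                       // enqueue the kernel
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);     // wait for the kernel to finish
    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}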
f5a471257b36f2f449de78d45584cf9ada3975da.hip
// !!! This is a file automatically generated by hipify!!!
#include <bits/stdc++.h>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 1024
using namespace std;

// Element-wise addition: one thread per matrix element.
__global__ void sum(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n*n)
        d_C[i] = d_A[i] + d_B[i];
}

// Row-wise addition: one thread adds a whole row (strided accesses).
__global__ void sumR(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;   // the last block may be only partially filled
    for (int j = 0; j < n; j++)
        d_C[i*n + j] = d_A[i*n + j] + d_B[i*n + j];
}

// Column-wise addition: one thread adds a whole column (coalesced accesses).
__global__ void sumC(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;   // the last block may be only partially filled
    for (int j = 0; j < n; j++)
        d_C[j*n + i] = d_A[j*n + i] + d_B[j*n + i];
}

int main() {
    int *h_A, *h_B, *h_C;
    int n;
    cin >> n;
    size_t size = sizeof(int) * n*n;   // size_t so the byte count cannot overflow an int

    h_A = (int *)malloc(size);
    h_B = (int *)malloc(size);
    h_C = (int *)malloc(size);
    for (int i = 0; i < n*n; i++) {
        h_A[i] = 3;
        h_B[i] = 4;
    }

    int *d_A, *d_B, *d_C;
    clock_t t = clock();
    hipMalloc(&d_A, size);
    hipMalloc(&d_B, size);
    hipMalloc(&d_C, size);

    hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);

    //sum<<< ceil( (n*n) / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);
    hipLaunchKernelGGL(( sumR), dim3(ceil( n / (double)BLOCK_SIZE)), dim3(BLOCK_SIZE) , 0, 0, d_A, d_B, d_C, n);
    //sumC<<< ceil( n / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);

    hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);

    double a = ((double)(clock()-t))/CLOCKS_PER_SEC;
    cout << a << endl;

    //for(int i = 0; i < n; i++) {
    //    for(int j = 0; j < n; j++)
    //        cout<<h_C[j]<<" ";
    //    cout<<endl;
    //}

    free(h_A);
    free(h_B);
    free(h_C);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    return 0;
}
f5a471257b36f2f449de78d45584cf9ada3975da.cu
#include <bits/stdc++.h>
#include <cuda.h>
#define BLOCK_SIZE 1024
using namespace std;

// Element-wise addition: one thread per matrix element.
__global__ void sum(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n*n)
        d_C[i] = d_A[i] + d_B[i];
}

// Row-wise addition: one thread adds a whole row (strided accesses).
__global__ void sumR(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;   // the last block may be only partially filled
    for (int j = 0; j < n; j++)
        d_C[i*n + j] = d_A[i*n + j] + d_B[i*n + j];
}

// Column-wise addition: one thread adds a whole column (coalesced accesses).
__global__ void sumC(int *d_A, int *d_B, int *d_C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;   // the last block may be only partially filled
    for (int j = 0; j < n; j++)
        d_C[j*n + i] = d_A[j*n + i] + d_B[j*n + i];
}

int main() {
    int *h_A, *h_B, *h_C;
    int n;
    cin >> n;
    size_t size = sizeof(int) * n*n;   // size_t so the byte count cannot overflow an int

    h_A = (int *)malloc(size);
    h_B = (int *)malloc(size);
    h_C = (int *)malloc(size);
    for (int i = 0; i < n*n; i++) {
        h_A[i] = 3;
        h_B[i] = 4;
    }

    int *d_A, *d_B, *d_C;
    clock_t t = clock();
    cudaMalloc(&d_A, size);
    cudaMalloc(&d_B, size);
    cudaMalloc(&d_C, size);

    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    //sum<<< ceil( (n*n) / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);
    sumR<<< ceil( n / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);
    //sumC<<< ceil( n / (double)BLOCK_SIZE), BLOCK_SIZE >>> (d_A, d_B, d_C, n);

    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    double a = ((double)(clock()-t))/CLOCKS_PER_SEC;
    cout << a << endl;

    //for(int i = 0; i < n; i++) {
    //    for(int j = 0; j < n; j++)
    //        cout<<h_C[j]<<" ";
    //    cout<<endl;
    //}

    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
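The interesting part of the pair above is the access pattern: in sumR adjacent threads read addresses n elements apart at every step (strided, poorly coalesced), while in sumC adjacent threads read consecutive addresses (coalesced), so the column-per-thread kernel is usually much faster even though both do the same arithmetic. The standalone sketch below (addRows, addCols, and the problem size are invented here) times the two patterns with CUDA events to make that difference visible.

#include <cuda_runtime.h>
#include <cstdio>

#define N 2048
#define TPB 256

// One thread per row: at each step j, neighbouring threads touch addresses n apart.
__global__ void addRows(const int *a, const int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    for (int j = 0; j < n; ++j) c[i * n + j] = a[i * n + j] + b[i * n + j];
}

// One thread per column: at each step j, neighbouring threads touch consecutive addresses.
__global__ void addCols(const int *a, const int *b, int *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    for (int j = 0; j < n; ++j) c[j * n + i] = a[j * n + i] + b[j * n + i];
}

int main() {
    int *a, *b, *c;
    size_t bytes = (size_t)N * N * sizeof(int);
    cudaMalloc(&a, bytes); cudaMalloc(&b, bytes); cudaMalloc(&c, bytes);
    cudaMemset(a, 1, bytes); cudaMemset(b, 2, bytes);   // values are irrelevant for timing

    cudaEvent_t t0, t1;
    cudaEventCreate(&t0); cudaEventCreate(&t1);
    float msRows = 0.f, msCols = 0.f;

    cudaEventRecord(t0);
    addRows<<<(N + TPB - 1) / TPB, TPB>>>(a, b, c, N);
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);
    cudaEventElapsedTime(&msRows, t0, t1);

    cudaEventRecord(t0);
    addCols<<<(N + TPB - 1) / TPB, TPB>>>(a, b, c, N);
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);
    cudaEventElapsedTime(&msCols, t0, t1);

    printf("row-per-thread: %.2f ms, column-per-thread: %.2f ms\n", msRows, msCols);

    cudaEventDestroy(t0); cudaEventDestroy(t1);
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}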
0c7990dc981fffc3eb0c024fa697ea44c4ca77cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** Delaunay refinement -*- CUDA -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * Refinement of an initial, unrefined Delaunay mesh to eliminate triangles * with angles < 30 degrees, using a variation of Chew's algorithm. * * @author Rupesh Nasre <[email protected]> */ #include "lonestargpu.h" #define MINANGLE 30 #define PI 3.14159265358979323846 // from C99 standard. #define FORD float #define DIMSTYPE unsigned #define INVALIDID 1234567890 #define MAXID INVALIDID #define TESTNBLOCKSFACTOR 4 #define ALLOCMULTIPLE 2 // alloc in multiples of this. unsigned ALLOCFACTOR = 6; // initial alloc factor. void next_line(std::ifstream& scanner) { scanner.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); } void readNodes(std::string filename, FORD * &nodex, FORD * &nodey, unsigned &nnodes) { std::ifstream scanner(filename.append(".node").c_str()); scanner >> nnodes; //next_line(scanner); nodex = (FORD *)malloc(nnodes * sizeof(FORD)); nodey = (FORD *)malloc(nnodes * sizeof(FORD)); for (size_t i = 0; i < nnodes; i++) { size_t index; FORD x; FORD y; next_line(scanner); scanner >> index >> x >> y; nodex[index] = x; nodey[index] = y; } } void readTriangles(std::string basename, unsigned * &tnodes, unsigned &ntriangles, unsigned nnodes) { // bug on the placement of next_line identified by Molly O'Neil: fixed. 
unsigned ntrianglesone, ntrianglestwo; unsigned i, index, n1, n2, n3, row; std::string filename; filename = basename; std::ifstream scanner(filename.append(".ele").c_str()); scanner >> ntrianglesone; filename = basename; std::ifstream scannerperimeter(filename.append(".poly").c_str()); //next_line(scannerperimeter); scannerperimeter >> ntrianglestwo; ntriangles = ntrianglesone + ntrianglestwo; tnodes = (unsigned *)malloc(3 * ntriangles * sizeof(unsigned)); for (i = 0; i < ntrianglesone; i++) { next_line(scanner); scanner >> index >> n1 >> n2 >> n3; row = 3 * index; tnodes[row + 0] = n1; tnodes[row + 1] = n2; tnodes[row + 2] = n3; } for (i = 0; i < ntrianglestwo; i++) { next_line(scannerperimeter); scannerperimeter >> index >> n1 >> n2; row = 3 * (ntrianglesone + index); tnodes[row + 0] = n1; tnodes[row + 1] = n2; tnodes[row + 2] = INVALIDID; } } void optimizeone(unsigned ntriangles) { } __device__ FORD distanceSquare(FORD onex, FORD oney, FORD twox, FORD twoy) { FORD dx = onex - twox; FORD dy = oney - twoy; FORD dsq = dx * dx + dy * dy; return dsq; } __device__ FORD distanceSquare(unsigned one, unsigned two, FORD *nodex, FORD *nodey) { return distanceSquare(nodex[one], nodey[one], nodex[two], nodey[two]); } __device__ FORD distance(unsigned one, unsigned two, FORD *nodex, FORD *nodey) { return sqrtf(distanceSquare(one, two, nodex, nodey)); } __device__ FORD radiusSquare(FORD centerx, FORD centery, unsigned tri, FORD *nodex, FORD *nodey, unsigned *tnodes) { unsigned row = 3 * tri; unsigned first = tnodes[row + 0]; return distanceSquare(centerx, centery, nodex[first], nodey[first]); } __device__ bool checkbad(unsigned id, FORD *nodex, FORD *nodey, unsigned *tnodes, DIMSTYPE *obtuse, unsigned ntriangles) { //if (id < ntriangles) { unsigned row = 3 * id; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); for (unsigned ii = 0; ii < dims; ++ii) { unsigned curr = tnodes[row + ii]; unsigned aa = tnodes[row + (ii + 1) % dims]; unsigned bb = tnodes[row + (ii + 2) % dims]; if (curr < ntriangles && aa < ntriangles && bb < ntriangles) { FORD vax = nodex[aa] - nodex[curr]; FORD vay = nodey[aa] - nodey[curr]; FORD vbx = nodex[bb] - nodex[curr]; FORD vby = nodey[bb] - nodey[curr]; FORD dp = vax * vbx + vay * vby; if (dp < 0) { // id is obtuse at point ii. obtuse[id] = ii; } else { FORD dsqaacurr = distanceSquare(aa, curr, nodex, nodey); FORD dsqbbcurr = distanceSquare(bb, curr, nodex, nodey); FORD c = dp * rsqrtf(dsqaacurr * dsqbbcurr); if (c > cos(MINANGLE * (PI / 180))) { return true; } } } } //} return false; } __global__ void dinit(FORD *nodex, FORD *nodey, unsigned *tnodes, bool *isbad, DIMSTYPE *obtuse, bool *isdel, unsigned nnodes, unsigned ntriangles) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if (id < ntriangles) { obtuse[id] = 3; isbad[id] = checkbad(id, nodex, nodey, tnodes, obtuse, ntriangles); isdel[id] = false; } } __global__ void dverify(FORD *nodex, FORD *nodey, unsigned *tnodes, bool *isbad, bool *isdel, unsigned nnodes, unsigned ntriangles, bool *changed, unsigned *nchanged) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if (id < ntriangles && !isdel[id] && isbad[id] ) { *changed = true; ++*nchanged; } } __device__ unsigned adjacent(unsigned trione, unsigned tritwo, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { unsigned rowone = 3 * trione; unsigned rowtwo = 3 * tritwo; unsigned dimsone = (tnodes[rowone + 2] == INVALIDID ? 2 : 3); unsigned dimstwo = (tnodes[rowtwo + 2] == INVALIDID ? 
2 : 3); unsigned ncommon = 0; unsigned firstmatch = 3; // not adjacent. for (unsigned ii = 0; ii < dimsone; ++ii) { for (unsigned jj = 0; jj < dimstwo; ++jj) { if (tnodes[rowone + ii] == tnodes[rowtwo + jj]) { if (++ncommon == 2) { return firstmatch; } else { firstmatch = ii; } } } } return 3; // not adjacent. } __global__ void dfindneighbors(FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned *neighbors, DIMSTYPE *neighboredges, unsigned nnodes, unsigned ntriangles, unsigned nblocks, unsigned starttri, unsigned endtri) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; //unsigned wpt = (ntriangles + nblocks * blockDim.x - 1) / nblocks / blockDim.x; unsigned wpt = (endtri - starttri + nblocks * blockDim.x - 1) / (nblocks * blockDim.x); //1; unsigned start = starttri + id * wpt; unsigned end = start + wpt; for (unsigned tt = start; tt < end && tt < ntriangles; ++tt) { unsigned row = 3 * tt; unsigned iirow = 0; //for (unsigned ii = 0; ii < ntriangles; ++ii) { for (unsigned ii = starttri; ii < endtri; ++ii) { if (ii != tt) { unsigned commonedgestart = adjacent(tt, ii, tnodes, nnodes, ntriangles); if (commonedgestart < 3 && iirow < 3) { // common edge, adjacent. neighbors[row + iirow] = ii; neighboredges[row + iirow] = commonedgestart; // store the common edge for the first triangle, another thread will store it for the second triangle. ++iirow; } } } // fill the remaining entries by invalid data. for (; iirow < 3; ++iirow) { neighbors[row + iirow] = INVALIDID; neighboredges[row + iirow] = 3; } } } __device__ unsigned getOpposite(unsigned centerelement, unsigned obtuse, unsigned *neighbors, DIMSTYPE *neighboredges, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { unsigned row = 3 * centerelement; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); DIMSTYPE commonedgepoint1 = (obtuse + 1) % dims; //unsigned commonedgepoint2 = (obtuse + 2) % dims; for (unsigned ii = 0; ii < 3; ++ii) { // iterate over neighbors. DIMSTYPE nnedgestart = neighboredges[row + ii]; if (nnedgestart == commonedgepoint1) { return neighbors[row + ii]; } } return INVALIDID; } __device__ void getCenter(unsigned centerelement, FORD &centerx, FORD &centery, FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { unsigned row = 3 * centerelement; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); unsigned aa = tnodes[row + 0]; unsigned bb = tnodes[row + 1]; unsigned cc = tnodes[row + 2]; if (!(aa < ntriangles && bb < ntriangles && cc < ntriangles)) { centerx = centery = 0.0; return; } if (dims == 2) { centerx = (nodex[aa] + nodex[bb]) * 0.5; centery = (nodey[aa] + nodey[bb]) * 0.5; return; } FORD xxx = nodex[bb] - nodex[aa]; FORD xxy = nodey[bb] - nodey[aa]; FORD yyx = nodex[cc] - nodex[aa]; FORD yyy = nodey[cc] - nodey[aa]; FORD xxlen = distance(aa, bb, nodex, nodey); FORD yylen = distance(aa, cc, nodex, nodey); FORD cosine = (xxx * yyx + xxy * yyy) / (xxlen * yylen); FORD sinesq = 1.0 - cosine * cosine; FORD plen = yylen / xxlen; FORD ss = plen * cosine; FORD tt = plen * sinesq; FORD wp = (plen - cosine) / (2 * tt); FORD wb = 0.5 - (wp * ss); centerx = nodex[aa] * (1 - wb - wp) + nodex[bb] * wb + nodex[cc] * wp; centery = nodey[aa] * (1 - wb - wp) + nodey[bb] * wb + nodey[cc] * wp; } __device__ bool inCircumcircle(FORD xx, FORD yy, unsigned tri, FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { // check if point (xx, yy) is in the circumcircle of tri. 
FORD centerx, centery; getCenter(tri, centerx, centery, nodex, nodey, tnodes, nnodes, ntriangles); FORD dd = distanceSquare(centerx, centery, xx, yy); return dd <= radiusSquare(centerx, centery, tri, nodex, nodey, tnodes); } __device__ unsigned addPoint(FORD xx, FORD yy, FORD *nodex, FORD *nodey, unsigned *pnnodes, unsigned &nnodes) { unsigned newpoint = *pnnodes; ++*pnnodes; //atomicInc(pnnodes, MAXID); nodex[newpoint] = xx; nodey[newpoint] = yy; nnodes = newpoint; // update. return newpoint; } __device__ void addPoint(FORD xx, FORD yy, FORD *nodex, FORD *nodey, unsigned newpoint) { nodex[newpoint] = xx; nodey[newpoint] = yy; } __device__ void initNeighbors(unsigned tri, unsigned *neighbors, DIMSTYPE *neighboredges) { unsigned row = 3 * tri; for (unsigned ii = 0; ii < 3; ++ii) { neighbors[row + ii] = INVALIDID; neighboredges[row + ii] = 3; } } __device__ unsigned addTriangle(unsigned point0, unsigned point1, unsigned point2, unsigned *tnodes, unsigned *pntriangles, unsigned &ntriangles, bool *isdel, DIMSTYPE *obtuse, unsigned *neighbors, DIMSTYPE *neighboredges) { unsigned newtriid = atomicInc(pntriangles, MAXID); unsigned newrow = 3 * newtriid; tnodes[newrow + 0] = point0; tnodes[newrow + 1] = point1; tnodes[newrow + 2] = point2; initNeighbors(newtriid, neighbors, neighboredges); isdel[newtriid] = false; obtuse[newtriid] = 3; ntriangles = newtriid; // update. return newtriid; } __device__ void copyNeighbors(unsigned to, unsigned from, unsigned *neighbors, DIMSTYPE *neighboredges) { unsigned torow = 3 * to; unsigned fromrow = 3 * from; for (unsigned ii = 0; ii < 3; ++ii) { neighbors[torow + ii] = neighbors[fromrow + ii]; neighboredges[torow + ii] = neighboredges[fromrow + ii]; // ??? } } __device__ bool updateNeighbor(unsigned of, unsigned oldn, unsigned newn, unsigned *neighbors, unsigned *tnodes) { unsigned row = 3 * of; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); for (unsigned ii = 0; ii < dims; ++ii) { if (neighbors[row + ii] == oldn) { neighbors[row + ii] = newn; return true; } } // no need to update neighboredges, as the index won't change. 
return false; } void *mycudarealloc(void *oldptr, unsigned oldsize, unsigned newsize) { void *newptr; if (hipMalloc((void **)&newptr, newsize) != hipSuccess) CudaTest("allocating newptr failed"); hipMemcpy(newptr, oldptr, oldsize, hipMemcpyDeviceToDevice); hipFree(oldptr); return newptr; } //GPU lock-free synchronization function __device__ void __gpu_sync(unsigned goalVal, volatile unsigned *Arrayin, volatile unsigned *Arrayout) { // thread ID in a block unsigned tid_in_blk = threadIdx.x * blockDim.y + threadIdx.y; unsigned nBlockNum = gridDim.x * gridDim.y; unsigned bid = blockIdx.x * gridDim.y + blockIdx.y; // only thread 0 is used for synchronization if (tid_in_blk == 0) { Arrayin[bid] = goalVal; __threadfence(); } if (bid == 0) { if (tid_in_blk < nBlockNum) { while (Arrayin[tid_in_blk] != goalVal){ //Do nothing here } } __syncthreads(); if (tid_in_blk < nBlockNum) { Arrayout[tid_in_blk] = goalVal; __threadfence(); } } if (tid_in_blk == 0) { while (Arrayout[bid] != goalVal) { //Do nothing here } } __syncthreads(); } __device__ void globalsyncthreads(unsigned &blockcount, volatile unsigned *go) { unsigned tt; if (threadIdx.x == 0) { tt = gridDim.x - 1; if (tt == atomicInc(&blockcount, tt)) { *go = 1; __threadfence_block(); } } __syncthreads(); if (threadIdx.x == 0) { while (*go != 1) { ; } } __syncthreads(); if (threadIdx.x == 0) { tt = gridDim.x - 1; if (tt == atomicInc(&blockcount, tt)) { *go = 0; __threadfence_block(); } } __syncthreads(); if (threadIdx.x == 0) { while (*go != 0) { ; } } __syncthreads(); } __global__ void countbad(bool *isbad, unsigned ntriangles, unsigned *nbad, unsigned goal, volatile unsigned *arrayin, volatile unsigned *arrayout, unsigned *blockcount) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; unsigned nthreads = blockDim.x * gridDim.x; unsigned wpt = (ntriangles + nthreads - 1) / nthreads; unsigned start = id*wpt; unsigned end = start + wpt; __shared__ unsigned tcount[BLOCKSIZE]; unsigned imemyself = threadIdx.x; tcount[imemyself] = 0; for (unsigned ii = start; ii < end; ++ii) { if (ii < ntriangles && isbad[ii]) { ++tcount[imemyself]; } } __syncthreads(); for (unsigned s = blockDim.x / 2; s; s >>= 1) { if (imemyself < s) { tcount[imemyself] += tcount[imemyself + s]; } __syncthreads(); } __syncthreads(); if (imemyself == 0) { blockcount[blockIdx.x] = tcount[0]; __threadfence(); } __gpu_sync(++goal, arrayin, arrayout); if (id == 0) { unsigned lcount = 0; for (unsigned ii = 0; ii < gridDim.x; ++ii) { lcount += blockcount[ii]; } *nbad = lcount; } } #define DEBUGCHECK(ii) if (ii >= SMALLSIZE) { printf("ERROR %s: %d.\n", #ii, ii);} #define DEBUGCHECK4(ii) if (ii >= 4*SMALLSIZE) { printf("ERROR %s: %d.\n", #ii, ii);} #define DEBUGCHECKN(ii, N) if (ii >= N) { printf("ERROR %s: %d.\n", #ii, ii);} #define MAXITR 10 __global__ __launch_bounds__(BLOCKSIZE, TESTNBLOCKSFACTOR) void drefine(FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned *neighbors, DIMSTYPE *neighboredges, bool *isbad, DIMSTYPE *obtuse, bool *isdel, unsigned *pnnodes, unsigned *pntriangles, bool *changed, unsigned starttri, unsigned endtri, unsigned nblocks, unsigned *owner, unsigned *successful, unsigned *aborted, unsigned *blockcount, volatile unsigned *go, unsigned goal, volatile unsigned *arrayin, volatile unsigned *arrayout) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; unsigned nthreads = blockDim.x * nblocks; unsigned wpt = (endtri - starttri + nthreads - 1) / nthreads; //1; unsigned start = starttri + id * wpt; unsigned end = start + wpt; unsigned nnodes = *pnnodes; unsigned 
ntriangles = *pntriangles; unsigned centerelement = 0, row = 0; DIMSTYPE ceobtuse = 3, dims = 3; FORD centerx = 0.0, centery = 0.0; bool lchanged = false; #define SMALLSIZE 64 unsigned frontier[SMALLSIZE], iifrontier = 0; unsigned pre[SMALLSIZE], iipre = 0; unsigned post[SMALLSIZE], iipost = 0; unsigned connections[4 * SMALLSIZE], iiconnections = 0; // edgesrc, edgedst, triangleone, triangletwo. for (unsigned tt = start; tt < end; ++tt) { if (tt < ntriangles && !isdel[tt] && isbad[tt]) { iifrontier = iipre = iipost = iiconnections = 0; // cavity.initialize(tt); centerelement = tt; ceobtuse = obtuse[centerelement]; unsigned itr = 0; while (ceobtuse < 3 && centerelement < ntriangles && ++itr < MAXITR) { // while it is obtuse. centerelement = getOpposite(centerelement, ceobtuse, neighbors, neighboredges, tnodes, nnodes, ntriangles); if (centerelement < ntriangles) { ceobtuse = obtuse[centerelement]; } } if (centerelement >= ntriangles || isdel[centerelement]) { centerelement = tt; ceobtuse = obtuse[centerelement]; } getCenter(centerelement, centerx, centery, nodex, nodey, tnodes, nnodes, ntriangles); pre[iipre++] = centerelement; frontier[iifrontier++] = centerelement; //DEBUGCHECK(iipre); // cavity.build(); while (iifrontier > 0) { unsigned curr = frontier[--iifrontier]; unsigned row = 3 * curr; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); for (unsigned ii = 0; ii < dims; ++ii) { //expand(curr, neighbors[row + ii]); unsigned next = neighbors[row + ii]; if (next >= ntriangles) { break; } if (isdel[next]) { continue; } unsigned nextrow = 3 * next; unsigned nextdims = (tnodes[nextrow + 2] == INVALIDID ? 2 : 3); if (!(dims == 2 && nextdims == 2 && next != centerelement) && inCircumcircle(centerx, centery, next, nodex, nodey, tnodes, nnodes, ntriangles)) { // isMember says next is part of the cavity, and we're not the second // segment encroaching on this cavity if (nextdims == 2 && dims != 2) { // is segment, and we are encroaching. iifrontier = iipre = iipost = iiconnections = 0; centerelement = next; ceobtuse = obtuse[centerelement]; itr = 0; while (ceobtuse < 3 && centerelement < ntriangles && ++itr < MAXITR) { centerelement = getOpposite(centerelement, ceobtuse, neighbors, neighboredges, tnodes, nnodes, ntriangles); if (centerelement < ntriangles) { ceobtuse = obtuse[centerelement]; } } if (centerelement >= ntriangles || isdel[centerelement]) { centerelement = next; ceobtuse = obtuse[centerelement]; } getCenter(centerelement, centerx, centery, nodex, nodey, tnodes, nnodes, ntriangles); pre[iipre++] = centerelement; frontier[iifrontier++] = centerelement; //DEBUGCHECK(iipre); } else { unsigned jj; for (jj = 0; jj < iipre; ++jj) { if (pre[jj] == next) { break; } } if (jj == iipre) { pre[iipre++] = next; frontier[iifrontier++] = next; } //DEBUGCHECK(iipre); } } else { // not a member // add the common edge between curr and next to connections if doesn't already exist. DIMSTYPE cestart = neighboredges[row + ii]; // see definition of next above. if (cestart >= 3) { continue; } unsigned connpt1 = tnodes[row + cestart]; unsigned connpt2 = tnodes[row + (cestart + 1) % dims]; unsigned jj; for (jj = 0; jj < iiconnections; jj += 4) { if (connections[jj] == connpt1 && connections[jj + 1] == connpt2) { break; } } if (jj == iiconnections) { connections[iiconnections++] = connpt1; connections[iiconnections++] = connpt2; connections[iiconnections++] = curr; connections[iiconnections++] = next; //DEBUGCHECK4(iiconnections); } } } } // mark the triangles in the cavity. 
for (unsigned ii = 0; ii < iipre; ++ii) { unsigned cavtri = pre[ii]; if (cavtri < endtri && cavtri >= starttri) { owner[cavtri] = id; } } } //__syncthreads(); //__threadfence(); //globalsyncthreads(*blockcount, go); __gpu_sync(++goal, arrayin, arrayout); bool backoff = false; if (tt < ntriangles && !isdel[tt] && isbad[tt]) { // go over your triangles and see if they contain your id. if (!backoff) { for (unsigned ii = 0; ii < iipre; ++ii) { unsigned cavtri = pre[ii]; if (owner[cavtri] < id) { // cavity overlap and the other thread has priority! backoff = true; break; } else if (owner[cavtri] > id) { // cavity overlap but you have the priority. owner[cavtri] = id; // mark it yours: due to this write, we require another checking phase. } } } //__syncthreads(); //__threadfence(); } //globalsyncthreads(*blockcount, go); __gpu_sync(++goal, arrayin, arrayout); if (tt < ntriangles && !isdel[tt] && isbad[tt]) { // once again go over your triangles and see if they contain your id. if (!backoff) { for (unsigned ii = 0; ii < iipre; ++ii) { unsigned cavtri = pre[ii]; if (owner[cavtri] != id) { // cavity overlap. backoff = true; break; } } } //__syncthreads(); if (backoff) { lchanged = true; ++*aborted; continue; } ++*successful; // cavity.update(): create the new cavity based on the data of the old cavity. row = 3 * centerelement; dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); unsigned newpoint = addPoint(centerx, centery, nodex, nodey, pnnodes, nnodes); if (dims == 2) { // we built around a segment. // create two segments (as triangles). unsigned newtriid1 = addTriangle(newpoint, tnodes[row + 0], INVALIDID, tnodes, pntriangles, ntriangles, isdel, obtuse, neighbors, neighboredges); unsigned newtriid2 = addTriangle(newpoint, tnodes[row + 1], INVALIDID, tnodes, pntriangles, ntriangles, isdel, obtuse, neighbors, neighboredges); // update triangles' neighbors: neighbors of the new triangles (segments) are the same as those of the previous segment? copyNeighbors(newtriid1, centerelement, neighbors, neighboredges); copyNeighbors(newtriid2, centerelement, neighbors, neighboredges); post[iipost++] = newtriid1; post[iipost++] = newtriid2; //DEBUGCHECK(iipost); } for (unsigned ii = 0; ii < iiconnections; ii += 4) { unsigned connpt1 = connections[ii + 0]; unsigned connpt2 = connections[ii + 1]; unsigned connsrc = connections[ii + 2]; unsigned conndst = connections[ii + 3]; unsigned newtri = addTriangle(newpoint, connpt1, connpt2, tnodes, pntriangles, ntriangles, isdel, obtuse, neighbors, neighboredges); unsigned jj; for (jj = 0; jj < iipre; ++jj) { if (pre[jj] == conndst) { break; } } unsigned newconn = (jj == iipre ? conndst : connsrc); // newtri and newconn are triangles, and their common edge is (connpt1, connpt2). // thus they are adjacent; their neighbors need to be updated. unsigned newrow = 3 * newtri; unsigned iineighbor = 0; neighbors[newrow + iineighbor] = newconn; neighboredges[newrow + iineighbor] = 1; // since connpt1 is point1 (newpoint is point0). ++iineighbor; for (unsigned jj = 0; jj < iipost; ++jj) { DIMSTYPE commonedgestart = adjacent(post[jj], newtri, tnodes, nnodes, ntriangles); if (commonedgestart < 3) { if (iineighbor < 3) { //DEBUGCHECKN(iineighbor, 3); neighbors[newrow + iineighbor] = post[jj]; neighboredges[newrow + iineighbor] = commonedgestart; ++iineighbor; } updateNeighbor(post[jj], newconn, newtri, neighbors, tnodes); // update neighbor of post[jj] from newconn to newtri, no need to change neighboredges. 
} } if (iipost < SMALLSIZE) { post[iipost++] = newtri; //DEBUGCHECK(iipost); } } // remove triangles from pre. for (unsigned ii = 0; ii < iipre; ++ii) { unsigned tri = pre[ii]; //DEBUGCHECKN(tri, ntriangles); isdel[tri] = true; } // add triangles from post, mark the bad triangles. // triangles are already added using addTriangle(), simply mark the bad ones. for (unsigned ii = 0; ii < iipost; ++ii) { unsigned tri = post[ii]; //DEBUGCHECKN(tri, 5000000); if (tri < ntriangles) { obtuse[tri] = 3; isbad[tri] = checkbad(tri, nodex, nodey, tnodes, obtuse, ntriangles); lchanged |= isbad[tri]; } } // add neighborhood information for the new triangles: already added using updateNeighbor. } } if (lchanged) { *changed = true; } } int main(int argc, char *argv[]) { unsigned int ntriangles, nnodes, *pnnodes, *pntriangles; bool *changed, hchanged; unsigned *nchanged, hnchanged, *owner; FORD *nodex, *nodey, *hnodex, *hnodey; unsigned *tnodes, *htnodes; unsigned *neighbors; DIMSTYPE *neighboredges; bool *isbad, *isdel; DIMSTYPE *obtuse; int iteration = 0; unsigned hsuccessful = 0, haborted = 0, *successful, *aborted; unsigned *blockcount, intzero = 0; volatile unsigned *go; volatile unsigned *arrayin, *arrayout; unsigned *bcount, *nbad, hnbad; KernelConfig kconf; double starttime, endtime; int runtime; std::string str; //hipDeviceProp_t deviceProp; //hipGetDeviceProperties(&deviceProp, 0); //NBLOCKS = deviceProp.multiProcessorCount; hipFuncSetCacheConfig(drefine, hipFuncCachePreferL1); hipFuncSetCacheConfig(countbad, hipFuncCachePreferL1); hipFuncSetCacheConfig(dfindneighbors, hipFuncCachePreferL1); hipFuncSetCacheConfig(dverify, hipFuncCachePreferL1); hipFuncSetCacheConfig(dinit, hipFuncCachePreferL1); if (argc != 2) { printf("Usage: %s <basefilename>\n", argv[0]); exit(1); } hipGetLastError(); std::cout << "reading graphs...\n"; readNodes(argv[1], hnodex, hnodey, nnodes); std::cout << "\t" << nnodes << " nodes\n"; readTriangles(argv[1], htnodes, ntriangles, nnodes); std::cout << "\t" << ntriangles << " triangles.\n"; kconf.setProblemSize(ntriangles); kconf.setNumberOfBlockThreads(256); //FACTOR = (ntriangles + BLOCKSIZE * NBLOCKS - 1) / (BLOCKSIZE * NBLOCKS); printf("optimizing.\n"); optimizeone(ntriangles); unsigned curralloc = ALLOCFACTOR * ntriangles, currsizenodes = ALLOCFACTOR * nnodes; if (hipMalloc((void **)&nodex, ALLOCFACTOR * nnodes * sizeof(FORD)) != hipSuccess) CudaTest("allocating nodex failed"); if (hipMalloc((void **)&nodey, ALLOCFACTOR * nnodes * sizeof(FORD)) != hipSuccess) CudaTest("allocating nodey failed"); if (hipMalloc((void **)&tnodes, ALLOCFACTOR * 3 * ntriangles * sizeof(unsigned)) != hipSuccess) CudaTest("allocating tnodes failed"); hipMemcpy(nodex, hnodex, nnodes * sizeof(FORD), hipMemcpyHostToDevice); hipMemcpy(nodey, hnodey, nnodes * sizeof(FORD), hipMemcpyHostToDevice); hipMemcpy(tnodes, htnodes, 3 * ntriangles * sizeof(unsigned), hipMemcpyHostToDevice); if (hipMalloc((void **)&neighbors, ALLOCFACTOR * 3 * ntriangles * sizeof(unsigned)) != hipSuccess) CudaTest("allocating neighbors failed"); if (hipMalloc((void **)&neighboredges, ALLOCFACTOR * 3 * ntriangles * sizeof(DIMSTYPE)) != hipSuccess) CudaTest("allocating neighboredges failed"); //printf("finding neighboring triangles.\n"); //unsigned nblocks = NBLOCKS * FACTOR; unsigned ntriperit = kconf.getNumberOfSMs() * kconf.getNumberOfBlockThreads(); unsigned ntriit = kconf.getProblemSize() / ntriperit; //unsigned ntriit = FACTOR; //unsigned ntriperit = NBLOCKS * BLOCKSIZE; starttime = rtclock(); for (unsigned ii = 0; ii < 
ntriit; ++ii) { printf("finding neighbors: %3d%% complete.\r", (int)(ii*ntriperit*100.0 / ntriangles)); //printf("finding neighbors: iteration=%d, start=%d, end=%d.\n", ii, ii * ntriperit, (ii + 1) * ntriperit); //dfindneighbors<<<NBLOCKS, BLOCKSIZE>>> (nodex, nodey, tnodes, neighbors, neighboredges, nnodes, ntriangles, NBLOCKS, 0, ntriangles); hipLaunchKernelGGL(( dfindneighbors), dim3(kconf.getNumberOfSMs()), dim3(kconf.getNumberOfBlockThreads()), 0, 0, nodex, nodey, tnodes, neighbors, neighboredges, nnodes, ntriangles, kconf.getNumberOfSMs(), ii * ntriperit, (ii + 1) * ntriperit); CudaTest("find neighbors failed"); } endtime = rtclock(); printf("\n"); printf("findneighbors took %u ms.\n", (int)(1000.0f * (endtime - starttime))); if (hipMalloc((void **)&isbad, ALLOCFACTOR * ntriangles * sizeof(bool)) != hipSuccess) CudaTest("allocating isbad failed"); if (hipMalloc((void **)&obtuse, ALLOCFACTOR * ntriangles * sizeof(DIMSTYPE)) != hipSuccess) CudaTest("allocating obtuse failed"); if (hipMalloc((void **)&isdel, ALLOCFACTOR * ntriangles * sizeof(bool)) != hipSuccess) CudaTest("allocating isdel failed"); if (hipMalloc((void **)&owner, ALLOCFACTOR * ntriangles * sizeof(unsigned)) != hipSuccess) CudaTest("allocating owner failed"); printf("init.\n"); hipLaunchKernelGGL(( dinit) , dim3(kconf.getNumberOfBlocks()), dim3(kconf.getNumberOfBlockThreads()), 0, 0, nodex, nodey, tnodes, isbad, obtuse, isdel, nnodes, ntriangles); CudaTest("initialization failed"); /*bool *hisbad = (bool *)malloc(ntriangles * sizeof(bool)); hipMemcpy(hisbad, isbad, ntriangles * sizeof(bool), hipMemcpyDeviceToHost); unsigned nbad = 0; for (unsigned ii = 0; ii < ntriangles; ++ii) { if (hisbad[ii]) nbad++; } std::cout << nbad << " bad triangles.\n";*/ if (hipMalloc((void **)&changed, sizeof(bool)) != hipSuccess) CudaTest("allocating changed failed"); if (hipMalloc((void **)&nchanged, sizeof(unsigned)) != hipSuccess) CudaTest("allocating nchanged failed"); if (hipMalloc((void **)&pnnodes, sizeof(unsigned)) != hipSuccess) CudaTest("allocating pnnodes failed"); if (hipMalloc((void **)&pntriangles, sizeof(unsigned)) != hipSuccess) CudaTest("allocating pntriangles failed"); hipMemcpy(pnnodes, &nnodes, sizeof(unsigned), hipMemcpyHostToDevice); hipMemcpy(pntriangles, &ntriangles, sizeof(unsigned), hipMemcpyHostToDevice); if (hipMalloc((void **)&successful, sizeof(unsigned)) != hipSuccess) CudaTest("allocating successful failed"); if (hipMalloc((void **)&aborted, sizeof(unsigned)) != hipSuccess) CudaTest("allocating aborted failed"); if (hipMalloc((void **)&blockcount, sizeof(unsigned)) != hipSuccess) CudaTest("allocating blockcount failed"); if (hipMalloc((void **)&go, sizeof(unsigned)) != hipSuccess) CudaTest("allocating go failed"); hipMemcpy(blockcount, &intzero, sizeof(unsigned), hipMemcpyHostToDevice); hipMemcpy((void *)go, &intzero, sizeof(unsigned), hipMemcpyHostToDevice); unsigned nblockfactor = TESTNBLOCKSFACTOR; //(ntriangles < 1000000 ? 7 : (ntriangles < 10000000 ? 31 : 61)); // for 250k.2, use 7, for r1M use 31, for r5M use 61. 
unsigned nblocks = kconf.getNumberOfSMs() * nblockfactor; unsigned blocksize = kconf.getNumberOfBlockThreads(); bool hlchanged; if (hipMalloc((void **)&arrayin, nblocks*sizeof(volatile unsigned)) != hipSuccess) CudaTest("allocating arrayin failed"); if (hipMalloc((void **)&arrayout, nblocks*sizeof(volatile unsigned)) != hipSuccess) CudaTest("allocating arrayout failed"); if (hipMalloc((void **)&bcount, nblocks*sizeof(unsigned)) != hipSuccess) CudaTest("allocating blockcount failed"); if (hipMalloc((void **)&nbad, sizeof(unsigned)) != hipSuccess) CudaTest("allocating nbad failed"); curralloc = ALLOCFACTOR * ntriangles; printf("solving.\n"); starttime = rtclock(); do { ++iteration; //printf("iteration %d: ntriangles=%d, nnodes=%d.\n", iteration, ntriangles, nnodes); unsigned orintriangles = ntriangles; hchanged = false; hipMemcpy(changed, &hchanged, sizeof(bool), hipMemcpyHostToDevice); hsuccessful = haborted = 0; hipMemcpy(successful, &hsuccessful, sizeof(unsigned), hipMemcpyHostToDevice); hipMemcpy(aborted, &haborted, sizeof(unsigned), hipMemcpyHostToDevice); hipLaunchKernelGGL(( countbad) , dim3(nblocks), dim3(blocksize), 0, 0, isbad, ntriangles, nbad, 1000 + iteration, arrayin, arrayout, bcount); CudaTest("countbad failed"); hipMemcpy(&hnbad, nbad, sizeof(unsigned), hipMemcpyDeviceToHost); //printf("iteration %d: nbad = %d.\n", iteration, hnbad); if (ntriangles + 2 * hnbad > curralloc) { // here 2 is the no of new triangles added for each bad triangle. nodex = (FORD *)mycudarealloc(nodex, currsizenodes*sizeof(FORD), ALLOCMULTIPLE*currsizenodes*sizeof(FORD)); nodey = (FORD *)mycudarealloc(nodey, currsizenodes*sizeof(FORD), ALLOCMULTIPLE*currsizenodes*sizeof(FORD)); currsizenodes = ALLOCMULTIPLE*currsizenodes; tnodes = (unsigned *)mycudarealloc(tnodes, 3*curralloc*sizeof(unsigned), ALLOCMULTIPLE*3*curralloc*sizeof(unsigned)); neighbors = (unsigned *)mycudarealloc(neighbors, 3*curralloc*sizeof(unsigned), ALLOCMULTIPLE*3*curralloc*sizeof(unsigned)); neighboredges = (unsigned *)mycudarealloc(neighboredges, 3*curralloc*sizeof(DIMSTYPE), ALLOCMULTIPLE*3*curralloc*sizeof(DIMSTYPE)); isbad = (bool *)mycudarealloc(isbad, curralloc*sizeof(bool), ALLOCMULTIPLE*curralloc*sizeof(bool)); obtuse = (DIMSTYPE *)mycudarealloc(obtuse, curralloc*sizeof(DIMSTYPE), ALLOCMULTIPLE*curralloc*sizeof(DIMSTYPE)); isdel = (bool *)mycudarealloc(isdel, curralloc*sizeof(bool), ALLOCMULTIPLE*curralloc*sizeof(bool)); owner = (unsigned *)mycudarealloc(owner, curralloc*sizeof(unsigned), ALLOCMULTIPLE*curralloc*sizeof(unsigned)); curralloc *= ALLOCMULTIPLE; printf("\t\tallocating memory to %d.\n", curralloc); } ntriperit = ntriangles; ntriit = (ntriangles + ntriperit - 1) / ntriperit; //1; //FACTOR; for (unsigned ii = 0; ii < ntriit; ++ii) { //printf("solving: inner iteration=%d, ntriangles=%d, nnodes=%d.\n", ii, ntriangles, nnodes); hipLaunchKernelGGL(( drefine) , dim3(nblocks), dim3(blocksize), 0, 0, nodex, nodey, tnodes, neighbors, neighboredges, isbad, obtuse, isdel, pnnodes, pntriangles, changed, ii * ntriperit, (ii + 1)*ntriperit, nblocks, owner, successful, aborted, blockcount, go, iteration, arrayin, arrayout); CudaTest("solving failed"); hipMemcpy(&nnodes, pnnodes, sizeof(unsigned), hipMemcpyDeviceToHost); hipMemcpy(&ntriangles, pntriangles, sizeof(unsigned), hipMemcpyDeviceToHost); hipMemcpy(&hsuccessful, successful, sizeof(unsigned), hipMemcpyDeviceToHost); hipMemcpy(&haborted, aborted, sizeof(unsigned), hipMemcpyDeviceToHost); //printf("\tsuccessful=%d, aborted=%d.\n", hsuccessful, haborted); 
hipMemcpy(&hlchanged, changed, sizeof(bool), hipMemcpyDeviceToHost); hchanged |= hlchanged; } if (hchanged && orintriangles == ntriangles) { nblocks = blocksize = 1; } else { nblocks = kconf.getNumberOfSMs() * nblockfactor; blocksize = kconf.getNumberOfBlockThreads(); } } while (hchanged); endtime = rtclock(); printf("verifying...\n"); hnchanged = 0; hipMemcpy(nchanged, &hnchanged, sizeof(unsigned), hipMemcpyHostToDevice); hchanged = false; hipMemcpy(changed, &hchanged, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( dverify) , dim3(kconf.getNumberOfBlocks()), dim3(kconf.getNumberOfBlockThreads()), 0, 0, nodex, nodey, tnodes, isbad, isdel, nnodes, ntriangles, changed, nchanged); CudaTest("verification failed"); hipMemcpy(&hchanged, changed, sizeof(bool), hipMemcpyDeviceToHost); hipMemcpy(&hnchanged, nchanged, sizeof(unsigned), hipMemcpyDeviceToHost); if (hchanged) { printf("verification failed: bad triangles exist: %d.\n", hnchanged); } else { printf("verification succeeded: 0 bad triangles exist.\n"); } printf("iterations = %d.\n", iteration); runtime = (int) (1000.0f * (endtime - starttime)); printf("%d ms.\n", runtime); // cleanup left to the OS. return 0; }
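The HIP translation above and the CUDA original that follows differ mainly in launch syntax: as seen above, hipify rewrites each triple-chevron launch as a hipLaunchKernelGGL call with an explicit grid, block, dynamic shared-memory size and stream. A minimal sketch of that mapping, using a placeholder kernel and sizes that are illustrative rather than taken from the mesh-refinement code:

#include <hip/hip_runtime.h>

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;                           // element-wise update
}

void launch_scale(float *d_x, float a, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA source form:   scale<<<grid, block>>>(d_x, a, n);
    // hipify output form (0 bytes of dynamic shared memory, default stream):
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_x, a, n);
}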
0c7990dc981fffc3eb0c024fa697ea44c4ca77cc.cu
/** Delaunay refinement -*- CUDA -*- * @file * @section License * * Galois, a framework to exploit amorphous data-parallelism in irregular * programs. * * Copyright (C) 2013, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. * * @section Description * * Refinement of an initial, unrefined Delaunay mesh to eliminate triangles * with angles < 30 degrees, using a variation of Chew's algorithm. * * @author Rupesh Nasre <[email protected]> */ #include "lonestargpu.h" #define MINANGLE 30 #define PI 3.14159265358979323846 // from C99 standard. #define FORD float #define DIMSTYPE unsigned #define INVALIDID 1234567890 #define MAXID INVALIDID #define TESTNBLOCKSFACTOR 4 #define ALLOCMULTIPLE 2 // alloc in multiples of this. unsigned ALLOCFACTOR = 6; // initial alloc factor. void next_line(std::ifstream& scanner) { scanner.ignore(std::numeric_limits<std::streamsize>::max(), '\n'); } void readNodes(std::string filename, FORD * &nodex, FORD * &nodey, unsigned &nnodes) { std::ifstream scanner(filename.append(".node").c_str()); scanner >> nnodes; //next_line(scanner); nodex = (FORD *)malloc(nnodes * sizeof(FORD)); nodey = (FORD *)malloc(nnodes * sizeof(FORD)); for (size_t i = 0; i < nnodes; i++) { size_t index; FORD x; FORD y; next_line(scanner); scanner >> index >> x >> y; nodex[index] = x; nodey[index] = y; } } void readTriangles(std::string basename, unsigned * &tnodes, unsigned &ntriangles, unsigned nnodes) { // bug on the placement of next_line identified by Molly O'Neil: fixed. 
unsigned ntrianglesone, ntrianglestwo; unsigned i, index, n1, n2, n3, row; std::string filename; filename = basename; std::ifstream scanner(filename.append(".ele").c_str()); scanner >> ntrianglesone; filename = basename; std::ifstream scannerperimeter(filename.append(".poly").c_str()); //next_line(scannerperimeter); scannerperimeter >> ntrianglestwo; ntriangles = ntrianglesone + ntrianglestwo; tnodes = (unsigned *)malloc(3 * ntriangles * sizeof(unsigned)); for (i = 0; i < ntrianglesone; i++) { next_line(scanner); scanner >> index >> n1 >> n2 >> n3; row = 3 * index; tnodes[row + 0] = n1; tnodes[row + 1] = n2; tnodes[row + 2] = n3; } for (i = 0; i < ntrianglestwo; i++) { next_line(scannerperimeter); scannerperimeter >> index >> n1 >> n2; row = 3 * (ntrianglesone + index); tnodes[row + 0] = n1; tnodes[row + 1] = n2; tnodes[row + 2] = INVALIDID; } } void optimizeone(unsigned ntriangles) { } __device__ FORD distanceSquare(FORD onex, FORD oney, FORD twox, FORD twoy) { FORD dx = onex - twox; FORD dy = oney - twoy; FORD dsq = dx * dx + dy * dy; return dsq; } __device__ FORD distanceSquare(unsigned one, unsigned two, FORD *nodex, FORD *nodey) { return distanceSquare(nodex[one], nodey[one], nodex[two], nodey[two]); } __device__ FORD distance(unsigned one, unsigned two, FORD *nodex, FORD *nodey) { return sqrtf(distanceSquare(one, two, nodex, nodey)); } __device__ FORD radiusSquare(FORD centerx, FORD centery, unsigned tri, FORD *nodex, FORD *nodey, unsigned *tnodes) { unsigned row = 3 * tri; unsigned first = tnodes[row + 0]; return distanceSquare(centerx, centery, nodex[first], nodey[first]); } __device__ bool checkbad(unsigned id, FORD *nodex, FORD *nodey, unsigned *tnodes, DIMSTYPE *obtuse, unsigned ntriangles) { //if (id < ntriangles) { unsigned row = 3 * id; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); for (unsigned ii = 0; ii < dims; ++ii) { unsigned curr = tnodes[row + ii]; unsigned aa = tnodes[row + (ii + 1) % dims]; unsigned bb = tnodes[row + (ii + 2) % dims]; if (curr < ntriangles && aa < ntriangles && bb < ntriangles) { FORD vax = nodex[aa] - nodex[curr]; FORD vay = nodey[aa] - nodey[curr]; FORD vbx = nodex[bb] - nodex[curr]; FORD vby = nodey[bb] - nodey[curr]; FORD dp = vax * vbx + vay * vby; if (dp < 0) { // id is obtuse at point ii. obtuse[id] = ii; } else { FORD dsqaacurr = distanceSquare(aa, curr, nodex, nodey); FORD dsqbbcurr = distanceSquare(bb, curr, nodex, nodey); FORD c = dp * rsqrtf(dsqaacurr * dsqbbcurr); if (c > cos(MINANGLE * (PI / 180))) { return true; } } } } //} return false; } __global__ void dinit(FORD *nodex, FORD *nodey, unsigned *tnodes, bool *isbad, DIMSTYPE *obtuse, bool *isdel, unsigned nnodes, unsigned ntriangles) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if (id < ntriangles) { obtuse[id] = 3; isbad[id] = checkbad(id, nodex, nodey, tnodes, obtuse, ntriangles); isdel[id] = false; } } __global__ void dverify(FORD *nodex, FORD *nodey, unsigned *tnodes, bool *isbad, bool *isdel, unsigned nnodes, unsigned ntriangles, bool *changed, unsigned *nchanged) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; if (id < ntriangles && !isdel[id] && isbad[id] ) { *changed = true; ++*nchanged; } } __device__ unsigned adjacent(unsigned trione, unsigned tritwo, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { unsigned rowone = 3 * trione; unsigned rowtwo = 3 * tritwo; unsigned dimsone = (tnodes[rowone + 2] == INVALIDID ? 2 : 3); unsigned dimstwo = (tnodes[rowtwo + 2] == INVALIDID ? 
2 : 3); unsigned ncommon = 0; unsigned firstmatch = 3; // not adjacent. for (unsigned ii = 0; ii < dimsone; ++ii) { for (unsigned jj = 0; jj < dimstwo; ++jj) { if (tnodes[rowone + ii] == tnodes[rowtwo + jj]) { if (++ncommon == 2) { return firstmatch; } else { firstmatch = ii; } } } } return 3; // not adjacent. } __global__ void dfindneighbors(FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned *neighbors, DIMSTYPE *neighboredges, unsigned nnodes, unsigned ntriangles, unsigned nblocks, unsigned starttri, unsigned endtri) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; //unsigned wpt = (ntriangles + nblocks * blockDim.x - 1) / nblocks / blockDim.x; unsigned wpt = (endtri - starttri + nblocks * blockDim.x - 1) / (nblocks * blockDim.x); //1; unsigned start = starttri + id * wpt; unsigned end = start + wpt; for (unsigned tt = start; tt < end && tt < ntriangles; ++tt) { unsigned row = 3 * tt; unsigned iirow = 0; //for (unsigned ii = 0; ii < ntriangles; ++ii) { for (unsigned ii = starttri; ii < endtri; ++ii) { if (ii != tt) { unsigned commonedgestart = adjacent(tt, ii, tnodes, nnodes, ntriangles); if (commonedgestart < 3 && iirow < 3) { // common edge, adjacent. neighbors[row + iirow] = ii; neighboredges[row + iirow] = commonedgestart; // store the common edge for the first triangle, another thread will store it for the second triangle. ++iirow; } } } // fill the remaining entries by invalid data. for (; iirow < 3; ++iirow) { neighbors[row + iirow] = INVALIDID; neighboredges[row + iirow] = 3; } } } __device__ unsigned getOpposite(unsigned centerelement, unsigned obtuse, unsigned *neighbors, DIMSTYPE *neighboredges, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { unsigned row = 3 * centerelement; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); DIMSTYPE commonedgepoint1 = (obtuse + 1) % dims; //unsigned commonedgepoint2 = (obtuse + 2) % dims; for (unsigned ii = 0; ii < 3; ++ii) { // iterate over neighbors. DIMSTYPE nnedgestart = neighboredges[row + ii]; if (nnedgestart == commonedgepoint1) { return neighbors[row + ii]; } } return INVALIDID; } __device__ void getCenter(unsigned centerelement, FORD &centerx, FORD &centery, FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { unsigned row = 3 * centerelement; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); unsigned aa = tnodes[row + 0]; unsigned bb = tnodes[row + 1]; unsigned cc = tnodes[row + 2]; if (!(aa < ntriangles && bb < ntriangles && cc < ntriangles)) { centerx = centery = 0.0; return; } if (dims == 2) { centerx = (nodex[aa] + nodex[bb]) * 0.5; centery = (nodey[aa] + nodey[bb]) * 0.5; return; } FORD xxx = nodex[bb] - nodex[aa]; FORD xxy = nodey[bb] - nodey[aa]; FORD yyx = nodex[cc] - nodex[aa]; FORD yyy = nodey[cc] - nodey[aa]; FORD xxlen = distance(aa, bb, nodex, nodey); FORD yylen = distance(aa, cc, nodex, nodey); FORD cosine = (xxx * yyx + xxy * yyy) / (xxlen * yylen); FORD sinesq = 1.0 - cosine * cosine; FORD plen = yylen / xxlen; FORD ss = plen * cosine; FORD tt = plen * sinesq; FORD wp = (plen - cosine) / (2 * tt); FORD wb = 0.5 - (wp * ss); centerx = nodex[aa] * (1 - wb - wp) + nodex[bb] * wb + nodex[cc] * wp; centery = nodey[aa] * (1 - wb - wp) + nodey[bb] * wb + nodey[cc] * wp; } __device__ bool inCircumcircle(FORD xx, FORD yy, unsigned tri, FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned nnodes, unsigned ntriangles) { // check if point (xx, yy) is in the circumcircle of tri. 
FORD centerx, centery; getCenter(tri, centerx, centery, nodex, nodey, tnodes, nnodes, ntriangles); FORD dd = distanceSquare(centerx, centery, xx, yy); return dd <= radiusSquare(centerx, centery, tri, nodex, nodey, tnodes); } __device__ unsigned addPoint(FORD xx, FORD yy, FORD *nodex, FORD *nodey, unsigned *pnnodes, unsigned &nnodes) { unsigned newpoint = *pnnodes; ++*pnnodes; //atomicInc(pnnodes, MAXID); nodex[newpoint] = xx; nodey[newpoint] = yy; nnodes = newpoint; // update. return newpoint; } __device__ void addPoint(FORD xx, FORD yy, FORD *nodex, FORD *nodey, unsigned newpoint) { nodex[newpoint] = xx; nodey[newpoint] = yy; } __device__ void initNeighbors(unsigned tri, unsigned *neighbors, DIMSTYPE *neighboredges) { unsigned row = 3 * tri; for (unsigned ii = 0; ii < 3; ++ii) { neighbors[row + ii] = INVALIDID; neighboredges[row + ii] = 3; } } __device__ unsigned addTriangle(unsigned point0, unsigned point1, unsigned point2, unsigned *tnodes, unsigned *pntriangles, unsigned &ntriangles, bool *isdel, DIMSTYPE *obtuse, unsigned *neighbors, DIMSTYPE *neighboredges) { unsigned newtriid = atomicInc(pntriangles, MAXID); unsigned newrow = 3 * newtriid; tnodes[newrow + 0] = point0; tnodes[newrow + 1] = point1; tnodes[newrow + 2] = point2; initNeighbors(newtriid, neighbors, neighboredges); isdel[newtriid] = false; obtuse[newtriid] = 3; ntriangles = newtriid; // update. return newtriid; } __device__ void copyNeighbors(unsigned to, unsigned from, unsigned *neighbors, DIMSTYPE *neighboredges) { unsigned torow = 3 * to; unsigned fromrow = 3 * from; for (unsigned ii = 0; ii < 3; ++ii) { neighbors[torow + ii] = neighbors[fromrow + ii]; neighboredges[torow + ii] = neighboredges[fromrow + ii]; // ??? } } __device__ bool updateNeighbor(unsigned of, unsigned oldn, unsigned newn, unsigned *neighbors, unsigned *tnodes) { unsigned row = 3 * of; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); for (unsigned ii = 0; ii < dims; ++ii) { if (neighbors[row + ii] == oldn) { neighbors[row + ii] = newn; return true; } } // no need to update neighboredges, as the index won't change. 
return false; } void *mycudarealloc(void *oldptr, unsigned oldsize, unsigned newsize) { void *newptr; if (cudaMalloc((void **)&newptr, newsize) != cudaSuccess) CudaTest("allocating newptr failed"); cudaMemcpy(newptr, oldptr, oldsize, cudaMemcpyDeviceToDevice); cudaFree(oldptr); return newptr; } //GPU lock-free synchronization function __device__ void __gpu_sync(unsigned goalVal, volatile unsigned *Arrayin, volatile unsigned *Arrayout) { // thread ID in a block unsigned tid_in_blk = threadIdx.x * blockDim.y + threadIdx.y; unsigned nBlockNum = gridDim.x * gridDim.y; unsigned bid = blockIdx.x * gridDim.y + blockIdx.y; // only thread 0 is used for synchronization if (tid_in_blk == 0) { Arrayin[bid] = goalVal; __threadfence(); } if (bid == 0) { if (tid_in_blk < nBlockNum) { while (Arrayin[tid_in_blk] != goalVal){ //Do nothing here } } __syncthreads(); if (tid_in_blk < nBlockNum) { Arrayout[tid_in_blk] = goalVal; __threadfence(); } } if (tid_in_blk == 0) { while (Arrayout[bid] != goalVal) { //Do nothing here } } __syncthreads(); } __device__ void globalsyncthreads(unsigned &blockcount, volatile unsigned *go) { unsigned tt; if (threadIdx.x == 0) { tt = gridDim.x - 1; if (tt == atomicInc(&blockcount, tt)) { *go = 1; __threadfence_block(); } } __syncthreads(); if (threadIdx.x == 0) { while (*go != 1) { ; } } __syncthreads(); if (threadIdx.x == 0) { tt = gridDim.x - 1; if (tt == atomicInc(&blockcount, tt)) { *go = 0; __threadfence_block(); } } __syncthreads(); if (threadIdx.x == 0) { while (*go != 0) { ; } } __syncthreads(); } __global__ void countbad(bool *isbad, unsigned ntriangles, unsigned *nbad, unsigned goal, volatile unsigned *arrayin, volatile unsigned *arrayout, unsigned *blockcount) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; unsigned nthreads = blockDim.x * gridDim.x; unsigned wpt = (ntriangles + nthreads - 1) / nthreads; unsigned start = id*wpt; unsigned end = start + wpt; __shared__ unsigned tcount[BLOCKSIZE]; unsigned imemyself = threadIdx.x; tcount[imemyself] = 0; for (unsigned ii = start; ii < end; ++ii) { if (ii < ntriangles && isbad[ii]) { ++tcount[imemyself]; } } __syncthreads(); for (unsigned s = blockDim.x / 2; s; s >>= 1) { if (imemyself < s) { tcount[imemyself] += tcount[imemyself + s]; } __syncthreads(); } __syncthreads(); if (imemyself == 0) { blockcount[blockIdx.x] = tcount[0]; __threadfence(); } __gpu_sync(++goal, arrayin, arrayout); if (id == 0) { unsigned lcount = 0; for (unsigned ii = 0; ii < gridDim.x; ++ii) { lcount += blockcount[ii]; } *nbad = lcount; } } #define DEBUGCHECK(ii) if (ii >= SMALLSIZE) { printf("ERROR %s: %d.\n", #ii, ii);} #define DEBUGCHECK4(ii) if (ii >= 4*SMALLSIZE) { printf("ERROR %s: %d.\n", #ii, ii);} #define DEBUGCHECKN(ii, N) if (ii >= N) { printf("ERROR %s: %d.\n", #ii, ii);} #define MAXITR 10 __global__ __launch_bounds__(BLOCKSIZE, TESTNBLOCKSFACTOR) void drefine(FORD *nodex, FORD *nodey, unsigned *tnodes, unsigned *neighbors, DIMSTYPE *neighboredges, bool *isbad, DIMSTYPE *obtuse, bool *isdel, unsigned *pnnodes, unsigned *pntriangles, bool *changed, unsigned starttri, unsigned endtri, unsigned nblocks, unsigned *owner, unsigned *successful, unsigned *aborted, unsigned *blockcount, volatile unsigned *go, unsigned goal, volatile unsigned *arrayin, volatile unsigned *arrayout) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; unsigned nthreads = blockDim.x * nblocks; unsigned wpt = (endtri - starttri + nthreads - 1) / nthreads; //1; unsigned start = starttri + id * wpt; unsigned end = start + wpt; unsigned nnodes = *pnnodes; 
unsigned ntriangles = *pntriangles; unsigned centerelement = 0, row = 0; DIMSTYPE ceobtuse = 3, dims = 3; FORD centerx = 0.0, centery = 0.0; bool lchanged = false; #define SMALLSIZE 64 unsigned frontier[SMALLSIZE], iifrontier = 0; unsigned pre[SMALLSIZE], iipre = 0; unsigned post[SMALLSIZE], iipost = 0; unsigned connections[4 * SMALLSIZE], iiconnections = 0; // edgesrc, edgedst, triangleone, triangletwo. for (unsigned tt = start; tt < end; ++tt) { if (tt < ntriangles && !isdel[tt] && isbad[tt]) { iifrontier = iipre = iipost = iiconnections = 0; // cavity.initialize(tt); centerelement = tt; ceobtuse = obtuse[centerelement]; unsigned itr = 0; while (ceobtuse < 3 && centerelement < ntriangles && ++itr < MAXITR) { // while it is obtuse. centerelement = getOpposite(centerelement, ceobtuse, neighbors, neighboredges, tnodes, nnodes, ntriangles); if (centerelement < ntriangles) { ceobtuse = obtuse[centerelement]; } } if (centerelement >= ntriangles || isdel[centerelement]) { centerelement = tt; ceobtuse = obtuse[centerelement]; } getCenter(centerelement, centerx, centery, nodex, nodey, tnodes, nnodes, ntriangles); pre[iipre++] = centerelement; frontier[iifrontier++] = centerelement; //DEBUGCHECK(iipre); // cavity.build(); while (iifrontier > 0) { unsigned curr = frontier[--iifrontier]; unsigned row = 3 * curr; DIMSTYPE dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); for (unsigned ii = 0; ii < dims; ++ii) { //expand(curr, neighbors[row + ii]); unsigned next = neighbors[row + ii]; if (next >= ntriangles) { break; } if (isdel[next]) { continue; } unsigned nextrow = 3 * next; unsigned nextdims = (tnodes[nextrow + 2] == INVALIDID ? 2 : 3); if (!(dims == 2 && nextdims == 2 && next != centerelement) && inCircumcircle(centerx, centery, next, nodex, nodey, tnodes, nnodes, ntriangles)) { // isMember says next is part of the cavity, and we're not the second // segment encroaching on this cavity if (nextdims == 2 && dims != 2) { // is segment, and we are encroaching. iifrontier = iipre = iipost = iiconnections = 0; centerelement = next; ceobtuse = obtuse[centerelement]; itr = 0; while (ceobtuse < 3 && centerelement < ntriangles && ++itr < MAXITR) { centerelement = getOpposite(centerelement, ceobtuse, neighbors, neighboredges, tnodes, nnodes, ntriangles); if (centerelement < ntriangles) { ceobtuse = obtuse[centerelement]; } } if (centerelement >= ntriangles || isdel[centerelement]) { centerelement = next; ceobtuse = obtuse[centerelement]; } getCenter(centerelement, centerx, centery, nodex, nodey, tnodes, nnodes, ntriangles); pre[iipre++] = centerelement; frontier[iifrontier++] = centerelement; //DEBUGCHECK(iipre); } else { unsigned jj; for (jj = 0; jj < iipre; ++jj) { if (pre[jj] == next) { break; } } if (jj == iipre) { pre[iipre++] = next; frontier[iifrontier++] = next; } //DEBUGCHECK(iipre); } } else { // not a member // add the common edge between curr and next to connections if doesn't already exist. DIMSTYPE cestart = neighboredges[row + ii]; // see definition of next above. if (cestart >= 3) { continue; } unsigned connpt1 = tnodes[row + cestart]; unsigned connpt2 = tnodes[row + (cestart + 1) % dims]; unsigned jj; for (jj = 0; jj < iiconnections; jj += 4) { if (connections[jj] == connpt1 && connections[jj + 1] == connpt2) { break; } } if (jj == iiconnections) { connections[iiconnections++] = connpt1; connections[iiconnections++] = connpt2; connections[iiconnections++] = curr; connections[iiconnections++] = next; //DEBUGCHECK4(iiconnections); } } } } // mark the triangles in the cavity. 
for (unsigned ii = 0; ii < iipre; ++ii) { unsigned cavtri = pre[ii]; if (cavtri < endtri && cavtri >= starttri) { owner[cavtri] = id; } } } //__syncthreads(); //__threadfence(); //globalsyncthreads(*blockcount, go); __gpu_sync(++goal, arrayin, arrayout); bool backoff = false; if (tt < ntriangles && !isdel[tt] && isbad[tt]) { // go over your triangles and see if they contain your id. if (!backoff) { for (unsigned ii = 0; ii < iipre; ++ii) { unsigned cavtri = pre[ii]; if (owner[cavtri] < id) { // cavity overlap and the other thread has priority! backoff = true; break; } else if (owner[cavtri] > id) { // cavity overlap but you have the priority. owner[cavtri] = id; // mark it yours: due to this write, we require another checking phase. } } } //__syncthreads(); //__threadfence(); } //globalsyncthreads(*blockcount, go); __gpu_sync(++goal, arrayin, arrayout); if (tt < ntriangles && !isdel[tt] && isbad[tt]) { // once again go over your triangles and see if they contain your id. if (!backoff) { for (unsigned ii = 0; ii < iipre; ++ii) { unsigned cavtri = pre[ii]; if (owner[cavtri] != id) { // cavity overlap. backoff = true; break; } } } //__syncthreads(); if (backoff) { lchanged = true; ++*aborted; continue; } ++*successful; // cavity.update(): create the new cavity based on the data of the old cavity. row = 3 * centerelement; dims = (tnodes[row + 2] == INVALIDID ? 2 : 3); unsigned newpoint = addPoint(centerx, centery, nodex, nodey, pnnodes, nnodes); if (dims == 2) { // we built around a segment. // create two segments (as triangles). unsigned newtriid1 = addTriangle(newpoint, tnodes[row + 0], INVALIDID, tnodes, pntriangles, ntriangles, isdel, obtuse, neighbors, neighboredges); unsigned newtriid2 = addTriangle(newpoint, tnodes[row + 1], INVALIDID, tnodes, pntriangles, ntriangles, isdel, obtuse, neighbors, neighboredges); // update triangles' neighbors: neighbors of the new triangles (segments) are the same as those of the previous segment? copyNeighbors(newtriid1, centerelement, neighbors, neighboredges); copyNeighbors(newtriid2, centerelement, neighbors, neighboredges); post[iipost++] = newtriid1; post[iipost++] = newtriid2; //DEBUGCHECK(iipost); } for (unsigned ii = 0; ii < iiconnections; ii += 4) { unsigned connpt1 = connections[ii + 0]; unsigned connpt2 = connections[ii + 1]; unsigned connsrc = connections[ii + 2]; unsigned conndst = connections[ii + 3]; unsigned newtri = addTriangle(newpoint, connpt1, connpt2, tnodes, pntriangles, ntriangles, isdel, obtuse, neighbors, neighboredges); unsigned jj; for (jj = 0; jj < iipre; ++jj) { if (pre[jj] == conndst) { break; } } unsigned newconn = (jj == iipre ? conndst : connsrc); // newtri and newconn are triangles, and their common edge is (connpt1, connpt2). // thus they are adjacent; their neighbors need to be updated. unsigned newrow = 3 * newtri; unsigned iineighbor = 0; neighbors[newrow + iineighbor] = newconn; neighboredges[newrow + iineighbor] = 1; // since connpt1 is point1 (newpoint is point0). ++iineighbor; for (unsigned jj = 0; jj < iipost; ++jj) { DIMSTYPE commonedgestart = adjacent(post[jj], newtri, tnodes, nnodes, ntriangles); if (commonedgestart < 3) { if (iineighbor < 3) { //DEBUGCHECKN(iineighbor, 3); neighbors[newrow + iineighbor] = post[jj]; neighboredges[newrow + iineighbor] = commonedgestart; ++iineighbor; } updateNeighbor(post[jj], newconn, newtri, neighbors, tnodes); // update neighbor of post[jj] from newconn to newtri, no need to change neighboredges. 
} } if (iipost < SMALLSIZE) { post[iipost++] = newtri; //DEBUGCHECK(iipost); } } // remove triangles from pre. for (unsigned ii = 0; ii < iipre; ++ii) { unsigned tri = pre[ii]; //DEBUGCHECKN(tri, ntriangles); isdel[tri] = true; } // add triangles from post, mark the bad triangles. // triangles are already added using addTriangle(), simply mark the bad ones. for (unsigned ii = 0; ii < iipost; ++ii) { unsigned tri = post[ii]; //DEBUGCHECKN(tri, 5000000); if (tri < ntriangles) { obtuse[tri] = 3; isbad[tri] = checkbad(tri, nodex, nodey, tnodes, obtuse, ntriangles); lchanged |= isbad[tri]; } } // add neighborhood information for the new triangles: already added using updateNeighbor. } } if (lchanged) { *changed = true; } } int main(int argc, char *argv[]) { unsigned int ntriangles, nnodes, *pnnodes, *pntriangles; bool *changed, hchanged; unsigned *nchanged, hnchanged, *owner; FORD *nodex, *nodey, *hnodex, *hnodey; unsigned *tnodes, *htnodes; unsigned *neighbors; DIMSTYPE *neighboredges; bool *isbad, *isdel; DIMSTYPE *obtuse; int iteration = 0; unsigned hsuccessful = 0, haborted = 0, *successful, *aborted; unsigned *blockcount, intzero = 0; volatile unsigned *go; volatile unsigned *arrayin, *arrayout; unsigned *bcount, *nbad, hnbad; KernelConfig kconf; double starttime, endtime; int runtime; std::string str; //cudaDeviceProp deviceProp; //cudaGetDeviceProperties(&deviceProp, 0); //NBLOCKS = deviceProp.multiProcessorCount; cudaFuncSetCacheConfig(drefine, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(countbad, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(dfindneighbors, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(dverify, cudaFuncCachePreferL1); cudaFuncSetCacheConfig(dinit, cudaFuncCachePreferL1); if (argc != 2) { printf("Usage: %s <basefilename>\n", argv[0]); exit(1); } cudaGetLastError(); std::cout << "reading graphs...\n"; readNodes(argv[1], hnodex, hnodey, nnodes); std::cout << "\t" << nnodes << " nodes\n"; readTriangles(argv[1], htnodes, ntriangles, nnodes); std::cout << "\t" << ntriangles << " triangles.\n"; kconf.setProblemSize(ntriangles); kconf.setNumberOfBlockThreads(256); //FACTOR = (ntriangles + BLOCKSIZE * NBLOCKS - 1) / (BLOCKSIZE * NBLOCKS); printf("optimizing.\n"); optimizeone(ntriangles); unsigned curralloc = ALLOCFACTOR * ntriangles, currsizenodes = ALLOCFACTOR * nnodes; if (cudaMalloc((void **)&nodex, ALLOCFACTOR * nnodes * sizeof(FORD)) != cudaSuccess) CudaTest("allocating nodex failed"); if (cudaMalloc((void **)&nodey, ALLOCFACTOR * nnodes * sizeof(FORD)) != cudaSuccess) CudaTest("allocating nodey failed"); if (cudaMalloc((void **)&tnodes, ALLOCFACTOR * 3 * ntriangles * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating tnodes failed"); cudaMemcpy(nodex, hnodex, nnodes * sizeof(FORD), cudaMemcpyHostToDevice); cudaMemcpy(nodey, hnodey, nnodes * sizeof(FORD), cudaMemcpyHostToDevice); cudaMemcpy(tnodes, htnodes, 3 * ntriangles * sizeof(unsigned), cudaMemcpyHostToDevice); if (cudaMalloc((void **)&neighbors, ALLOCFACTOR * 3 * ntriangles * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating neighbors failed"); if (cudaMalloc((void **)&neighboredges, ALLOCFACTOR * 3 * ntriangles * sizeof(DIMSTYPE)) != cudaSuccess) CudaTest("allocating neighboredges failed"); //printf("finding neighboring triangles.\n"); //unsigned nblocks = NBLOCKS * FACTOR; unsigned ntriperit = kconf.getNumberOfSMs() * kconf.getNumberOfBlockThreads(); unsigned ntriit = kconf.getProblemSize() / ntriperit; //unsigned ntriit = FACTOR; //unsigned ntriperit = NBLOCKS * BLOCKSIZE; starttime = rtclock(); for 
(unsigned ii = 0; ii < ntriit; ++ii) { printf("finding neighbors: %3d%% complete.\r", (int)(ii*ntriperit*100.0 / ntriangles)); //printf("finding neighbors: iteration=%d, start=%d, end=%d.\n", ii, ii * ntriperit, (ii + 1) * ntriperit); //dfindneighbors<<<NBLOCKS, BLOCKSIZE>>> (nodex, nodey, tnodes, neighbors, neighboredges, nnodes, ntriangles, NBLOCKS, 0, ntriangles); dfindneighbors<<<kconf.getNumberOfSMs(), kconf.getNumberOfBlockThreads()>>> (nodex, nodey, tnodes, neighbors, neighboredges, nnodes, ntriangles, kconf.getNumberOfSMs(), ii * ntriperit, (ii + 1) * ntriperit); CudaTest("find neighbors failed"); } endtime = rtclock(); printf("\n"); printf("findneighbors took %u ms.\n", (int)(1000.0f * (endtime - starttime))); if (cudaMalloc((void **)&isbad, ALLOCFACTOR * ntriangles * sizeof(bool)) != cudaSuccess) CudaTest("allocating isbad failed"); if (cudaMalloc((void **)&obtuse, ALLOCFACTOR * ntriangles * sizeof(DIMSTYPE)) != cudaSuccess) CudaTest("allocating obtuse failed"); if (cudaMalloc((void **)&isdel, ALLOCFACTOR * ntriangles * sizeof(bool)) != cudaSuccess) CudaTest("allocating isdel failed"); if (cudaMalloc((void **)&owner, ALLOCFACTOR * ntriangles * sizeof(unsigned)) != cudaSuccess) CudaTest("allocating owner failed"); printf("init.\n"); dinit <<<kconf.getNumberOfBlocks(), kconf.getNumberOfBlockThreads()>>> (nodex, nodey, tnodes, isbad, obtuse, isdel, nnodes, ntriangles); CudaTest("initialization failed"); /*bool *hisbad = (bool *)malloc(ntriangles * sizeof(bool)); cudaMemcpy(hisbad, isbad, ntriangles * sizeof(bool), cudaMemcpyDeviceToHost); unsigned nbad = 0; for (unsigned ii = 0; ii < ntriangles; ++ii) { if (hisbad[ii]) nbad++; } std::cout << nbad << " bad triangles.\n";*/ if (cudaMalloc((void **)&changed, sizeof(bool)) != cudaSuccess) CudaTest("allocating changed failed"); if (cudaMalloc((void **)&nchanged, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating nchanged failed"); if (cudaMalloc((void **)&pnnodes, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating pnnodes failed"); if (cudaMalloc((void **)&pntriangles, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating pntriangles failed"); cudaMemcpy(pnnodes, &nnodes, sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(pntriangles, &ntriangles, sizeof(unsigned), cudaMemcpyHostToDevice); if (cudaMalloc((void **)&successful, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating successful failed"); if (cudaMalloc((void **)&aborted, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating aborted failed"); if (cudaMalloc((void **)&blockcount, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating blockcount failed"); if (cudaMalloc((void **)&go, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating go failed"); cudaMemcpy(blockcount, &intzero, sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy((void *)go, &intzero, sizeof(unsigned), cudaMemcpyHostToDevice); unsigned nblockfactor = TESTNBLOCKSFACTOR; //(ntriangles < 1000000 ? 7 : (ntriangles < 10000000 ? 31 : 61)); // for 250k.2, use 7, for r1M use 31, for r5M use 61. 
unsigned nblocks = kconf.getNumberOfSMs() * nblockfactor; unsigned blocksize = kconf.getNumberOfBlockThreads(); bool hlchanged; if (cudaMalloc((void **)&arrayin, nblocks*sizeof(volatile unsigned)) != cudaSuccess) CudaTest("allocating arrayin failed"); if (cudaMalloc((void **)&arrayout, nblocks*sizeof(volatile unsigned)) != cudaSuccess) CudaTest("allocating arrayout failed"); if (cudaMalloc((void **)&bcount, nblocks*sizeof(unsigned)) != cudaSuccess) CudaTest("allocating blockcount failed"); if (cudaMalloc((void **)&nbad, sizeof(unsigned)) != cudaSuccess) CudaTest("allocating nbad failed"); curralloc = ALLOCFACTOR * ntriangles; printf("solving.\n"); starttime = rtclock(); do { ++iteration; //printf("iteration %d: ntriangles=%d, nnodes=%d.\n", iteration, ntriangles, nnodes); unsigned orintriangles = ntriangles; hchanged = false; cudaMemcpy(changed, &hchanged, sizeof(bool), cudaMemcpyHostToDevice); hsuccessful = haborted = 0; cudaMemcpy(successful, &hsuccessful, sizeof(unsigned), cudaMemcpyHostToDevice); cudaMemcpy(aborted, &haborted, sizeof(unsigned), cudaMemcpyHostToDevice); countbad <<<nblocks, blocksize>>> (isbad, ntriangles, nbad, 1000 + iteration, arrayin, arrayout, bcount); CudaTest("countbad failed"); cudaMemcpy(&hnbad, nbad, sizeof(unsigned), cudaMemcpyDeviceToHost); //printf("iteration %d: nbad = %d.\n", iteration, hnbad); if (ntriangles + 2 * hnbad > curralloc) { // here 2 is the no of new triangles added for each bad triangle. nodex = (FORD *)mycudarealloc(nodex, currsizenodes*sizeof(FORD), ALLOCMULTIPLE*currsizenodes*sizeof(FORD)); nodey = (FORD *)mycudarealloc(nodey, currsizenodes*sizeof(FORD), ALLOCMULTIPLE*currsizenodes*sizeof(FORD)); currsizenodes = ALLOCMULTIPLE*currsizenodes; tnodes = (unsigned *)mycudarealloc(tnodes, 3*curralloc*sizeof(unsigned), ALLOCMULTIPLE*3*curralloc*sizeof(unsigned)); neighbors = (unsigned *)mycudarealloc(neighbors, 3*curralloc*sizeof(unsigned), ALLOCMULTIPLE*3*curralloc*sizeof(unsigned)); neighboredges = (unsigned *)mycudarealloc(neighboredges, 3*curralloc*sizeof(DIMSTYPE), ALLOCMULTIPLE*3*curralloc*sizeof(DIMSTYPE)); isbad = (bool *)mycudarealloc(isbad, curralloc*sizeof(bool), ALLOCMULTIPLE*curralloc*sizeof(bool)); obtuse = (DIMSTYPE *)mycudarealloc(obtuse, curralloc*sizeof(DIMSTYPE), ALLOCMULTIPLE*curralloc*sizeof(DIMSTYPE)); isdel = (bool *)mycudarealloc(isdel, curralloc*sizeof(bool), ALLOCMULTIPLE*curralloc*sizeof(bool)); owner = (unsigned *)mycudarealloc(owner, curralloc*sizeof(unsigned), ALLOCMULTIPLE*curralloc*sizeof(unsigned)); curralloc *= ALLOCMULTIPLE; printf("\t\tallocating memory to %d.\n", curralloc); } ntriperit = ntriangles; ntriit = (ntriangles + ntriperit - 1) / ntriperit; //1; //FACTOR; for (unsigned ii = 0; ii < ntriit; ++ii) { //printf("solving: inner iteration=%d, ntriangles=%d, nnodes=%d.\n", ii, ntriangles, nnodes); drefine <<<nblocks, blocksize>>> (nodex, nodey, tnodes, neighbors, neighboredges, isbad, obtuse, isdel, pnnodes, pntriangles, changed, ii * ntriperit, (ii + 1)*ntriperit, nblocks, owner, successful, aborted, blockcount, go, iteration, arrayin, arrayout); CudaTest("solving failed"); cudaMemcpy(&nnodes, pnnodes, sizeof(unsigned), cudaMemcpyDeviceToHost); cudaMemcpy(&ntriangles, pntriangles, sizeof(unsigned), cudaMemcpyDeviceToHost); cudaMemcpy(&hsuccessful, successful, sizeof(unsigned), cudaMemcpyDeviceToHost); cudaMemcpy(&haborted, aborted, sizeof(unsigned), cudaMemcpyDeviceToHost); //printf("\tsuccessful=%d, aborted=%d.\n", hsuccessful, haborted); cudaMemcpy(&hlchanged, changed, sizeof(bool), 
cudaMemcpyDeviceToHost); hchanged |= hlchanged; } if (hchanged && orintriangles == ntriangles) { nblocks = blocksize = 1; } else { nblocks = kconf.getNumberOfSMs() * nblockfactor; blocksize = kconf.getNumberOfBlockThreads(); } } while (hchanged); endtime = rtclock(); printf("verifying...\n"); hnchanged = 0; cudaMemcpy(nchanged, &hnchanged, sizeof(unsigned), cudaMemcpyHostToDevice); hchanged = false; cudaMemcpy(changed, &hchanged, sizeof(bool), cudaMemcpyHostToDevice); dverify <<<kconf.getNumberOfBlocks(), kconf.getNumberOfBlockThreads()>>> (nodex, nodey, tnodes, isbad, isdel, nnodes, ntriangles, changed, nchanged); CudaTest("verification failed"); cudaMemcpy(&hchanged, changed, sizeof(bool), cudaMemcpyDeviceToHost); cudaMemcpy(&hnchanged, nchanged, sizeof(unsigned), cudaMemcpyDeviceToHost); if (hchanged) { printf("verification failed: bad triangles exist: %d.\n", hnchanged); } else { printf("verification succeeded: 0 bad triangles exist.\n"); } printf("iterations = %d.\n", iteration); runtime = (int) (1000.0f * (endtime - starttime)); printf("%d ms.\n", runtime); // cleanup left to the OS. return 0; }
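The hand-rolled __gpu_sync barrier above synchronizes blocks through the arrayin/arrayout flags and only works when every launched block is resident on the device at the same time. A minimal sketch of the same grid-wide barrier expressed with cooperative groups, one supported alternative; it assumes a device and driver that allow cooperative launch, and the kernel, counter name and launch shape here are illustrative only:

#include <cooperative_groups.h>
#include <cstdio>
namespace cg = cooperative_groups;

__global__ void two_phase(unsigned *count) {
    cg::grid_group grid = cg::this_grid();
    if (threadIdx.x == 0) atomicAdd(count, 1u);       // phase 1: each block checks in
    grid.sync();                                      // grid-wide barrier
    if (blockIdx.x == 0 && threadIdx.x == 0)
        printf("blocks arrived: %u\n", *count);       // phase 2: one block reads the total
}

// Host side: the kernel must be launched cooperatively for grid.sync() to be valid.
// void *args[] = { &d_count };
// cudaLaunchCooperativeKernel((void *)two_phase, gridDim, blockDim, args, 0, 0);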
108fa0699af63ba82a9fc55ccacde7e1e7765c7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gradient.h" __global__ void horizontalGradientKernel(float* image, float* grad, int xRes, int yRes) { int u = (blockIdx.x * blockDim.x) + threadIdx.x; int v = (blockIdx.y * blockDim.y) + threadIdx.y; int i = v * xRes + u; if(u < xRes && v < yRes){ float diff = 0.0f; if(u < xRes - 1 && u > 0) { //Diff to right diff = image[i+1] - image[i-1]; } grad[i] = diff;//isnan(diff)?0.0f:diff; } } __global__ void verticalGradientKernel(float* image, float* grad, int xRes, int yRes) { int u = (blockIdx.x * blockDim.x) + threadIdx.x; int v = (blockIdx.y * blockDim.y) + threadIdx.y; int i = v * xRes + u; if(u < xRes && v < yRes){ float diff = 0.0f; if(v < yRes - 1 && v > 0) { //Diff to right diff = image[i+xRes] - image[i-xRes]; } grad[i] = diff;//isnan(diff)?0.0f:diff; } } __host__ void horizontalGradient(float* image_in, float* gradient_out, int width, int height) { int tileX = 16; int tileY = 16; dim3 threads(tileX,tileY); dim3 blocks((int)ceil(float(width)/float(tileX)), (int)ceil(float(height)/float(tileY))); hipLaunchKernelGGL(( horizontalGradientKernel), dim3(blocks),dim3(threads), 0, 0, image_in, gradient_out, width, height); } __host__ void verticalGradient(float* image_in, float* gradient_out, int width, int height) { int tileX = 16; int tileY = 16; dim3 threads(tileX,tileY); dim3 blocks((int)ceil(float(width)/float(tileX)), (int)ceil(float(height)/float(tileY))); hipLaunchKernelGGL(( verticalGradientKernel), dim3(blocks),dim3(threads), 0, 0, image_in, gradient_out, width, height); }
108fa0699af63ba82a9fc55ccacde7e1e7765c7e.cu
#include "gradient.h" __global__ void horizontalGradientKernel(float* image, float* grad, int xRes, int yRes) { int u = (blockIdx.x * blockDim.x) + threadIdx.x; int v = (blockIdx.y * blockDim.y) + threadIdx.y; int i = v * xRes + u; if(u < xRes && v < yRes){ float diff = 0.0f; if(u < xRes - 1 && u > 0) { //Diff to right diff = image[i+1] - image[i-1]; } grad[i] = diff;//isnan(diff)?0.0f:diff; } } __global__ void verticalGradientKernel(float* image, float* grad, int xRes, int yRes) { int u = (blockIdx.x * blockDim.x) + threadIdx.x; int v = (blockIdx.y * blockDim.y) + threadIdx.y; int i = v * xRes + u; if(u < xRes && v < yRes){ float diff = 0.0f; if(v < yRes - 1 && v > 0) { //Diff to right diff = image[i+xRes] - image[i-xRes]; } grad[i] = diff;//isnan(diff)?0.0f:diff; } } __host__ void horizontalGradient(float* image_in, float* gradient_out, int width, int height) { int tileX = 16; int tileY = 16; dim3 threads(tileX,tileY); dim3 blocks((int)ceil(float(width)/float(tileX)), (int)ceil(float(height)/float(tileY))); horizontalGradientKernel<<<blocks,threads>>>(image_in, gradient_out, width, height); } __host__ void verticalGradient(float* image_in, float* gradient_out, int width, int height) { int tileX = 16; int tileY = 16; dim3 threads(tileX,tileY); dim3 blocks((int)ceil(float(width)/float(tileX)), (int)ceil(float(height)/float(tileY))); verticalGradientKernel<<<blocks,threads>>>(image_in, gradient_out, width, height); }
151d06c9b08167075f14179e50c6998a5810ac76.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "AssembleArrayOfNoticedChannels.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int nmbrOfChnnls = 1; const float lwrNtcdEnrg = 1; const float hghrNtcdEnrg = 1; const float *lwrChnnlBndrs = NULL; hipMalloc(&lwrChnnlBndrs, XSIZE*YSIZE); const float *hghrChnnlBndrs = NULL; hipMalloc(&hghrChnnlBndrs, XSIZE*YSIZE); const float *gdQltChnnls = NULL; hipMalloc(&gdQltChnnls, XSIZE*YSIZE); float *ntcdChnnls = NULL; hipMalloc(&ntcdChnnls, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( AssembleArrayOfNoticedChannels), dim3(gridBlock),dim3(threadBlock), 0, 0, nmbrOfChnnls,lwrNtcdEnrg,hghrNtcdEnrg,lwrChnnlBndrs,hghrChnnlBndrs,gdQltChnnls,ntcdChnnls); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( AssembleArrayOfNoticedChannels), dim3(gridBlock),dim3(threadBlock), 0, 0, nmbrOfChnnls,lwrNtcdEnrg,hghrNtcdEnrg,lwrChnnlBndrs,hghrChnnlBndrs,gdQltChnnls,ntcdChnnls); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( AssembleArrayOfNoticedChannels), dim3(gridBlock),dim3(threadBlock), 0, 0, nmbrOfChnnls,lwrNtcdEnrg,hghrNtcdEnrg,lwrChnnlBndrs,hghrChnnlBndrs,gdQltChnnls,ntcdChnnls); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
151d06c9b08167075f14179e50c6998a5810ac76.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "AssembleArrayOfNoticedChannels.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int nmbrOfChnnls = 1; const float lwrNtcdEnrg = 1; const float hghrNtcdEnrg = 1; const float *lwrChnnlBndrs = NULL; cudaMalloc(&lwrChnnlBndrs, XSIZE*YSIZE); const float *hghrChnnlBndrs = NULL; cudaMalloc(&hghrChnnlBndrs, XSIZE*YSIZE); const float *gdQltChnnls = NULL; cudaMalloc(&gdQltChnnls, XSIZE*YSIZE); float *ntcdChnnls = NULL; cudaMalloc(&ntcdChnnls, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); AssembleArrayOfNoticedChannels<<<gridBlock,threadBlock>>>(nmbrOfChnnls,lwrNtcdEnrg,hghrNtcdEnrg,lwrChnnlBndrs,hghrChnnlBndrs,gdQltChnnls,ntcdChnnls); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { AssembleArrayOfNoticedChannels<<<gridBlock,threadBlock>>>(nmbrOfChnnls,lwrNtcdEnrg,hghrNtcdEnrg,lwrChnnlBndrs,hghrChnnlBndrs,gdQltChnnls,ntcdChnnls); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { AssembleArrayOfNoticedChannels<<<gridBlock,threadBlock>>>(nmbrOfChnnls,lwrNtcdEnrg,hghrNtcdEnrg,lwrChnnlBndrs,hghrChnnlBndrs,gdQltChnnls,ntcdChnnls); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
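The benchmark above times 1000 launches with a host-side steady_clock; kernel launches are asynchronous, so unless the stream is synchronized before the second timestamp the host clock can stop before the queued kernels have finished. A minimal sketch of device-side timing with CUDA events, where the wrapped launch and the kernel under test are placeholders:

#include <cuda_runtime.h>

template <typename Launch>
float timeKernelMs(Launch launch) {          // launch() enqueues the kernel under test
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    launch();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);              // wait until the kernel has completed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}

// usage: float ms = timeKernelMs([&] { AssembleArrayOfNoticedChannels<<<gridBlock, threadBlock>>>(
//     nmbrOfChnnls, lwrNtcdEnrg, hghrNtcdEnrg, lwrChnnlBndrs, hghrChnnlBndrs, gdQltChnnls, ntcdChnnls); });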
711713b7c1eebfcf57baa35ff993f21ded3ceb97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> using namespace std; #define i_size 6//image size #define o_size 6 #define k_size 3//kernel size int input[i_size][i_size]; int kernel[k_size][k_size]; int output[i_size][i_size]; typedef int itype[i_size]; typedef int ktype[k_size]; void fill_image(int m[i_size][i_size]) { static int n = 0; int i, j; for (i = 0; i < i_size; i++) for (j = 0; j < i_size; j++) m[i][j] = n++; } void fill_kernel(int m[k_size][k_size]) { static int n = 0; int i, j; for (i = 0; i < k_size; i++) for (j = 0; j < k_size; j++) m[i][j] = n++; } void fill_output(int m[i_size][i_size]) { int i, j; for (i = 0; i < o_size; i++) { cout << "\n \t\t |"; for (j = 0; j < o_size; j++) cout << "\t\t" << m[i][j]; cout << "|"; } } __global__ void add_arrays_gpu(int* a, int *b, int* c) { c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x]; } __global__ void processing(itype *a,ktype *kernel, itype *o) { int r = 0; int i = (blockIdx.y*blockDim.y + threadIdx.y)+1; int j = (blockIdx.x*blockDim.x + threadIdx.x)+1; for (int k = -1; k < 2; k++) { for (int m = -1; m < 2; m++) { r += a[i + k][j + m] * kernel[k + 1][m + 1]; o[i][j] = r; } } } int main() {fill_image(input); fill_kernel(kernel); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); itype *device_a, *device_c; ktype *device_b; const int size = i_size*i_size * sizeof(int); size_t pitch; hipMallocPitch((void**)&device_a, &pitch, i_size * sizeof(float), i_size); hipMallocPitch((void**)&device_b, &pitch, k_size * sizeof(float), k_size); hipMallocPitch((void**)&device_c, &pitch, i_size * sizeof(float), i_size); dim3 blockspergrid(2,2,1); dim3 threadperblock(2,2,1); hipMemcpy( device_a, input, size, hipMemcpyHostToDevice ); hipMemcpy( device_b, kernel, size, hipMemcpyHostToDevice ); int r = 0; //add_arra<< <1, count >> > (device_a, device_b, device_c); hipEventRecord(start); //processing <<<blockspergrid, threadperblock >> > (device_a,device_b,device_c); for (int i = 1; i < i_size - 1; i++) { for (int j = 1; j < i_size - 1; j++) for (int k = -1; k < 2; k++) { for (int m = -1; m < 2; m++) { r += input[i + k][j + m] * kernel[k + 1][m + 1]; output[i][j] = r; } } } hipEventRecord(stop); /*hipMemcpy( output, device_c, size, hipMemcpyDeviceToHost );*/ hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << "\n total time to execute operation is " << milliseconds << "\n"; fill_output(output); //getchar(); return 0; }
711713b7c1eebfcf57baa35ff993f21ded3ceb97.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> using namespace std; #define i_size 6//image size #define o_size 6 #define k_size 3//kernel size int input[i_size][i_size]; int kernel[k_size][k_size]; int output[i_size][i_size]; typedef int itype[i_size]; typedef int ktype[k_size]; void fill_image(int m[i_size][i_size]) { static int n = 0; int i, j; for (i = 0; i < i_size; i++) for (j = 0; j < i_size; j++) m[i][j] = n++; } void fill_kernel(int m[k_size][k_size]) { static int n = 0; int i, j; for (i = 0; i < k_size; i++) for (j = 0; j < k_size; j++) m[i][j] = n++; } void fill_output(int m[i_size][i_size]) { int i, j; for (i = 0; i < o_size; i++) { cout << "\n \t\t |"; for (j = 0; j < o_size; j++) cout << "\t\t" << m[i][j]; cout << "|"; } } __global__ void add_arrays_gpu(int* a, int *b, int* c) { c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x]; } __global__ void processing(itype *a,ktype *kernel, itype *o) { int r = 0; int i = (blockIdx.y*blockDim.y + threadIdx.y)+1; int j = (blockIdx.x*blockDim.x + threadIdx.x)+1; for (int k = -1; k < 2; k++) { for (int m = -1; m < 2; m++) { r += a[i + k][j + m] * kernel[k + 1][m + 1]; o[i][j] = r; } } } int main() {fill_image(input); fill_kernel(kernel); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); itype *device_a, *device_c; ktype *device_b; const int size = i_size*i_size * sizeof(int); size_t pitch; cudaMallocPitch((void**)&device_a, &pitch, i_size * sizeof(float), i_size); cudaMallocPitch((void**)&device_b, &pitch, k_size * sizeof(float), k_size); cudaMallocPitch((void**)&device_c, &pitch, i_size * sizeof(float), i_size); dim3 blockspergrid(2,2,1); dim3 threadperblock(2,2,1); cudaMemcpy( device_a, input, size, cudaMemcpyHostToDevice ); cudaMemcpy( device_b, kernel, size, cudaMemcpyHostToDevice ); int r = 0; //add_arra<< <1, count >> > (device_a, device_b, device_c); cudaEventRecord(start); //processing <<<blockspergrid, threadperblock >> > (device_a,device_b,device_c); for (int i = 1; i < i_size - 1; i++) { for (int j = 1; j < i_size - 1; j++) for (int k = -1; k < 2; k++) { for (int m = -1; m < 2; m++) { r += input[i + k][j + m] * kernel[k + 1][m + 1]; output[i][j] = r; } } } cudaEventRecord(stop); /*cudaMemcpy( output, device_c, size, cudaMemcpyDeviceToHost );*/ cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << "\n total time to execute operation is " << milliseconds << "\n"; fill_output(output); //getchar(); return 0; }
2e7928cca0cd9f36ecbac19c16a1f602ff4d6ee3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : nanoBraggCUDA.cu Author : Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <stdio.h> #include "nanotypes.h" #include "cuda_compatibility.h" static void CheckCudaErrorAux(const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #ifndef CUDAREAL #define CUDAREAL float #endif #define THREADS_PER_BLOCK_X 128 #define THREADS_PER_BLOCK_Y 1 #define THREADS_PER_BLOCK_TOTAL (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y) #define VECTOR_SIZE 4 struct hklParams { int hkls; int h_min; int h_max; int h_range; int k_min; int k_max; int k_range; int l_min; int l_max; int l_range; }; /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement << " returned " << hipGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; exit(1); } static hipError_t cudaMemcpyVectorDoubleToDevice(CUDAREAL *dst, double *src, size_t vector_items) { CUDAREAL * temp = new CUDAREAL[vector_items]; for (size_t i = 0; i < vector_items; i++) { temp[i] = src[i]; } hipError_t ret = hipMemcpy(dst, temp, sizeof(*dst) * vector_items, hipMemcpyHostToDevice); delete temp; return ret; } /* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */ double cpu_unitize(double *vector, double *new_unit_vector); double cpu_unitize(double * vector, double * new_unit_vector) { double v1 = vector[1]; double v2 = vector[2]; double v3 = vector[3]; double mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3); if (mag != 0.0) { /* normalize it */ new_unit_vector[0] = mag; new_unit_vector[1] = v1 / mag; new_unit_vector[2] = v2 / mag; new_unit_vector[3] = v3 / mag; } else { /* can't normalize, report zero vector */ new_unit_vector[0] = 0.0; new_unit_vector[1] = 0.0; new_unit_vector[2] = 0.0; new_unit_vector[3] = 0.0; } return mag; } __global__ void nanoBraggSpotsInitCUDAKernel(int spixels, int fpixesl, float * floatimage, float * omega_reduction, float * max_I_x_reduction, float * max_I_y_reduction, bool * rangemap); __global__ void nanoBraggSpotsCUDAKernel(int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, CUDAREAL pixel_size, CUDAREAL subpixel_size, int steps, CUDAREAL detector_thickstep, int detector_thicksteps, CUDAREAL detector_thick, CUDAREAL detector_mu, const CUDAREAL * __restrict__ sdet_vector, const CUDAREAL * __restrict__ fdet_vector, const CUDAREAL * __restrict__ odet_vector, const CUDAREAL * __restrict__ pix0_vector, int curved_detector, CUDAREAL distance, CUDAREAL close_distance, const CUDAREAL * __restrict__ beam_vector, CUDAREAL Xbeam, CUDAREAL Ybeam, CUDAREAL dmin, CUDAREAL phi0, CUDAREAL phistep, int phisteps, const CUDAREAL * __restrict__ spindle_vector, int sources, const CUDAREAL * __restrict__ source_X, const CUDAREAL * __restrict__ source_Y, const CUDAREAL * __restrict__ source_Z, const CUDAREAL * __restrict__ source_I, const CUDAREAL * __restrict__ source_lambda, const CUDAREAL * 
__restrict__ a0, const CUDAREAL * __restrict__ b0, const CUDAREAL * __restrict c0, shapetype xtal_shape, CUDAREAL mosaic_spread, int mosaic_domains, const CUDAREAL * __restrict__ mosaic_umats, CUDAREAL Na, CUDAREAL Nb, CUDAREAL Nc, CUDAREAL V_cell, CUDAREAL water_size, CUDAREAL water_F, CUDAREAL water_MW, CUDAREAL r_e_sqr, CUDAREAL fluence, CUDAREAL Avogadro, CUDAREAL spot_scale, int integral_form, CUDAREAL default_F, int interpolate, const CUDAREAL * __restrict__ Fhkl, const hklParams * __restrict__ Fhklparams, int nopolar, const CUDAREAL * __restrict__ polar_vector, CUDAREAL polarization, CUDAREAL fudge, const int unsigned short * __restrict__ maskimage, float * floatimage /*out*/, float * omega_reduction/*out*/, float * max_I_x_reduction/*out*/, float * max_I_y_reduction /*out*/, bool * rangemap); extern "C" void nanoBraggSpotsCUDA(int deviceId, int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, double pixel_size, double subpixel_size, int steps, double detector_thickstep, int detector_thicksteps, double detector_thick, double detector_mu, double sdet_vector[4], double fdet_vector[4], double odet_vector[4], double pix0_vector[4], int curved_detector, double distance, double close_distance, double beam_vector[4], double Xbeam, double Ybeam, double dmin, double phi0, double phistep, int phisteps, double spindle_vector[4], int sources, double *source_X, double *source_Y, double * source_Z, double * source_I, double * source_lambda, double a0[4], double b0[4], double c0[4], shapetype xtal_shape, double mosaic_spread, int mosaic_domains, double * mosaic_umats, double Na, double Nb, double Nc, double V_cell, double water_size, double water_F, double water_MW, double r_e_sqr, double fluence, double Avogadro, int integral_form, double default_F, int interpolate, double *** Fhkl, int h_min, int h_max, int h_range, int k_min, int k_max, int k_range, int l_min, int l_max, int l_range, int hkls, int nopolar, double polar_vector[4], double polarization, double fudge, int unsigned short * maskimage, float * floatimage /*out*/, double * omega_sum/*out*/, int * sumn /*out*/, double * sum /*out*/, double * sumsqr /*out*/, double * max_I/*out*/, double * max_I_x/*out*/, double * max_I_y /*out*/, double spot_scale) { int total_pixels = spixels * fpixels; hipSetDevice(deviceId); /*allocate and zero reductions */ bool * rangemap = (bool*) calloc(total_pixels, sizeof(bool)); float * omega_reduction = (float*) calloc(total_pixels, sizeof(float)); float * max_I_x_reduction = (float*) calloc(total_pixels, sizeof(float)); float * max_I_y_reduction = (float*) calloc(total_pixels, sizeof(float)); /* clear memory (TODO: consider this being optional) */ memset(floatimage, 0, sizeof(typeof(*floatimage)) * total_pixels); /*create transfer arguments to device space*/ int cu_spixels = spixels, cu_fpixels = fpixels; int cu_roi_xmin = roi_xmin, cu_roi_xmax = roi_xmax, cu_roi_ymin = roi_ymin, cu_roi_ymax = roi_ymax; int cu_oversample = oversample; int cu_point_pixel = point_pixel; CUDAREAL cu_pixel_size = pixel_size, cu_subpixel_size = subpixel_size; int cu_steps = steps; CUDAREAL cu_detector_thickstep = detector_thickstep, cu_detector_thick = detector_thick, cu_detector_mu = detector_mu; int cu_detector_thicksteps = detector_thicksteps; int cu_curved_detector = curved_detector; CUDAREAL cu_distance = distance, cu_close_distance = close_distance; CUDAREAL cu_Xbeam = Xbeam, cu_Ybeam = Ybeam; CUDAREAL cu_dmin = dmin, cu_phi0 = phi0, cu_phistep = phistep; int 
cu_phisteps = phisteps; shapetype cu_xtal_shape = xtal_shape; int cu_sources = sources; CUDAREAL cu_mosaic_spread = mosaic_spread; int cu_mosaic_domains = mosaic_domains; CUDAREAL cu_Na = Na, cu_Nb = Nb, cu_Nc = Nc, cu_V_cell = V_cell, cu_water_size = water_size, cu_water_F = water_F, cu_water_MW = water_MW; CUDAREAL cu_r_e_sqr = r_e_sqr, cu_fluence = fluence, cu_Avogadro = Avogadro, cu_spot_scale = spot_scale; int cu_integral_form = integral_form; CUDAREAL cu_default_F = default_F; int cu_interpolate = interpolate; // int cu_h_min = h_min, cu_h_max = h_max, cu_h_range = h_range; // int cu_k_min = k_min, cu_k_max = k_max, cu_k_range = k_range; // int cu_l_min = l_min, cu_l_max = l_max, cu_l_range = l_range; // int cu_hkls = hkls; int cu_nopolar = nopolar; CUDAREAL cu_polarization = polarization, cu_fudge = fudge; hklParams FhklParams = { hkls, h_min, h_max, h_range, k_min, k_max, k_range, l_min, l_max, l_range }; hklParams * cu_FhklParams; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_FhklParams, sizeof(*cu_FhklParams))); CUDA_CHECK_RETURN(hipMemcpy(cu_FhklParams, &FhklParams, sizeof(*cu_FhklParams), hipMemcpyHostToDevice)); const int vector_length = 4; CUDAREAL * cu_sdet_vector; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_sdet_vector, sizeof(*cu_sdet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_sdet_vector, sdet_vector, vector_length)); CUDAREAL * cu_fdet_vector; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_fdet_vector, sizeof(*cu_fdet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_fdet_vector, fdet_vector, vector_length)); CUDAREAL * cu_odet_vector; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_odet_vector, sizeof(*cu_odet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_odet_vector, odet_vector, vector_length)); CUDAREAL * cu_pix0_vector; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_pix0_vector, sizeof(*cu_pix0_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_pix0_vector, pix0_vector, vector_length)); CUDAREAL * cu_beam_vector; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_beam_vector, sizeof(*cu_beam_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_beam_vector, beam_vector, vector_length)); CUDAREAL * cu_spindle_vector; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_spindle_vector, sizeof(*cu_spindle_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_spindle_vector, spindle_vector, vector_length)); CUDAREAL * cu_a0; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_a0, sizeof(*cu_a0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_a0, a0, vector_length)); CUDAREAL * cu_b0; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_b0, sizeof(*cu_b0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_b0, b0, vector_length)); CUDAREAL * cu_c0; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_c0, sizeof(*cu_c0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_c0, c0, vector_length)); // Unitize polar vector before sending it to the GPU. Optimization do it only once here rather than multiple time per pixel in the GPU. 
CUDAREAL * cu_polar_vector; double polar_vector_unitized[4]; cpu_unitize(polar_vector, polar_vector_unitized); CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_polar_vector, sizeof(*cu_polar_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_polar_vector, polar_vector_unitized, vector_length)); CUDAREAL * cu_source_X = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_source_X, sizeof(*cu_source_X) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_X, source_X, sources)); CUDAREAL * cu_source_Y = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_source_Y, sizeof(*cu_source_Y) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_Y, source_Y, sources)); CUDAREAL * cu_source_Z = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_source_Z, sizeof(*cu_source_Z) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_Z, source_Z, sources)); CUDAREAL * cu_source_I = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_source_I, sizeof(*cu_source_I) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_I, source_I, sources)); CUDAREAL * cu_source_lambda = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_source_lambda, sizeof(*cu_source_lambda) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, source_lambda, sources)); CUDAREAL * cu_mosaic_umats = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_mosaic_umats, sizeof(*cu_mosaic_umats) * mosaic_domains * 9)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_mosaic_umats, mosaic_umats, mosaic_domains * 9)); float * cu_floatimage = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_floatimage, sizeof(*cu_floatimage) * total_pixels)); CUDA_CHECK_RETURN(hipMemcpy(cu_floatimage, floatimage, sizeof(*cu_floatimage) * total_pixels, hipMemcpyHostToDevice)); float * cu_omega_reduction = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_omega_reduction, sizeof(*cu_omega_reduction) * total_pixels)); CUDA_CHECK_RETURN(hipMemcpy(cu_omega_reduction, omega_reduction, sizeof(*cu_omega_reduction) * total_pixels, hipMemcpyHostToDevice)); float * cu_max_I_x_reduction = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels)); CUDA_CHECK_RETURN(hipMemcpy(cu_max_I_x_reduction, max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels, hipMemcpyHostToDevice)); float * cu_max_I_y_reduction = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels)); CUDA_CHECK_RETURN(hipMemcpy(cu_max_I_y_reduction, max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels, hipMemcpyHostToDevice)); bool * cu_rangemap = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_rangemap, sizeof(*cu_rangemap) * total_pixels)); CUDA_CHECK_RETURN(hipMemcpy(cu_rangemap, rangemap, sizeof(*cu_rangemap) * total_pixels, hipMemcpyHostToDevice)); int unsigned short * cu_maskimage = NULL; if (maskimage != NULL) { CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_maskimage, sizeof(*cu_maskimage) * total_pixels)); CUDA_CHECK_RETURN(hipMemcpy(cu_maskimage, maskimage, sizeof(*cu_maskimage) * total_pixels, hipMemcpyHostToDevice)); } int hklsize = h_range * k_range * l_range; CUDAREAL * FhklLinear = (CUDAREAL*) calloc(hklsize, sizeof(*FhklLinear)); for (int h = 0; h < h_range; h++) { for (int k = 0; k < k_range; k++) { // memcpy(FhklLinear + (h * k_range * l_range + k * l_range), Fhkl[h][k], sizeof(*FhklLinear) * l_range); for (int l = 0; l < l_range; l++) { // convert Fhkl double to CUDAREAL 
FhklLinear[h * k_range * l_range + k * l_range + l] = Fhkl[h][k][l]; } } } CUDAREAL * cu_Fhkl = NULL; CUDA_CHECK_RETURN(hipMalloc((void ** )&cu_Fhkl, sizeof(*cu_Fhkl) * hklsize)); CUDA_CHECK_RETURN(hipMemcpy(cu_Fhkl, FhklLinear, sizeof(*cu_Fhkl) * hklsize, hipMemcpyHostToDevice)); free(FhklLinear); //int deviceId = 0; CUDA_CHECK_RETURN(hipGetDevice(&deviceId)); hipDeviceProp_t deviceProps = { 0 }; CUDA_CHECK_RETURN(hipGetDeviceProperties(&deviceProps, deviceId)); int smCount = deviceProps.multiProcessorCount; // CUDA_CHECK_RETURN(hipFuncSetCacheConfig(nanoBraggSpotsCUDAKernel, hipFuncCachePreferShared)); // CUDA_CHECK_RETURN(hipFuncSetCacheConfig(nanoBraggSpotsCUDAKernel, hipFuncCachePreferL1)); dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); // dim3 numBlocks((spixels - 1) / threadsPerBlock.x + 1, (fpixels - 1) / threadsPerBlock.y + 1); dim3 numBlocks(smCount * 8, 1); // initialize the device memory within a kernel. // nanoBraggSpotsInitCUDAKernel<<<numBlocks, threadsPerBlock>>>(cu_spixels, cu_fpixels, cu_floatimage, cu_omega_reduction, cu_max_I_x_reduction, cu_max_I_y_reduction, cu_rangemap); // CUDA_CHECK_RETURN(hipPeekAtLastError()); // CUDA_CHECK_RETURN(hipDeviceSynchronize()); hipLaunchKernelGGL(( nanoBraggSpotsCUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cu_spixels, cu_fpixels, cu_roi_xmin, cu_roi_xmax, cu_roi_ymin, cu_roi_ymax, cu_oversample, cu_point_pixel, cu_pixel_size, cu_subpixel_size, cu_steps, cu_detector_thickstep, cu_detector_thicksteps, cu_detector_thick, cu_detector_mu, cu_sdet_vector, cu_fdet_vector, cu_odet_vector, cu_pix0_vector, cu_curved_detector, cu_distance, cu_close_distance, cu_beam_vector, cu_Xbeam, cu_Ybeam, cu_dmin, cu_phi0, cu_phistep, cu_phisteps, cu_spindle_vector, cu_sources, cu_source_X, cu_source_Y, cu_source_Z, cu_source_I, cu_source_lambda, cu_a0, cu_b0, cu_c0, cu_xtal_shape, cu_mosaic_spread, cu_mosaic_domains, cu_mosaic_umats, cu_Na, cu_Nb, cu_Nc, cu_V_cell, cu_water_size, cu_water_F, cu_water_MW, cu_r_e_sqr, cu_fluence, cu_Avogadro, cu_spot_scale, cu_integral_form, cu_default_F, cu_interpolate, cu_Fhkl, cu_FhklParams, cu_nopolar, cu_polar_vector, cu_polarization, cu_fudge, cu_maskimage, cu_floatimage /*out*/, cu_omega_reduction/*out*/, cu_max_I_x_reduction/*out*/, cu_max_I_y_reduction /*out*/, cu_rangemap /*out*/); CUDA_CHECK_RETURN(hipPeekAtLastError()); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipMemcpy(floatimage, cu_floatimage, sizeof(*cu_floatimage) * total_pixels, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipMemcpy(omega_reduction, cu_omega_reduction, sizeof(*cu_omega_reduction) * total_pixels, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipMemcpy(max_I_x_reduction, cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipMemcpy(max_I_y_reduction, cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipMemcpy(rangemap, cu_rangemap, sizeof(*cu_rangemap) * total_pixels, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipFree(cu_sdet_vector)); CUDA_CHECK_RETURN(hipFree(cu_fdet_vector)); CUDA_CHECK_RETURN(hipFree(cu_odet_vector)); CUDA_CHECK_RETURN(hipFree(cu_pix0_vector)); CUDA_CHECK_RETURN(hipFree(cu_beam_vector)); CUDA_CHECK_RETURN(hipFree(cu_spindle_vector)); CUDA_CHECK_RETURN(hipFree(cu_polar_vector)); CUDA_CHECK_RETURN(hipFree(cu_a0)); CUDA_CHECK_RETURN(hipFree(cu_b0)); CUDA_CHECK_RETURN(hipFree(cu_c0)); CUDA_CHECK_RETURN(hipFree(cu_source_X)); 
CUDA_CHECK_RETURN(hipFree(cu_source_Y)); CUDA_CHECK_RETURN(hipFree(cu_source_Z)); CUDA_CHECK_RETURN(hipFree(cu_source_I)); CUDA_CHECK_RETURN(hipFree(cu_source_lambda)); CUDA_CHECK_RETURN(hipFree(cu_FhklParams)); CUDA_CHECK_RETURN(hipFree(cu_mosaic_umats)); CUDA_CHECK_RETURN(hipFree(cu_floatimage)); CUDA_CHECK_RETURN(hipFree(cu_omega_reduction)); CUDA_CHECK_RETURN(hipFree(cu_max_I_x_reduction)); CUDA_CHECK_RETURN(hipFree(cu_max_I_y_reduction)); CUDA_CHECK_RETURN(hipFree(cu_maskimage)); CUDA_CHECK_RETURN(hipFree(cu_rangemap)); CUDA_CHECK_RETURN(hipFree(cu_Fhkl)); *max_I = 0; *max_I_x = 0; *max_I_y = 0; *sum = 0.0; *sumsqr = 0.0; *sumn = 0; *omega_sum = 0.0; for (int i = 0; i < total_pixels; i++) { if (!rangemap[i]) { continue; } float pixel = floatimage[i]; if (pixel > (double) *max_I) { *max_I = pixel; *max_I_x = max_I_x_reduction[i]; *max_I_y = max_I_y_reduction[i]; } *sum += pixel; *sumsqr += pixel * pixel; ++(*sumn); *omega_sum += omega_reduction[i]; } free(rangemap); free(omega_reduction); free(max_I_x_reduction); free(max_I_y_reduction); } /* cubic spline interpolation functions */ __device__ static void polint(CUDAREAL *xa, CUDAREAL *ya, CUDAREAL x, CUDAREAL *y); __device__ static void polin2(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL ya[4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL *y); __device__ static void polin3(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL *x3a, CUDAREAL ya[4][4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL x3, CUDAREAL *y); /* rotate a 3-vector about a unit vector axis */ __device__ static CUDAREAL *rotate_axis(const CUDAREAL * __restrict__ v, CUDAREAL *newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi); __device__ static CUDAREAL *rotate_axis_ldg(const CUDAREAL * __restrict__ v, CUDAREAL * newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi); /* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */ __device__ static CUDAREAL unitize(CUDAREAL * vector, CUDAREAL *new_unit_vector); /* vector cross product where vector magnitude is 0th element */ __device__ static CUDAREAL *cross_product(CUDAREAL * x, CUDAREAL * y, CUDAREAL * z); /* vector inner product where vector magnitude is 0th element */ __device__ static CUDAREAL dot_product(const CUDAREAL * x, const CUDAREAL * y); __device__ static CUDAREAL dot_product_ldg(const CUDAREAL * __restrict__ x, CUDAREAL * y); /* measure magnitude of vector and put it in 0th element */ __device__ static void magnitude(CUDAREAL *vector); /* scale the magnitude of a vector */ __device__ static CUDAREAL vector_scale(CUDAREAL *vector, CUDAREAL *new_vector, CUDAREAL scale); /* rotate a 3-vector using a 9-element unitary matrix */ __device__ void rotate_umat_ldg(CUDAREAL * v, CUDAREAL *newv, const CUDAREAL * __restrict__ umat); /* Fourier transform of a truncated lattice */ __device__ static CUDAREAL sincg(CUDAREAL x, CUDAREAL N); //__device__ static CUDAREAL sincgrad(CUDAREAL x, CUDAREAL N); /* Fourier transform of a sphere */ __device__ static CUDAREAL sinc3(CUDAREAL x); /* polarization factor from vectors */ __device__ static CUDAREAL polarization_factor(CUDAREAL kahn_factor, CUDAREAL *incident, CUDAREAL *diffracted, const CUDAREAL * __restrict__ axis); __device__ __inline__ static int flatten3dindex(int x, int y, int z, int x_range, int y_range, int z_range); __device__ __inline__ CUDAREAL quickFcell_ldg(int hkls, int h_max, int h_min, int k_max, int k_min, int l_min, int l_max, int h0, int k0, int l0, int h_range, int k_range, int l_range, CUDAREAL defaultF, const CUDAREAL * 
__restrict__ Fhkl); __global__ void nanoBraggSpotsInitCUDAKernel(int spixels, int fpixels, float * floatimage, float * omega_reduction, float * max_I_x_reduction, float * max_I_y_reduction, bool * rangemap) { const int total_pixels = spixels * fpixels; const int fstride = gridDim.x * blockDim.x; const int sstride = gridDim.y * blockDim.y; const int stride = fstride * sstride; for (int pixIdx = (blockDim.y * blockIdx.y + threadIdx.y) * fstride + blockDim.x * blockIdx.x + threadIdx.x; pixIdx < total_pixels; pixIdx += stride) { const int fpixel = pixIdx % fpixels; const int spixel = pixIdx / fpixels; /* position in pixel array */ int j = spixel * fpixels + fpixel; if (j < total_pixels) { floatimage[j] = 0; omega_reduction[j] = 0; max_I_x_reduction[j] = 0; max_I_y_reduction[j] = 0; rangemap[j] = false; } } } __global__ void nanoBraggSpotsCUDAKernel(int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, CUDAREAL pixel_size, CUDAREAL subpixel_size, int steps, CUDAREAL detector_thickstep, int detector_thicksteps, CUDAREAL detector_thick, CUDAREAL detector_mu, const CUDAREAL * __restrict__ sdet_vector, const CUDAREAL * __restrict__ fdet_vector, const CUDAREAL * __restrict__ odet_vector, const CUDAREAL * __restrict__ pix0_vector, int curved_detector, CUDAREAL distance, CUDAREAL close_distance, const CUDAREAL * __restrict__ beam_vector, CUDAREAL Xbeam, CUDAREAL Ybeam, CUDAREAL dmin, CUDAREAL phi0, CUDAREAL phistep, int phisteps, const CUDAREAL * __restrict__ spindle_vector, int sources, const CUDAREAL * __restrict__ source_X, const CUDAREAL * __restrict__ source_Y, const CUDAREAL * __restrict__ source_Z, const CUDAREAL * __restrict__ source_I, const CUDAREAL * __restrict__ source_lambda, const CUDAREAL * __restrict__ a0, const CUDAREAL * __restrict__ b0, const CUDAREAL * __restrict c0, shapetype xtal_shape, CUDAREAL mosaic_spread, int mosaic_domains, const CUDAREAL * __restrict__ mosaic_umats, CUDAREAL Na, CUDAREAL Nb, CUDAREAL Nc, CUDAREAL V_cell, CUDAREAL water_size, CUDAREAL water_F, CUDAREAL water_MW, CUDAREAL r_e_sqr, CUDAREAL fluence, CUDAREAL Avogadro, CUDAREAL spot_scale, int integral_form, CUDAREAL default_F, int interpolate, const CUDAREAL * __restrict__ Fhkl, const hklParams * __restrict__ FhklParams, int nopolar, const CUDAREAL * __restrict__ polar_vector, CUDAREAL polarization, CUDAREAL fudge, const int unsigned short * __restrict__ maskimage, float * floatimage /*out*/, float * omega_reduction/*out*/, float * max_I_x_reduction/*out*/, float * max_I_y_reduction /*out*/, bool * rangemap) { __shared__ CUDAREAL s_dmin; __shared__ bool s_nopolar; __shared__ int s_phisteps; __shared__ CUDAREAL s_phi0, s_phistep; __shared__ int s_mosaic_domains; __shared__ CUDAREAL s_mosaic_spread; __shared__ shapetype s_xtal_shape; __shared__ CUDAREAL s_Na, s_Nb, s_Nc; __shared__ bool s_interpolate; __shared__ int s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, s_h_range, s_k_range, s_l_range; if (threadIdx.x == 0 && threadIdx.y == 0) { s_dmin = dmin; s_nopolar = nopolar; s_phisteps = phisteps; s_phi0 = phi0; s_phistep = phistep; s_mosaic_domains = mosaic_domains; s_mosaic_spread = mosaic_spread; s_xtal_shape = xtal_shape; s_Na = Na; s_Nb = Nb; s_Nc = Nc; s_interpolate = interpolate; s_hkls = FhklParams->hkls; s_h_max = FhklParams->h_max; s_h_min = FhklParams->h_min; s_k_max = FhklParams->k_max; s_k_min = FhklParams->k_min; s_l_max = FhklParams->l_max; s_l_min = FhklParams->l_min; s_h_range = FhklParams->h_range; s_k_range = 
FhklParams->k_range; s_l_range = FhklParams->l_range; } __syncthreads(); const int total_pixels = spixels * fpixels; const int fstride = gridDim.x * blockDim.x; const int sstride = gridDim.y * blockDim.y; const int stride = fstride * sstride; // const int tidx = blockDim.x * threadIdx.y * +threadIdx.x; // __shared__ int sharedVectors[THREADS_PER_BLOCK_TOTAL + 1][1][9]; // __shared__ CUDAREAL sharedVectors[THREADS_PER_BLOCK_TOTAL + 1][1][VECTOR_SIZE]; // CUDAREAL * tmpVector1 = sharedVectors[tidx][0]; // CUDAREAL * tmpVector2 = sharedVectors[tidx][1]; /* add background from something amorphous */ CUDAREAL F_bg = water_F; CUDAREAL I_bg = F_bg * F_bg * r_e_sqr * fluence * water_size * water_size * water_size * 1e6 * Avogadro / water_MW; // hklParams[0] = h_min; // hklParams[1] = h_max; // hklParams[2] = h_range; // hklParams[3] = k_min; // hklParams[4] = k_max; // hklParams[5] = k_range; // hklParams[6] = l_min; // hklParams[7] = l_max; // hklParams[8] = l_range; for (int pixIdx = (blockDim.y * blockIdx.y + threadIdx.y) * fstride + blockDim.x * blockIdx.x + threadIdx.x; pixIdx < total_pixels; pixIdx += stride) { const int fpixel = pixIdx % fpixels; const int spixel = pixIdx / fpixels; /* allow for just one part of detector to be rendered */ if (fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax) { //ROI region of interest continue; } /* position in pixel array */ const int j = pixIdx; /* allow for the use of a mask */ if (maskimage != NULL) { /* skip any flagged pixels in the mask */ if (maskimage[j] == 0) { continue; } } /* reset photon count for this pixel */ CUDAREAL I = I_bg; CUDAREAL omega_sub_reduction = 0.0; CUDAREAL max_I_x_sub_reduction = 0.0; CUDAREAL max_I_y_sub_reduction = 0.0; CUDAREAL polar = 0.0; if (s_nopolar) { polar = 1.0; } /* add this now to avoid problems with skipping later */ // move this to the bottom to avoid accessing global device memory. floatimage[j] = I_bg; /* loop over sub-pixels */ int subS, subF; for (subS = 0; subS < oversample; ++subS) { // Y voxel for (subF = 0; subF < oversample; ++subF) { // X voxel /* absolute mm position on detector (relative to its origin) */ CUDAREAL Fdet = subpixel_size * (fpixel * oversample + subF) + subpixel_size / 2.0; // X voxel CUDAREAL Sdet = subpixel_size * (spixel * oversample + subS) + subpixel_size / 2.0; // Y voxel // Fdet = pixel_size*fpixel; // Sdet = pixel_size*spixel; max_I_x_sub_reduction = Fdet; max_I_y_sub_reduction = Sdet; int thick_tic; for (thick_tic = 0; thick_tic < detector_thicksteps; ++thick_tic) { /* assume "distance" is to the front of the detector sensor layer */ CUDAREAL Odet = thick_tic * detector_thickstep; // Z Orthagonal voxel. 
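/* The sub-pixel's lab-frame position below is assembled from the detector basis vectors as
   pixel_pos = pix0 + Fdet*fdet_vector + Sdet*sdet_vector + Odet*odet_vector, using the file's
   1..3 vector indexing convention (element 0 is reserved for the magnitude); __ldg() routes the
   constant detector vectors through the read-only data cache. */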
/* construct detector subpixel position in 3D space */ // pixel_X = distance; // pixel_Y = Sdet-Ybeam; // pixel_Z = Fdet-Xbeam; //CUDAREAL * pixel_pos = tmpVector1; CUDAREAL pixel_pos[4]; pixel_pos[1] = Fdet * __ldg(&fdet_vector[1]) + Sdet * __ldg(&sdet_vector[1]) + Odet * __ldg(&odet_vector[1]) + __ldg(&pix0_vector[1]); // X pixel_pos[2] = Fdet * __ldg(&fdet_vector[2]) + Sdet * __ldg(&sdet_vector[2]) + Odet * __ldg(&odet_vector[2]) + __ldg(&pix0_vector[2]); // X pixel_pos[3] = Fdet * __ldg(&fdet_vector[3]) + Sdet * __ldg(&sdet_vector[3]) + Odet * __ldg(&odet_vector[3]) + __ldg(&pix0_vector[3]); // X // pixel_pos[1] = Fdet * fdet_vector[1] + Sdet * sdet_vector[1] + Odet * odet_vector[1] + pix0_vector[1]; // X // pixel_pos[2] = Fdet * fdet_vector[2] + Sdet * sdet_vector[2] + Odet * odet_vector[2] + pix0_vector[2]; // Y // pixel_pos[3] = Fdet * fdet_vector[3] + Sdet * sdet_vector[3] + Odet * odet_vector[3] + pix0_vector[3]; // Z if (curved_detector) { /* construct detector pixel that is always "distance" from the sample */ CUDAREAL dbvector[4]; dbvector[1] = distance * beam_vector[1]; dbvector[2] = distance * beam_vector[2]; dbvector[3] = distance * beam_vector[3]; /* treat detector pixel coordinates as radians */ CUDAREAL newvector[] = { 0.0, 0.0, 0.0, 0.0 }; rotate_axis(dbvector, newvector, sdet_vector, pixel_pos[2] / distance); rotate_axis(newvector, pixel_pos, fdet_vector, pixel_pos[3] / distance); // rotate(vector,pixel_pos,0,pixel_pos[3]/distance,pixel_pos[2]/distance); } /* construct the diffracted-beam unit vector to this sub-pixel */ //CUDAREAL * diffracted = tmpVector2; CUDAREAL diffracted[4]; CUDAREAL airpath = unitize(pixel_pos, diffracted); /* solid angle subtended by a pixel: (pix/airpath)^2*cos(2theta) */ CUDAREAL omega_pixel = pixel_size * pixel_size / airpath / airpath * close_distance / airpath; /* option to turn off obliquity effect, inverse-square-law only */ if (point_pixel) { omega_pixel = 1.0 / airpath / airpath; } /* now calculate detector thickness effects */ CUDAREAL capture_fraction = 1.0; if (detector_thick > 0.0 && detector_mu> 0.0) { /* inverse of effective thickness increase */ CUDAREAL parallax = dot_product_ldg(odet_vector, diffracted); capture_fraction = exp(-thick_tic * detector_thickstep / detector_mu / parallax) - exp(-(thick_tic + 1) * detector_thickstep / detector_mu / parallax); } /* loop over sources now */ int source; for (source = 0; source < sources; ++source) { /* retrieve stuff from cache */ //CUDAREAL * incident = tmpVector1; CUDAREAL incident[4]; incident[1] = -__ldg(&source_X[source]); incident[2] = -__ldg(&source_Y[source]); incident[3] = -__ldg(&source_Z[source]); CUDAREAL lambda = __ldg(&source_lambda[source]); CUDAREAL source_fraction = __ldg(&source_I[source]); /* construct the incident beam unit vector while recovering source distance */ // TODO[Giles]: Optimization! We can unitize the source vectors before passing them in. 
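/* For each source, the incident direction is unitized in place and the scattering vector is
   s = (diffracted - incident) / lambda; its half-length, stol = |s|/2 = sin(theta)/lambda,
   corresponds to a resolution d = 0.5/stol, which the dmin test below uses to skip pixels
   beyond the requested resolution cutoff. */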
unitize(incident, incident); // CUDAREAL source_path = unitize(incident, incident); // CUDAREAL source_path = norm3d(incident[1], incident[2], incident[3]); // CUDAREAL * d = tmpVector2; // d[0] = diffracted[0]; // d[1] = diffracted[1]; // d[2] = diffracted[2]; // d[3] = diffracted[3]; /* construct the scattering vector for this pixel */ // CUDAREAL * scattering = tmpVector1; CUDAREAL scattering[4]; scattering[1] = (diffracted[1] - incident[1]) / lambda; scattering[2] = (diffracted[2] - incident[2]) / lambda; scattering[3] = (diffracted[3] - incident[3]) / lambda; // CUDAREAL scattering[] = { 0.0, (diffracted[1] - incident[1]) / lambda, (diffracted[2] - incident[2]) / lambda, (diffracted[3] // - incident[3]) / lambda }; /* sin(theta)/lambda is half the scattering vector length */ // magnitude(scattering); // CUDAREAL stol = 0.5 * scattering[0]; CUDAREAL stol = 0.5 * norm3d(scattering[1], scattering[2], scattering[3]); /* rough cut to speed things up when we aren't using whole detector */ if (s_dmin > 0.0 && stol > 0.0) { if (s_dmin > 0.5 / stol) { continue; } } /* polarization factor */ if (!s_nopolar) { /* need to compute polarization factor */ polar = polarization_factor(polarization, incident, diffracted, polar_vector); } else { polar = 1.0; } /* sweep over phi angles */ for (int phi_tic = 0; phi_tic < s_phisteps; ++phi_tic) { CUDAREAL phi = s_phistep * phi_tic + s_phi0; // CUDAREAL ap[] = { 0.0, 0.0, 0.0, 0.0 }; // CUDAREAL bp[] = { 0.0, 0.0, 0.0, 0.0 }; // CUDAREAL cp[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL ap[4]; CUDAREAL bp[4]; CUDAREAL cp[4]; /* rotate about spindle if necessary */ rotate_axis_ldg(a0, ap, spindle_vector, phi); rotate_axis_ldg(b0, bp, spindle_vector, phi); rotate_axis_ldg(c0, cp, spindle_vector, phi); /* enumerate mosaic domains */ for (int mos_tic = 0; mos_tic < s_mosaic_domains; ++mos_tic) { /* apply mosaic rotation after phi rotation */ CUDAREAL a[4]; CUDAREAL b[4]; CUDAREAL c[4]; if (s_mosaic_spread > 0.0) { rotate_umat_ldg(ap, a, &mosaic_umats[mos_tic * 9]); rotate_umat_ldg(bp, b, &mosaic_umats[mos_tic * 9]); rotate_umat_ldg(cp, c, &mosaic_umats[mos_tic * 9]); } else { a[1] = ap[1]; a[2] = ap[2]; a[3] = ap[3]; b[1] = bp[1]; b[2] = bp[2]; b[3] = bp[3]; c[1] = cp[1]; c[2] = cp[2]; c[3] = cp[3]; } // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+0],mosaic_umats[mos_tic*9+1],mosaic_umats[mos_tic*9+2]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+3],mosaic_umats[mos_tic*9+4],mosaic_umats[mos_tic*9+5]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+6],mosaic_umats[mos_tic*9+7],mosaic_umats[mos_tic*9+8]); /* construct fractional Miller indicies */ // CUDAREAL * scat_s = tmpVector2; // scat_s[0] = scattering[0]; // scat_s[1] = scattering[1]; // scat_s[2] = scattering[2]; // scat_s[3] = scattering[3]; // // CUDAREAL h = dot_product(a, scat_s); // CUDAREAL k = dot_product(b, scat_s); // CUDAREAL l = dot_product(c, scat_s); CUDAREAL h = dot_product(a, scattering); CUDAREAL k = dot_product(b, scattering); CUDAREAL l = dot_product(c, scattering); /* round off to nearest whole index */ int h0 = ceil(h - 0.5); int k0 = ceil(k - 0.5); int l0 = ceil(l - 0.5); /* structure factor of the lattice (paralelpiped crystal) F_latt = sin(M_PI*Na*h)*sin(M_PI*Nb*k)*sin(M_PI*Nc*l)/sin(M_PI*h)/sin(M_PI*k)/sin(M_PI*l); */ CUDAREAL F_latt = 1.0; // Shape transform for the crystal. 
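/* Lattice shape transform F_latt: for SQUARE crystals it is the separable grating transform
   sincg(pi*h,Na)*sincg(pi*k,Nb)*sincg(pi*l,Nc); the other shapes work off the squared distance
   from the nearest reciprocal-lattice point, hrad_sqr = (h-h0)^2*Na^2 + (k-k0)^2*Nb^2 + (l-l0)^2*Nc^2,
   using sinc3 for ROUND, a Gaussian falloff for GAUSS, and a hard radial cutoff for TOPHAT. */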
CUDAREAL hrad_sqr = 0.0; if (s_xtal_shape == SQUARE) { /* xtal is a paralelpiped */ if (Na > 1) { // F_latt *= sincgrad(h, s_Na); F_latt *= sincg(M_PI * h, s_Na); } if (Nb > 1) { // F_latt *= sincgrad(k, s_Nb); F_latt *= sincg(M_PI * k, s_Nb); } if (Nc > 1) { // F_latt *= sincgrad(l, s_Nc); F_latt *= sincg(M_PI * l, s_Nc); } } else { /* handy radius in reciprocal space, squared */ hrad_sqr = (h - h0) * (h - h0) * Na * Na + (k - k0) * (k - k0) * Nb * Nb + (l - l0) * (l - l0) * Nc * Nc; } if (s_xtal_shape == ROUND) { /* use sinc3 for elliptical xtal shape, correcting for sqrt of volume ratio between cube and sphere */ F_latt = Na * Nb * Nc * 0.723601254558268 * sinc3(M_PI * sqrt(hrad_sqr * fudge)); } if (s_xtal_shape == GAUSS) { /* fudge the radius so that volume and FWHM are similar to square_xtal spots */ F_latt = Na * Nb * Nc * exp(-(hrad_sqr / 0.63 * fudge)); } if (s_xtal_shape == TOPHAT) { /* make a flat-top spot of same height and volume as square_xtal spots */ F_latt = Na * Nb * Nc * (hrad_sqr * fudge < 0.3969); } /* no need to go further if result will be zero? */ if (F_latt == 0.0 && water_size == 0.0) continue; /* find nearest point on Ewald sphere surface? */ if (integral_form) { /* need to calculate reciprocal matrix */ /* various cross products */ CUDAREAL a_cross_b[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL b_cross_c[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL c_cross_a[] = { 0.0, 0.0, 0.0, 0.0 }; cross_product(a, b, a_cross_b); cross_product(b, c, b_cross_c); cross_product(c, a, c_cross_a); /* new reciprocal-space cell vectors */ CUDAREAL a_star[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL b_star[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL c_star[] = { 0.0, 0.0, 0.0, 0.0 }; vector_scale(b_cross_c, a_star, 1e20 / V_cell); vector_scale(c_cross_a, b_star, 1e20 / V_cell); vector_scale(a_cross_b, c_star, 1e20 / V_cell); /* reciprocal-space coordinates of nearest relp */ CUDAREAL relp[] = { 0.0, 0.0, 0.0, 0.0 }; relp[1] = h0 * a_star[1] + k0 * b_star[1] + l0 * c_star[1]; relp[2] = h0 * a_star[2] + k0 * b_star[2] + l0 * c_star[2]; relp[3] = h0 * a_star[3] + k0 * b_star[3] + l0 * c_star[3]; // d_star = magnitude(relp) /* reciprocal-space coordinates of center of Ewald sphere */ CUDAREAL Ewald0[] = { 0.0, 0.0, 0.0, 0.0 }; Ewald0[1] = -incident[1] / lambda / 1e10; Ewald0[2] = -incident[2] / lambda / 1e10; Ewald0[3] = -incident[3] / lambda / 1e10; // 1/lambda = magnitude(Ewald0) /* distance from Ewald sphere in lambda=1 units */ CUDAREAL dEwald0[] = { 0.0, 0.0, 0.0, 0.0 }; dEwald0[1] = relp[1] - Ewald0[1]; dEwald0[2] = relp[2] - Ewald0[2]; dEwald0[3] = relp[3] - Ewald0[3]; magnitude(dEwald0); CUDAREAL d_r = dEwald0[0] - 1.0; /* unit vector of diffracted ray through relp */ CUDAREAL diffracted0[] = { 0.0, 0.0, 0.0, 0.0 }; unitize(dEwald0, diffracted0); /* intersection with detector plane */ CUDAREAL xd = dot_product_ldg(fdet_vector, diffracted0); CUDAREAL yd = dot_product_ldg(sdet_vector, diffracted0); CUDAREAL zd = dot_product_ldg(odet_vector, diffracted0); /* where does the central direct-beam hit */ CUDAREAL xd0 = dot_product_ldg(fdet_vector, incident); CUDAREAL yd0 = dot_product_ldg(sdet_vector, incident); CUDAREAL zd0 = dot_product_ldg(odet_vector, incident); /* convert to mm coordinates */ CUDAREAL Fdet0 = distance * (xd / zd) + Xbeam; CUDAREAL Sdet0 = distance * (yd / zd) + Ybeam; //printf("GOTHERE %g %g %g %g\n",Fdet,Sdet,Fdet0,Sdet0); CUDAREAL test = exp(-((Fdet - Fdet0) * (Fdet - Fdet0) + (Sdet - Sdet0) * (Sdet - Sdet0) + d_r * d_r) / 1e-8); } // end of integral form /* structure factor of the unit cell */ 
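/* Unit-cell structure factor F_cell: with interpolation enabled and (h,k,l) safely inside the
   tabulated range, a 4x4x4 neighborhood of Fhkl is gathered and tricubic-interpolated via polin3();
   otherwise quickFcell_ldg() does a nearest-neighbor lookup, falling back to default_F whenever
   (h0,k0,l0) lies outside the h/k/l bounds carried in hklParams. */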
CUDAREAL F_cell = default_F; if (s_interpolate) { int h0_flr = floor(h); int k0_flr = floor(k); int l0_flr = floor(l); if (((h - s_h_min + 3) > s_h_range) || (h - 2 < s_h_min) || ((k - s_k_min + 3) > s_k_range) || (k - 2 < s_k_min) || ((l - s_l_min + 3) > s_l_range) || (l - 2 < s_l_min)) { // if (babble) { // babble = 0; // printf("WARNING: out of range for three point interpolation: h,k,l,h0,k0,l0: %g,%g,%g,%d,%d,%d \n", h, k, l, h0, // k0, l0); // printf("WARNING: further warnings will not be printed! "); // } F_cell = quickFcell_ldg(s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, h0, k0, l0, s_h_range, s_k_range, s_l_range, default_F, Fhkl); } else { /* integer versions of nearest HKL indicies */ int h_interp[] = { 0.0, 0.0, 0.0, 0.0 }; int k_interp[] = { 0.0, 0.0, 0.0, 0.0 }; int l_interp[] = { 0.0, 0.0, 0.0, 0.0 }; h_interp[0] = h0_flr - 1; h_interp[1] = h0_flr; h_interp[2] = h0_flr + 1; h_interp[3] = h0_flr + 2; k_interp[0] = k0_flr - 1; k_interp[1] = k0_flr; k_interp[2] = k0_flr + 1; k_interp[3] = k0_flr + 2; l_interp[0] = l0_flr - 1; l_interp[1] = l0_flr; l_interp[2] = l0_flr + 1; l_interp[3] = l0_flr + 2; /* polin function needs doubles */ CUDAREAL h_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL k_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL l_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; h_interp_d[0] = (CUDAREAL) h_interp[0]; h_interp_d[1] = (CUDAREAL) h_interp[1]; h_interp_d[2] = (CUDAREAL) h_interp[2]; h_interp_d[3] = (CUDAREAL) h_interp[3]; k_interp_d[0] = (CUDAREAL) k_interp[0]; k_interp_d[1] = (CUDAREAL) k_interp[1]; k_interp_d[2] = (CUDAREAL) k_interp[2]; k_interp_d[3] = (CUDAREAL) k_interp[3]; l_interp_d[0] = (CUDAREAL) l_interp[0]; l_interp_d[1] = (CUDAREAL) l_interp[1]; l_interp_d[2] = (CUDAREAL) l_interp[2]; l_interp_d[3] = (CUDAREAL) l_interp[3]; /* now populate the "y" values (nearest four structure factors in each direction) */ CUDAREAL sub_Fhkl[4][4][4]; int i1, i2, i3; for (i1 = 0; i1 < 4; i1++) { for (i2 = 0; i2 < 4; i2++) { for (i3 = 0; i3 < 4; i3++) { sub_Fhkl[i1][i2][i3] = __ldg( &Fhkl[flatten3dindex(h_interp[i1] - s_h_min, k_interp[i2] - s_k_min, l_interp[i3] - s_l_min, s_h_range, s_k_range, s_l_range)]); } } } /* run the tricubic polynomial interpolation */ polin3(h_interp_d, k_interp_d, l_interp_d, sub_Fhkl, h, k, l, &F_cell); } } else { // if (!interpolate) { // if (hkls && (h0 <= hklParams[1]) && (h0 >= hklParams[0]) && (k0 <= hklParams[4]) && (k0 >= hklParams[3]) && (l0 <= hklParams[7]) && (l0 >= hklParams[6])) { // /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - hklParams[0], k0 - hklParams[3], l0 - hklParams[6], hklParams[2], hklParams[5], hklParams[8])]); // } else { // F_cell = default_F; // usually zero // } // } F_cell = quickFcell_ldg(s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, h0, k0, l0, s_h_range, s_k_range, s_l_range, default_F, Fhkl); // if (s_hkls && (h0 <= s_h_max) && (h0 >= s_h_min) && (k0 <= s_k_max) && (k0 >= s_k_min) && (l0 <= s_l_max) && (l0 >= s_l_min)) { // /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - s_h_min, k0 - s_k_min, l0 - s_l_min, s_h_range, s_k_range, s_l_range)]); //// F_cell = __ldg(&Fhkl[flatten3dindex(h0 - __ldg(&FhklParams->h_min), k0 - __ldg(&FhklParams->k_min), l0 - __ldg(&FhklParams->l_min), s_h_range, s_k_range, s_l_range)]); //// F_cell = __ldg(&Fhkl[flatten3dindex(h0 - FhklParams->h_min, k0 - FhklParams->k_min, l0 - FhklParams->l_min, FhklParams->h_range, FhklParams->k_range, FhklParams->l_range)]); // } } /* now we have the 
structure factor for this pixel */ /* convert amplitudes into intensity (photons per steradian) */ I += F_cell * F_cell * F_latt * F_latt * source_fraction * capture_fraction * omega_pixel; omega_sub_reduction += omega_pixel; } /* end of mosaic loop */ } /* end of phi loop */ } /* end of source loop */ } /* end of detector thickness loop */ } /* end of sub-pixel y loop */ } /* end of sub-pixel x loop */ const double photons = I_bg + (r_e_sqr * spot_scale * fluence * polar * I) / steps; floatimage[j] = photons; omega_reduction[j] = omega_sub_reduction; // shared contention max_I_x_reduction[j] = max_I_x_sub_reduction; max_I_y_reduction[j] = max_I_y_sub_reduction; rangemap[j] = true; } } __device__ __inline__ CUDAREAL quickFcell_ldg(int hkls, int h_max, int h_min, int k_max, int k_min, int l_max, int l_min, int h0, int k0, int l0, int h_range, int k_range, int l_range, CUDAREAL defaultF, const CUDAREAL * __restrict__ Fhkl) { if (hkls && (h0 <= h_max) && (h0 >= h_min) && (k0 <= k_max) && (k0 >= k_min) && (l0 <= l_max) && (l0 >= l_min)) { /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - s_h_min, k0 - s_k_min, l0 - s_l_min, s_h_range, s_k_range, s_l_range)]); return __ldg(&Fhkl[flatten3dindex(h0 - h_min, k0 - k_min, l0 - l_min, h_range, k_range, l_range)]); } else { return defaultF; // usually zero } } __device__ __inline__ int flatten3dindex(int x, int y, int z, int x_range, int y_range, int z_range) { return x * y_range * z_range + y * z_range + z; } /* rotate a point about a unit vector axis */ __device__ CUDAREAL *rotate_axis(const CUDAREAL * __restrict__ v, CUDAREAL * newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi) { const CUDAREAL sinphi = sin(phi); const CUDAREAL cosphi = cos(phi); const CUDAREAL a1 = axis[1]; const CUDAREAL a2 = axis[2]; const CUDAREAL a3 = axis[3]; const CUDAREAL v1 = v[1]; const CUDAREAL v2 = v[2]; const CUDAREAL v3 = v[3]; const CUDAREAL dot = (a1 * v1 + a2 * v2 + a3 * v3) * (1.0 - cosphi); newv[1] = a1 * dot + v1 * cosphi + (-a3 * v2 + a2 * v3) * sinphi; newv[2] = a2 * dot + v2 * cosphi + (+a3 * v1 - a1 * v3) * sinphi; newv[3] = a3 * dot + v3 * cosphi + (-a2 * v1 + a1 * v2) * sinphi; return newv; } /* rotate a point about a unit vector axis */ __device__ CUDAREAL *rotate_axis_ldg(const CUDAREAL * __restrict__ v, CUDAREAL * newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi) { const CUDAREAL sinphi = sin(phi); const CUDAREAL cosphi = cos(phi); const CUDAREAL a1 = __ldg(&axis[1]); const CUDAREAL a2 = __ldg(&axis[2]); const CUDAREAL a3 = __ldg(&axis[3]); const CUDAREAL v1 = __ldg(&v[1]); const CUDAREAL v2 = __ldg(&v[2]); const CUDAREAL v3 = __ldg(&v[3]); const CUDAREAL dot = (a1 * v1 + a2 * v2 + a3 * v3) * (1.0 - cosphi); newv[1] = a1 * dot + v1 * cosphi + (-a3 * v2 + a2 * v3) * sinphi; newv[2] = a2 * dot + v2 * cosphi + (+a3 * v1 - a1 * v3) * sinphi; newv[3] = a3 * dot + v3 * cosphi + (-a2 * v1 + a1 * v2) * sinphi; return newv; } /* make provided vector a unit vector */ __device__ CUDAREAL unitize(CUDAREAL * vector, CUDAREAL * new_unit_vector) { CUDAREAL v1 = vector[1]; CUDAREAL v2 = vector[2]; CUDAREAL v3 = vector[3]; // CUDAREAL mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3); CUDAREAL mag = norm3d(v1, v2, v3); if (mag != 0.0) { /* normalize it */ new_unit_vector[0] = mag; new_unit_vector[1] = v1 / mag; new_unit_vector[2] = v2 / mag; new_unit_vector[3] = v3 / mag; } else { /* can't normalize, report zero vector */ new_unit_vector[0] = 0.0; new_unit_vector[1] = 0.0; new_unit_vector[2] = 0.0; new_unit_vector[3] = 0.0; } 
return mag; } /* vector cross product where vector magnitude is 0th element */ __device__ CUDAREAL *cross_product(CUDAREAL * x, CUDAREAL * y, CUDAREAL * z) { z[1] = x[2] * y[3] - x[3] * y[2]; z[2] = x[3] * y[1] - x[1] * y[3]; z[3] = x[1] * y[2] - x[2] * y[1]; z[0] = 0.0; return z; } /* vector inner product where vector magnitude is 0th element */ __device__ CUDAREAL dot_product(const CUDAREAL * x, const CUDAREAL * y) { return x[1] * y[1] + x[2] * y[2] + x[3] * y[3]; } __device__ CUDAREAL dot_product_ldg(const CUDAREAL * __restrict__ x, CUDAREAL * y) { return __ldg(&x[1]) * y[1] + __ldg(&x[2]) * y[2] + __ldg(&x[3]) * y[3]; } /* measure magnitude of provided vector */ __device__ void magnitude(CUDAREAL *vector) { /* measure the magnitude */ vector[0] = sqrt(vector[1] * vector[1] + vector[2] * vector[2] + vector[3] * vector[3]); } /* scale magnitude of provided vector */ __device__ CUDAREAL vector_scale(CUDAREAL *vector, CUDAREAL *new_vector, CUDAREAL scale) { new_vector[1] = scale * vector[1]; new_vector[2] = scale * vector[2]; new_vector[3] = scale * vector[3]; magnitude(new_vector); return new_vector[0]; } /* rotate a vector using a 9-element unitary matrix */ __device__ void rotate_umat_ldg(CUDAREAL * v, CUDAREAL *newv, const CUDAREAL * __restrict__ umat) { /* for convenience, assign matrix x-y coordinate */ CUDAREAL uxx = __ldg(&umat[0]); CUDAREAL uxy = __ldg(&umat[1]); CUDAREAL uxz = __ldg(&umat[2]); CUDAREAL uyx = __ldg(&umat[3]); CUDAREAL uyy = __ldg(&umat[4]); CUDAREAL uyz = __ldg(&umat[5]); CUDAREAL uzx = __ldg(&umat[6]); CUDAREAL uzy = __ldg(&umat[7]); CUDAREAL uzz = __ldg(&umat[8]); CUDAREAL v1 = v[1]; CUDAREAL v2 = v[2]; CUDAREAL v3 = v[3]; /* rotate the vector (x=1,y=2,z=3) */ newv[1] = uxx * v1 + uxy * v2 + uxz * v3; newv[2] = uyx * v1 + uyy * v2 + uyz * v3; newv[3] = uzx * v1 + uzy * v2 + uzz * v3; } /* Fourier transform of a grating */ __device__ CUDAREAL sincg(CUDAREAL x, CUDAREAL N) { if (x != 0.0) return sin(x * N) / sin(x); return N; } __device__ CUDAREAL sincgrad(CUDAREAL x, CUDAREAL N) { if (x != 0.0) return sinpi(x * N) / sinpi(x); return N; } /* Fourier transform of a sphere */ __device__ CUDAREAL sinc3(CUDAREAL x) { if (x != 0.0) return 3.0 * (sin(x) / x - cos(x)) / (x * x); return 1.0; } __device__ void polint(CUDAREAL *xa, CUDAREAL *ya, CUDAREAL x, CUDAREAL *y) { CUDAREAL x0, x1, x2, x3; x0 = (x - xa[1]) * (x - xa[2]) * (x - xa[3]) * ya[0] / ((xa[0] - xa[1]) * (xa[0] - xa[2]) * (xa[0] - xa[3])); x1 = (x - xa[0]) * (x - xa[2]) * (x - xa[3]) * ya[1] / ((xa[1] - xa[0]) * (xa[1] - xa[2]) * (xa[1] - xa[3])); x2 = (x - xa[0]) * (x - xa[1]) * (x - xa[3]) * ya[2] / ((xa[2] - xa[0]) * (xa[2] - xa[1]) * (xa[2] - xa[3])); x3 = (x - xa[0]) * (x - xa[1]) * (x - xa[2]) * ya[3] / ((xa[3] - xa[0]) * (xa[3] - xa[1]) * (xa[3] - xa[2])); *y = x0 + x1 + x2 + x3; } __device__ void polin2(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL ya[4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL *y) { int j; CUDAREAL ymtmp[4]; for (j = 1; j <= 4; j++) { polint(x2a, ya[j - 1], x2, &ymtmp[j - 1]); } polint(x1a, ymtmp, x1, y); } __device__ void polin3(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL *x3a, CUDAREAL ya[4][4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL x3, CUDAREAL *y) { int j; CUDAREAL ymtmp[4]; for (j = 1; j <= 4; j++) { polin2(x2a, x3a, &ya[j - 1][0], x2, x3, &ymtmp[j - 1]); } polint(x1a, ymtmp, x1, y); } /* polarization factor */ __device__ CUDAREAL polarization_factor(CUDAREAL kahn_factor, CUDAREAL *incident, CUDAREAL *diffracted, const CUDAREAL * __restrict__ axis) { CUDAREAL cos2theta, cos2theta_sqr, 
sin2theta_sqr; CUDAREAL psi = 0.0; CUDAREAL E_in[4], B_in[4], E_out[4], B_out[4]; // these are already unitized before entering this loop. Optimize this out. // unitize(incident, incident); // unitize(diffracted, diffracted); /* component of diffracted unit vector along incident beam unit vector */ cos2theta = dot_product(incident, diffracted); cos2theta_sqr = cos2theta * cos2theta; sin2theta_sqr = 1 - cos2theta_sqr; if (kahn_factor != 0.0) { /* tricky bit here is deciding which direciton the E-vector lies in for each source here we assume it is closest to the "axis" defined above */ CUDAREAL unitAxis[] = { axis[0], axis[1], axis[2], axis[3] }; // this is already unitized. Optimize this out. unitize(unitAxis, unitAxis); /* cross product to get "vertical" axis that is orthogonal to the cannonical "polarization" */ cross_product(unitAxis, incident, B_in); /* make it a unit vector */ unitize(B_in, B_in); /* cross product with incident beam to get E-vector direction */ cross_product(incident, B_in, E_in); /* make it a unit vector */ unitize(E_in, E_in); /* get components of diffracted ray projected onto the E-B plane */ E_out[0] = dot_product(diffracted, E_in); B_out[0] = dot_product(diffracted, B_in); /* compute the angle of the diffracted ray projected onto the incident E-B plane */ psi = -atan2(B_out[0], E_out[0]); } /* correction for polarized incident beam */ return 0.5 * (1.0 + cos2theta_sqr - kahn_factor * cos(2 * psi) * sin2theta_sqr); }
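/* A minimal host-side sketch of the Fhkl flattening used above: the triple-pointer table is copied
   into a linear array indexed as h*k_range*l_range + k*l_range + l, which is the layout that
   flatten3dindex() and quickFcell_ldg() assume on the device. The ranges and values below are made
   up for illustration, and the h_min/k_min/l_min offsets of the real code are dropped for brevity. */
#include <cstdio>

static int flatten3dindex_host(int x, int y, int z, int x_range, int y_range, int z_range) {
    return x * y_range * z_range + y * z_range + z;   /* same row-major layout as the device helper */
}

int main() {
    const int h_range = 2, k_range = 3, l_range = 4;  /* hypothetical HKL extents */
    double Fhkl[h_range][k_range][l_range];
    double FhklLinear[h_range * k_range * l_range];

    /* fill with a recognizable pattern and flatten exactly like the host loop in nanoBraggSpotsCUDA */
    for (int h = 0; h < h_range; h++)
        for (int k = 0; k < k_range; k++)
            for (int l = 0; l < l_range; l++) {
                Fhkl[h][k][l] = 100.0 * h + 10.0 * k + l;
                FhklLinear[h * k_range * l_range + k * l_range + l] = Fhkl[h][k][l];
            }

    /* nearest-neighbor lookup, mirroring quickFcell_ldg() without the __ldg() cache hint */
    const int h0 = 1, k0 = 2, l0 = 3;
    printf("F(%d,%d,%d) = %g\n", h0, k0, l0,
           FhklLinear[flatten3dindex_host(h0, k0, l0, h_range, k_range, l_range)]);   /* prints 123 */
    return 0;
}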
2e7928cca0cd9f36ecbac19c16a1f602ff4d6ee3.cu
/* ============================================================================ Name : nanoBraggCUDA.cu Author : Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include <stdio.h> #include "nanotypes.h" #include "cuda_compatibility.h" static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #ifndef CUDAREAL #define CUDAREAL float #endif #define THREADS_PER_BLOCK_X 128 #define THREADS_PER_BLOCK_Y 1 #define THREADS_PER_BLOCK_TOTAL (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y) #define VECTOR_SIZE 4 struct hklParams { int hkls; int h_min; int h_max; int h_range; int k_min; int k_max; int k_range; int l_min; int l_max; int l_range; }; /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement << " returned " << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line << std::endl; exit(1); } static cudaError_t cudaMemcpyVectorDoubleToDevice(CUDAREAL *dst, double *src, size_t vector_items) { CUDAREAL * temp = new CUDAREAL[vector_items]; for (size_t i = 0; i < vector_items; i++) { temp[i] = src[i]; } cudaError_t ret = cudaMemcpy(dst, temp, sizeof(*dst) * vector_items, cudaMemcpyHostToDevice); delete temp; return ret; } /* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */ double cpu_unitize(double *vector, double *new_unit_vector); double cpu_unitize(double * vector, double * new_unit_vector) { double v1 = vector[1]; double v2 = vector[2]; double v3 = vector[3]; double mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3); if (mag != 0.0) { /* normalize it */ new_unit_vector[0] = mag; new_unit_vector[1] = v1 / mag; new_unit_vector[2] = v2 / mag; new_unit_vector[3] = v3 / mag; } else { /* can't normalize, report zero vector */ new_unit_vector[0] = 0.0; new_unit_vector[1] = 0.0; new_unit_vector[2] = 0.0; new_unit_vector[3] = 0.0; } return mag; } __global__ void nanoBraggSpotsInitCUDAKernel(int spixels, int fpixesl, float * floatimage, float * omega_reduction, float * max_I_x_reduction, float * max_I_y_reduction, bool * rangemap); __global__ void nanoBraggSpotsCUDAKernel(int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, CUDAREAL pixel_size, CUDAREAL subpixel_size, int steps, CUDAREAL detector_thickstep, int detector_thicksteps, CUDAREAL detector_thick, CUDAREAL detector_mu, const CUDAREAL * __restrict__ sdet_vector, const CUDAREAL * __restrict__ fdet_vector, const CUDAREAL * __restrict__ odet_vector, const CUDAREAL * __restrict__ pix0_vector, int curved_detector, CUDAREAL distance, CUDAREAL close_distance, const CUDAREAL * __restrict__ beam_vector, CUDAREAL Xbeam, CUDAREAL Ybeam, CUDAREAL dmin, CUDAREAL phi0, CUDAREAL phistep, int phisteps, const CUDAREAL * __restrict__ spindle_vector, int sources, const CUDAREAL * __restrict__ source_X, const CUDAREAL * __restrict__ source_Y, const CUDAREAL * __restrict__ source_Z, const CUDAREAL * __restrict__ source_I, const CUDAREAL * __restrict__ source_lambda, const CUDAREAL * __restrict__ a0, const CUDAREAL * __restrict__ b0, const CUDAREAL * __restrict c0, 
shapetype xtal_shape, CUDAREAL mosaic_spread, int mosaic_domains, const CUDAREAL * __restrict__ mosaic_umats, CUDAREAL Na, CUDAREAL Nb, CUDAREAL Nc, CUDAREAL V_cell, CUDAREAL water_size, CUDAREAL water_F, CUDAREAL water_MW, CUDAREAL r_e_sqr, CUDAREAL fluence, CUDAREAL Avogadro, CUDAREAL spot_scale, int integral_form, CUDAREAL default_F, int interpolate, const CUDAREAL * __restrict__ Fhkl, const hklParams * __restrict__ Fhklparams, int nopolar, const CUDAREAL * __restrict__ polar_vector, CUDAREAL polarization, CUDAREAL fudge, const int unsigned short * __restrict__ maskimage, float * floatimage /*out*/, float * omega_reduction/*out*/, float * max_I_x_reduction/*out*/, float * max_I_y_reduction /*out*/, bool * rangemap); extern "C" void nanoBraggSpotsCUDA(int deviceId, int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, double pixel_size, double subpixel_size, int steps, double detector_thickstep, int detector_thicksteps, double detector_thick, double detector_mu, double sdet_vector[4], double fdet_vector[4], double odet_vector[4], double pix0_vector[4], int curved_detector, double distance, double close_distance, double beam_vector[4], double Xbeam, double Ybeam, double dmin, double phi0, double phistep, int phisteps, double spindle_vector[4], int sources, double *source_X, double *source_Y, double * source_Z, double * source_I, double * source_lambda, double a0[4], double b0[4], double c0[4], shapetype xtal_shape, double mosaic_spread, int mosaic_domains, double * mosaic_umats, double Na, double Nb, double Nc, double V_cell, double water_size, double water_F, double water_MW, double r_e_sqr, double fluence, double Avogadro, int integral_form, double default_F, int interpolate, double *** Fhkl, int h_min, int h_max, int h_range, int k_min, int k_max, int k_range, int l_min, int l_max, int l_range, int hkls, int nopolar, double polar_vector[4], double polarization, double fudge, int unsigned short * maskimage, float * floatimage /*out*/, double * omega_sum/*out*/, int * sumn /*out*/, double * sum /*out*/, double * sumsqr /*out*/, double * max_I/*out*/, double * max_I_x/*out*/, double * max_I_y /*out*/, double spot_scale) { int total_pixels = spixels * fpixels; cudaSetDevice(deviceId); /*allocate and zero reductions */ bool * rangemap = (bool*) calloc(total_pixels, sizeof(bool)); float * omega_reduction = (float*) calloc(total_pixels, sizeof(float)); float * max_I_x_reduction = (float*) calloc(total_pixels, sizeof(float)); float * max_I_y_reduction = (float*) calloc(total_pixels, sizeof(float)); /* clear memory (TODO: consider this being optional) */ memset(floatimage, 0, sizeof(typeof(*floatimage)) * total_pixels); /*create transfer arguments to device space*/ int cu_spixels = spixels, cu_fpixels = fpixels; int cu_roi_xmin = roi_xmin, cu_roi_xmax = roi_xmax, cu_roi_ymin = roi_ymin, cu_roi_ymax = roi_ymax; int cu_oversample = oversample; int cu_point_pixel = point_pixel; CUDAREAL cu_pixel_size = pixel_size, cu_subpixel_size = subpixel_size; int cu_steps = steps; CUDAREAL cu_detector_thickstep = detector_thickstep, cu_detector_thick = detector_thick, cu_detector_mu = detector_mu; int cu_detector_thicksteps = detector_thicksteps; int cu_curved_detector = curved_detector; CUDAREAL cu_distance = distance, cu_close_distance = close_distance; CUDAREAL cu_Xbeam = Xbeam, cu_Ybeam = Ybeam; CUDAREAL cu_dmin = dmin, cu_phi0 = phi0, cu_phistep = phistep; int cu_phisteps = phisteps; shapetype cu_xtal_shape = xtal_shape; int cu_sources = 
sources; CUDAREAL cu_mosaic_spread = mosaic_spread; int cu_mosaic_domains = mosaic_domains; CUDAREAL cu_Na = Na, cu_Nb = Nb, cu_Nc = Nc, cu_V_cell = V_cell, cu_water_size = water_size, cu_water_F = water_F, cu_water_MW = water_MW; CUDAREAL cu_r_e_sqr = r_e_sqr, cu_fluence = fluence, cu_Avogadro = Avogadro, cu_spot_scale = spot_scale; int cu_integral_form = integral_form; CUDAREAL cu_default_F = default_F; int cu_interpolate = interpolate; // int cu_h_min = h_min, cu_h_max = h_max, cu_h_range = h_range; // int cu_k_min = k_min, cu_k_max = k_max, cu_k_range = k_range; // int cu_l_min = l_min, cu_l_max = l_max, cu_l_range = l_range; // int cu_hkls = hkls; int cu_nopolar = nopolar; CUDAREAL cu_polarization = polarization, cu_fudge = fudge; hklParams FhklParams = { hkls, h_min, h_max, h_range, k_min, k_max, k_range, l_min, l_max, l_range }; hklParams * cu_FhklParams; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_FhklParams, sizeof(*cu_FhklParams))); CUDA_CHECK_RETURN(cudaMemcpy(cu_FhklParams, &FhklParams, sizeof(*cu_FhklParams), cudaMemcpyHostToDevice)); const int vector_length = 4; CUDAREAL * cu_sdet_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_sdet_vector, sizeof(*cu_sdet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_sdet_vector, sdet_vector, vector_length)); CUDAREAL * cu_fdet_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_fdet_vector, sizeof(*cu_fdet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_fdet_vector, fdet_vector, vector_length)); CUDAREAL * cu_odet_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_odet_vector, sizeof(*cu_odet_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_odet_vector, odet_vector, vector_length)); CUDAREAL * cu_pix0_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_pix0_vector, sizeof(*cu_pix0_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_pix0_vector, pix0_vector, vector_length)); CUDAREAL * cu_beam_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_beam_vector, sizeof(*cu_beam_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_beam_vector, beam_vector, vector_length)); CUDAREAL * cu_spindle_vector; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_spindle_vector, sizeof(*cu_spindle_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_spindle_vector, spindle_vector, vector_length)); CUDAREAL * cu_a0; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_a0, sizeof(*cu_a0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_a0, a0, vector_length)); CUDAREAL * cu_b0; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_b0, sizeof(*cu_b0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_b0, b0, vector_length)); CUDAREAL * cu_c0; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_c0, sizeof(*cu_c0) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_c0, c0, vector_length)); // Unitize polar vector before sending it to the GPU. Optimization do it only once here rather than multiple time per pixel in the GPU. 
CUDAREAL * cu_polar_vector; double polar_vector_unitized[4]; cpu_unitize(polar_vector, polar_vector_unitized); CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_polar_vector, sizeof(*cu_polar_vector) * vector_length)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_polar_vector, polar_vector_unitized, vector_length)); CUDAREAL * cu_source_X = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_X, sizeof(*cu_source_X) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_X, source_X, sources)); CUDAREAL * cu_source_Y = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_Y, sizeof(*cu_source_Y) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_Y, source_Y, sources)); CUDAREAL * cu_source_Z = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_Z, sizeof(*cu_source_Z) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_Z, source_Z, sources)); CUDAREAL * cu_source_I = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_I, sizeof(*cu_source_I) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_I, source_I, sources)); CUDAREAL * cu_source_lambda = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_source_lambda, sizeof(*cu_source_lambda) * sources)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_source_lambda, source_lambda, sources)); CUDAREAL * cu_mosaic_umats = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_mosaic_umats, sizeof(*cu_mosaic_umats) * mosaic_domains * 9)); CUDA_CHECK_RETURN(cudaMemcpyVectorDoubleToDevice(cu_mosaic_umats, mosaic_umats, mosaic_domains * 9)); float * cu_floatimage = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_floatimage, sizeof(*cu_floatimage) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_floatimage, floatimage, sizeof(*cu_floatimage) * total_pixels, cudaMemcpyHostToDevice)); float * cu_omega_reduction = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_omega_reduction, sizeof(*cu_omega_reduction) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_omega_reduction, omega_reduction, sizeof(*cu_omega_reduction) * total_pixels, cudaMemcpyHostToDevice)); float * cu_max_I_x_reduction = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_max_I_x_reduction, max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels, cudaMemcpyHostToDevice)); float * cu_max_I_y_reduction = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_max_I_y_reduction, max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels, cudaMemcpyHostToDevice)); bool * cu_rangemap = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_rangemap, sizeof(*cu_rangemap) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_rangemap, rangemap, sizeof(*cu_rangemap) * total_pixels, cudaMemcpyHostToDevice)); int unsigned short * cu_maskimage = NULL; if (maskimage != NULL) { CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_maskimage, sizeof(*cu_maskimage) * total_pixels)); CUDA_CHECK_RETURN(cudaMemcpy(cu_maskimage, maskimage, sizeof(*cu_maskimage) * total_pixels, cudaMemcpyHostToDevice)); } int hklsize = h_range * k_range * l_range; CUDAREAL * FhklLinear = (CUDAREAL*) calloc(hklsize, sizeof(*FhklLinear)); for (int h = 0; h < h_range; h++) { for (int k = 0; k < k_range; k++) { // memcpy(FhklLinear + (h * k_range * l_range + k * l_range), Fhkl[h][k], sizeof(*FhklLinear) * l_range); for (int l = 0; l < l_range; l++) { // convert Fhkl 
double to CUDAREAL FhklLinear[h * k_range * l_range + k * l_range + l] = Fhkl[h][k][l]; } } } CUDAREAL * cu_Fhkl = NULL; CUDA_CHECK_RETURN(cudaMalloc((void ** )&cu_Fhkl, sizeof(*cu_Fhkl) * hklsize)); CUDA_CHECK_RETURN(cudaMemcpy(cu_Fhkl, FhklLinear, sizeof(*cu_Fhkl) * hklsize, cudaMemcpyHostToDevice)); free(FhklLinear); //int deviceId = 0; CUDA_CHECK_RETURN(cudaGetDevice(&deviceId)); cudaDeviceProp deviceProps = { 0 }; CUDA_CHECK_RETURN(cudaGetDeviceProperties(&deviceProps, deviceId)); int smCount = deviceProps.multiProcessorCount; // CUDA_CHECK_RETURN(cudaFuncSetCacheConfig(nanoBraggSpotsCUDAKernel, cudaFuncCachePreferShared)); // CUDA_CHECK_RETURN(cudaFuncSetCacheConfig(nanoBraggSpotsCUDAKernel, cudaFuncCachePreferL1)); dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); // dim3 numBlocks((spixels - 1) / threadsPerBlock.x + 1, (fpixels - 1) / threadsPerBlock.y + 1); dim3 numBlocks(smCount * 8, 1); // initialize the device memory within a kernel. // nanoBraggSpotsInitCUDAKernel<<<numBlocks, threadsPerBlock>>>(cu_spixels, cu_fpixels, cu_floatimage, cu_omega_reduction, cu_max_I_x_reduction, cu_max_I_y_reduction, cu_rangemap); // CUDA_CHECK_RETURN(cudaPeekAtLastError()); // CUDA_CHECK_RETURN(cudaDeviceSynchronize()); nanoBraggSpotsCUDAKernel<<<numBlocks, threadsPerBlock>>>(cu_spixels, cu_fpixels, cu_roi_xmin, cu_roi_xmax, cu_roi_ymin, cu_roi_ymax, cu_oversample, cu_point_pixel, cu_pixel_size, cu_subpixel_size, cu_steps, cu_detector_thickstep, cu_detector_thicksteps, cu_detector_thick, cu_detector_mu, cu_sdet_vector, cu_fdet_vector, cu_odet_vector, cu_pix0_vector, cu_curved_detector, cu_distance, cu_close_distance, cu_beam_vector, cu_Xbeam, cu_Ybeam, cu_dmin, cu_phi0, cu_phistep, cu_phisteps, cu_spindle_vector, cu_sources, cu_source_X, cu_source_Y, cu_source_Z, cu_source_I, cu_source_lambda, cu_a0, cu_b0, cu_c0, cu_xtal_shape, cu_mosaic_spread, cu_mosaic_domains, cu_mosaic_umats, cu_Na, cu_Nb, cu_Nc, cu_V_cell, cu_water_size, cu_water_F, cu_water_MW, cu_r_e_sqr, cu_fluence, cu_Avogadro, cu_spot_scale, cu_integral_form, cu_default_F, cu_interpolate, cu_Fhkl, cu_FhklParams, cu_nopolar, cu_polar_vector, cu_polarization, cu_fudge, cu_maskimage, cu_floatimage /*out*/, cu_omega_reduction/*out*/, cu_max_I_x_reduction/*out*/, cu_max_I_y_reduction /*out*/, cu_rangemap /*out*/); CUDA_CHECK_RETURN(cudaPeekAtLastError()); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaMemcpy(floatimage, cu_floatimage, sizeof(*cu_floatimage) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(omega_reduction, cu_omega_reduction, sizeof(*cu_omega_reduction) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(max_I_x_reduction, cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(max_I_y_reduction, cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(rangemap, cu_rangemap, sizeof(*cu_rangemap) * total_pixels, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaFree(cu_sdet_vector)); CUDA_CHECK_RETURN(cudaFree(cu_fdet_vector)); CUDA_CHECK_RETURN(cudaFree(cu_odet_vector)); CUDA_CHECK_RETURN(cudaFree(cu_pix0_vector)); CUDA_CHECK_RETURN(cudaFree(cu_beam_vector)); CUDA_CHECK_RETURN(cudaFree(cu_spindle_vector)); CUDA_CHECK_RETURN(cudaFree(cu_polar_vector)); CUDA_CHECK_RETURN(cudaFree(cu_a0)); CUDA_CHECK_RETURN(cudaFree(cu_b0)); CUDA_CHECK_RETURN(cudaFree(cu_c0)); CUDA_CHECK_RETURN(cudaFree(cu_source_X)); 
CUDA_CHECK_RETURN(cudaFree(cu_source_Y)); CUDA_CHECK_RETURN(cudaFree(cu_source_Z)); CUDA_CHECK_RETURN(cudaFree(cu_source_I)); CUDA_CHECK_RETURN(cudaFree(cu_source_lambda)); CUDA_CHECK_RETURN(cudaFree(cu_FhklParams)); CUDA_CHECK_RETURN(cudaFree(cu_mosaic_umats)); CUDA_CHECK_RETURN(cudaFree(cu_floatimage)); CUDA_CHECK_RETURN(cudaFree(cu_omega_reduction)); CUDA_CHECK_RETURN(cudaFree(cu_max_I_x_reduction)); CUDA_CHECK_RETURN(cudaFree(cu_max_I_y_reduction)); CUDA_CHECK_RETURN(cudaFree(cu_maskimage)); CUDA_CHECK_RETURN(cudaFree(cu_rangemap)); CUDA_CHECK_RETURN(cudaFree(cu_Fhkl)); *max_I = 0; *max_I_x = 0; *max_I_y = 0; *sum = 0.0; *sumsqr = 0.0; *sumn = 0; *omega_sum = 0.0; for (int i = 0; i < total_pixels; i++) { if (!rangemap[i]) { continue; } float pixel = floatimage[i]; if (pixel > (double) *max_I) { *max_I = pixel; *max_I_x = max_I_x_reduction[i]; *max_I_y = max_I_y_reduction[i]; } *sum += pixel; *sumsqr += pixel * pixel; ++(*sumn); *omega_sum += omega_reduction[i]; } free(rangemap); free(omega_reduction); free(max_I_x_reduction); free(max_I_y_reduction); } /* cubic spline interpolation functions */ __device__ static void polint(CUDAREAL *xa, CUDAREAL *ya, CUDAREAL x, CUDAREAL *y); __device__ static void polin2(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL ya[4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL *y); __device__ static void polin3(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL *x3a, CUDAREAL ya[4][4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL x3, CUDAREAL *y); /* rotate a 3-vector about a unit vector axis */ __device__ static CUDAREAL *rotate_axis(const CUDAREAL * __restrict__ v, CUDAREAL *newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi); __device__ static CUDAREAL *rotate_axis_ldg(const CUDAREAL * __restrict__ v, CUDAREAL * newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi); /* make a unit vector pointing in same direction and report magnitude (both args can be same vector) */ __device__ static CUDAREAL unitize(CUDAREAL * vector, CUDAREAL *new_unit_vector); /* vector cross product where vector magnitude is 0th element */ __device__ static CUDAREAL *cross_product(CUDAREAL * x, CUDAREAL * y, CUDAREAL * z); /* vector inner product where vector magnitude is 0th element */ __device__ static CUDAREAL dot_product(const CUDAREAL * x, const CUDAREAL * y); __device__ static CUDAREAL dot_product_ldg(const CUDAREAL * __restrict__ x, CUDAREAL * y); /* measure magnitude of vector and put it in 0th element */ __device__ static void magnitude(CUDAREAL *vector); /* scale the magnitude of a vector */ __device__ static CUDAREAL vector_scale(CUDAREAL *vector, CUDAREAL *new_vector, CUDAREAL scale); /* rotate a 3-vector using a 9-element unitary matrix */ __device__ void rotate_umat_ldg(CUDAREAL * v, CUDAREAL *newv, const CUDAREAL * __restrict__ umat); /* Fourier transform of a truncated lattice */ __device__ static CUDAREAL sincg(CUDAREAL x, CUDAREAL N); //__device__ static CUDAREAL sincgrad(CUDAREAL x, CUDAREAL N); /* Fourier transform of a sphere */ __device__ static CUDAREAL sinc3(CUDAREAL x); /* polarization factor from vectors */ __device__ static CUDAREAL polarization_factor(CUDAREAL kahn_factor, CUDAREAL *incident, CUDAREAL *diffracted, const CUDAREAL * __restrict__ axis); __device__ __inline__ static int flatten3dindex(int x, int y, int z, int x_range, int y_range, int z_range); __device__ __inline__ CUDAREAL quickFcell_ldg(int hkls, int h_max, int h_min, int k_max, int k_min, int l_min, int l_max, int h0, int k0, int l0, int h_range, int k_range, int l_range, CUDAREAL defaultF, const CUDAREAL 
* __restrict__ Fhkl); __global__ void nanoBraggSpotsInitCUDAKernel(int spixels, int fpixels, float * floatimage, float * omega_reduction, float * max_I_x_reduction, float * max_I_y_reduction, bool * rangemap) { const int total_pixels = spixels * fpixels; const int fstride = gridDim.x * blockDim.x; const int sstride = gridDim.y * blockDim.y; const int stride = fstride * sstride; for (int pixIdx = (blockDim.y * blockIdx.y + threadIdx.y) * fstride + blockDim.x * blockIdx.x + threadIdx.x; pixIdx < total_pixels; pixIdx += stride) { const int fpixel = pixIdx % fpixels; const int spixel = pixIdx / fpixels; /* position in pixel array */ int j = spixel * fpixels + fpixel; if (j < total_pixels) { floatimage[j] = 0; omega_reduction[j] = 0; max_I_x_reduction[j] = 0; max_I_y_reduction[j] = 0; rangemap[j] = false; } } } __global__ void nanoBraggSpotsCUDAKernel(int spixels, int fpixels, int roi_xmin, int roi_xmax, int roi_ymin, int roi_ymax, int oversample, int point_pixel, CUDAREAL pixel_size, CUDAREAL subpixel_size, int steps, CUDAREAL detector_thickstep, int detector_thicksteps, CUDAREAL detector_thick, CUDAREAL detector_mu, const CUDAREAL * __restrict__ sdet_vector, const CUDAREAL * __restrict__ fdet_vector, const CUDAREAL * __restrict__ odet_vector, const CUDAREAL * __restrict__ pix0_vector, int curved_detector, CUDAREAL distance, CUDAREAL close_distance, const CUDAREAL * __restrict__ beam_vector, CUDAREAL Xbeam, CUDAREAL Ybeam, CUDAREAL dmin, CUDAREAL phi0, CUDAREAL phistep, int phisteps, const CUDAREAL * __restrict__ spindle_vector, int sources, const CUDAREAL * __restrict__ source_X, const CUDAREAL * __restrict__ source_Y, const CUDAREAL * __restrict__ source_Z, const CUDAREAL * __restrict__ source_I, const CUDAREAL * __restrict__ source_lambda, const CUDAREAL * __restrict__ a0, const CUDAREAL * __restrict__ b0, const CUDAREAL * __restrict c0, shapetype xtal_shape, CUDAREAL mosaic_spread, int mosaic_domains, const CUDAREAL * __restrict__ mosaic_umats, CUDAREAL Na, CUDAREAL Nb, CUDAREAL Nc, CUDAREAL V_cell, CUDAREAL water_size, CUDAREAL water_F, CUDAREAL water_MW, CUDAREAL r_e_sqr, CUDAREAL fluence, CUDAREAL Avogadro, CUDAREAL spot_scale, int integral_form, CUDAREAL default_F, int interpolate, const CUDAREAL * __restrict__ Fhkl, const hklParams * __restrict__ FhklParams, int nopolar, const CUDAREAL * __restrict__ polar_vector, CUDAREAL polarization, CUDAREAL fudge, const int unsigned short * __restrict__ maskimage, float * floatimage /*out*/, float * omega_reduction/*out*/, float * max_I_x_reduction/*out*/, float * max_I_y_reduction /*out*/, bool * rangemap) { __shared__ CUDAREAL s_dmin; __shared__ bool s_nopolar; __shared__ int s_phisteps; __shared__ CUDAREAL s_phi0, s_phistep; __shared__ int s_mosaic_domains; __shared__ CUDAREAL s_mosaic_spread; __shared__ shapetype s_xtal_shape; __shared__ CUDAREAL s_Na, s_Nb, s_Nc; __shared__ bool s_interpolate; __shared__ int s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, s_h_range, s_k_range, s_l_range; if (threadIdx.x == 0 && threadIdx.y == 0) { s_dmin = dmin; s_nopolar = nopolar; s_phisteps = phisteps; s_phi0 = phi0; s_phistep = phistep; s_mosaic_domains = mosaic_domains; s_mosaic_spread = mosaic_spread; s_xtal_shape = xtal_shape; s_Na = Na; s_Nb = Nb; s_Nc = Nc; s_interpolate = interpolate; s_hkls = FhklParams->hkls; s_h_max = FhklParams->h_max; s_h_min = FhklParams->h_min; s_k_max = FhklParams->k_max; s_k_min = FhklParams->k_min; s_l_max = FhklParams->l_max; s_l_min = FhklParams->l_min; s_h_range = FhklParams->h_range; s_k_range = 
FhklParams->k_range; s_l_range = FhklParams->l_range; } __syncthreads(); const int total_pixels = spixels * fpixels; const int fstride = gridDim.x * blockDim.x; const int sstride = gridDim.y * blockDim.y; const int stride = fstride * sstride; // const int tidx = blockDim.x * threadIdx.y * +threadIdx.x; // __shared__ int sharedVectors[THREADS_PER_BLOCK_TOTAL + 1][1][9]; // __shared__ CUDAREAL sharedVectors[THREADS_PER_BLOCK_TOTAL + 1][1][VECTOR_SIZE]; // CUDAREAL * tmpVector1 = sharedVectors[tidx][0]; // CUDAREAL * tmpVector2 = sharedVectors[tidx][1]; /* add background from something amorphous */ CUDAREAL F_bg = water_F; CUDAREAL I_bg = F_bg * F_bg * r_e_sqr * fluence * water_size * water_size * water_size * 1e6 * Avogadro / water_MW; // hklParams[0] = h_min; // hklParams[1] = h_max; // hklParams[2] = h_range; // hklParams[3] = k_min; // hklParams[4] = k_max; // hklParams[5] = k_range; // hklParams[6] = l_min; // hklParams[7] = l_max; // hklParams[8] = l_range; for (int pixIdx = (blockDim.y * blockIdx.y + threadIdx.y) * fstride + blockDim.x * blockIdx.x + threadIdx.x; pixIdx < total_pixels; pixIdx += stride) { const int fpixel = pixIdx % fpixels; const int spixel = pixIdx / fpixels; /* allow for just one part of detector to be rendered */ if (fpixel < roi_xmin || fpixel > roi_xmax || spixel < roi_ymin || spixel > roi_ymax) { //ROI region of interest continue; } /* position in pixel array */ const int j = pixIdx; /* allow for the use of a mask */ if (maskimage != NULL) { /* skip any flagged pixels in the mask */ if (maskimage[j] == 0) { continue; } } /* reset photon count for this pixel */ CUDAREAL I = I_bg; CUDAREAL omega_sub_reduction = 0.0; CUDAREAL max_I_x_sub_reduction = 0.0; CUDAREAL max_I_y_sub_reduction = 0.0; CUDAREAL polar = 0.0; if (s_nopolar) { polar = 1.0; } /* add this now to avoid problems with skipping later */ // move this to the bottom to avoid accessing global device memory. floatimage[j] = I_bg; /* loop over sub-pixels */ int subS, subF; for (subS = 0; subS < oversample; ++subS) { // Y voxel for (subF = 0; subF < oversample; ++subF) { // X voxel /* absolute mm position on detector (relative to its origin) */ CUDAREAL Fdet = subpixel_size * (fpixel * oversample + subF) + subpixel_size / 2.0; // X voxel CUDAREAL Sdet = subpixel_size * (spixel * oversample + subS) + subpixel_size / 2.0; // Y voxel // Fdet = pixel_size*fpixel; // Sdet = pixel_size*spixel; max_I_x_sub_reduction = Fdet; max_I_y_sub_reduction = Sdet; int thick_tic; for (thick_tic = 0; thick_tic < detector_thicksteps; ++thick_tic) { /* assume "distance" is to the front of the detector sensor layer */ CUDAREAL Odet = thick_tic * detector_thickstep; // Z Orthagonal voxel. 
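/* (Fdet, Sdet, Odet) are the sub-pixel coordinates in the detector frame
   (fast, slow, normal); the block below maps them into the lab frame using
   the fdet/sdet/odet basis vectors plus the pix0 origin vector. */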
/* construct detector subpixel position in 3D space */ // pixel_X = distance; // pixel_Y = Sdet-Ybeam; // pixel_Z = Fdet-Xbeam; //CUDAREAL * pixel_pos = tmpVector1; CUDAREAL pixel_pos[4]; pixel_pos[1] = Fdet * __ldg(&fdet_vector[1]) + Sdet * __ldg(&sdet_vector[1]) + Odet * __ldg(&odet_vector[1]) + __ldg(&pix0_vector[1]); // X pixel_pos[2] = Fdet * __ldg(&fdet_vector[2]) + Sdet * __ldg(&sdet_vector[2]) + Odet * __ldg(&odet_vector[2]) + __ldg(&pix0_vector[2]); // X pixel_pos[3] = Fdet * __ldg(&fdet_vector[3]) + Sdet * __ldg(&sdet_vector[3]) + Odet * __ldg(&odet_vector[3]) + __ldg(&pix0_vector[3]); // X // pixel_pos[1] = Fdet * fdet_vector[1] + Sdet * sdet_vector[1] + Odet * odet_vector[1] + pix0_vector[1]; // X // pixel_pos[2] = Fdet * fdet_vector[2] + Sdet * sdet_vector[2] + Odet * odet_vector[2] + pix0_vector[2]; // Y // pixel_pos[3] = Fdet * fdet_vector[3] + Sdet * sdet_vector[3] + Odet * odet_vector[3] + pix0_vector[3]; // Z if (curved_detector) { /* construct detector pixel that is always "distance" from the sample */ CUDAREAL dbvector[4]; dbvector[1] = distance * beam_vector[1]; dbvector[2] = distance * beam_vector[2]; dbvector[3] = distance * beam_vector[3]; /* treat detector pixel coordinates as radians */ CUDAREAL newvector[] = { 0.0, 0.0, 0.0, 0.0 }; rotate_axis(dbvector, newvector, sdet_vector, pixel_pos[2] / distance); rotate_axis(newvector, pixel_pos, fdet_vector, pixel_pos[3] / distance); // rotate(vector,pixel_pos,0,pixel_pos[3]/distance,pixel_pos[2]/distance); } /* construct the diffracted-beam unit vector to this sub-pixel */ //CUDAREAL * diffracted = tmpVector2; CUDAREAL diffracted[4]; CUDAREAL airpath = unitize(pixel_pos, diffracted); /* solid angle subtended by a pixel: (pix/airpath)^2*cos(2theta) */ CUDAREAL omega_pixel = pixel_size * pixel_size / airpath / airpath * close_distance / airpath; /* option to turn off obliquity effect, inverse-square-law only */ if (point_pixel) { omega_pixel = 1.0 / airpath / airpath; } /* now calculate detector thickness effects */ CUDAREAL capture_fraction = 1.0; if (detector_thick > 0.0 && detector_mu> 0.0) { /* inverse of effective thickness increase */ CUDAREAL parallax = dot_product_ldg(odet_vector, diffracted); capture_fraction = exp(-thick_tic * detector_thickstep / detector_mu / parallax) - exp(-(thick_tic + 1) * detector_thickstep / detector_mu / parallax); } /* loop over sources now */ int source; for (source = 0; source < sources; ++source) { /* retrieve stuff from cache */ //CUDAREAL * incident = tmpVector1; CUDAREAL incident[4]; incident[1] = -__ldg(&source_X[source]); incident[2] = -__ldg(&source_Y[source]); incident[3] = -__ldg(&source_Z[source]); CUDAREAL lambda = __ldg(&source_lambda[source]); CUDAREAL source_fraction = __ldg(&source_I[source]); /* construct the incident beam unit vector while recovering source distance */ // TODO[Giles]: Optimization! We can unitize the source vectors before passing them in. 
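/* normalize the incident direction in place (element 0 receives the magnitude),
   then build the scattering vector q = (diffracted - incident)/lambda;
   stol = |q|/2 is used below for the dmin resolution cutoff. */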
unitize(incident, incident); // CUDAREAL source_path = unitize(incident, incident); // CUDAREAL source_path = norm3d(incident[1], incident[2], incident[3]); // CUDAREAL * d = tmpVector2; // d[0] = diffracted[0]; // d[1] = diffracted[1]; // d[2] = diffracted[2]; // d[3] = diffracted[3]; /* construct the scattering vector for this pixel */ // CUDAREAL * scattering = tmpVector1; CUDAREAL scattering[4]; scattering[1] = (diffracted[1] - incident[1]) / lambda; scattering[2] = (diffracted[2] - incident[2]) / lambda; scattering[3] = (diffracted[3] - incident[3]) / lambda; // CUDAREAL scattering[] = { 0.0, (diffracted[1] - incident[1]) / lambda, (diffracted[2] - incident[2]) / lambda, (diffracted[3] // - incident[3]) / lambda }; /* sin(theta)/lambda is half the scattering vector length */ // magnitude(scattering); // CUDAREAL stol = 0.5 * scattering[0]; CUDAREAL stol = 0.5 * norm3d(scattering[1], scattering[2], scattering[3]); /* rough cut to speed things up when we aren't using whole detector */ if (s_dmin > 0.0 && stol > 0.0) { if (s_dmin > 0.5 / stol) { continue; } } /* polarization factor */ if (!s_nopolar) { /* need to compute polarization factor */ polar = polarization_factor(polarization, incident, diffracted, polar_vector); } else { polar = 1.0; } /* sweep over phi angles */ for (int phi_tic = 0; phi_tic < s_phisteps; ++phi_tic) { CUDAREAL phi = s_phistep * phi_tic + s_phi0; // CUDAREAL ap[] = { 0.0, 0.0, 0.0, 0.0 }; // CUDAREAL bp[] = { 0.0, 0.0, 0.0, 0.0 }; // CUDAREAL cp[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL ap[4]; CUDAREAL bp[4]; CUDAREAL cp[4]; /* rotate about spindle if necessary */ rotate_axis_ldg(a0, ap, spindle_vector, phi); rotate_axis_ldg(b0, bp, spindle_vector, phi); rotate_axis_ldg(c0, cp, spindle_vector, phi); /* enumerate mosaic domains */ for (int mos_tic = 0; mos_tic < s_mosaic_domains; ++mos_tic) { /* apply mosaic rotation after phi rotation */ CUDAREAL a[4]; CUDAREAL b[4]; CUDAREAL c[4]; if (s_mosaic_spread > 0.0) { rotate_umat_ldg(ap, a, &mosaic_umats[mos_tic * 9]); rotate_umat_ldg(bp, b, &mosaic_umats[mos_tic * 9]); rotate_umat_ldg(cp, c, &mosaic_umats[mos_tic * 9]); } else { a[1] = ap[1]; a[2] = ap[2]; a[3] = ap[3]; b[1] = bp[1]; b[2] = bp[2]; b[3] = bp[3]; c[1] = cp[1]; c[2] = cp[2]; c[3] = cp[3]; } // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+0],mosaic_umats[mos_tic*9+1],mosaic_umats[mos_tic*9+2]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+3],mosaic_umats[mos_tic*9+4],mosaic_umats[mos_tic*9+5]); // printf("%d %f %f %f\n",mos_tic,mosaic_umats[mos_tic*9+6],mosaic_umats[mos_tic*9+7],mosaic_umats[mos_tic*9+8]); /* construct fractional Miller indicies */ // CUDAREAL * scat_s = tmpVector2; // scat_s[0] = scattering[0]; // scat_s[1] = scattering[1]; // scat_s[2] = scattering[2]; // scat_s[3] = scattering[3]; // // CUDAREAL h = dot_product(a, scat_s); // CUDAREAL k = dot_product(b, scat_s); // CUDAREAL l = dot_product(c, scat_s); CUDAREAL h = dot_product(a, scattering); CUDAREAL k = dot_product(b, scattering); CUDAREAL l = dot_product(c, scattering); /* round off to nearest whole index */ int h0 = ceil(h - 0.5); int k0 = ceil(k - 0.5); int l0 = ceil(l - 0.5); /* structure factor of the lattice (paralelpiped crystal) F_latt = sin(M_PI*Na*h)*sin(M_PI*Nb*k)*sin(M_PI*Nc*l)/sin(M_PI*h)/sin(M_PI*k)/sin(M_PI*l); */ CUDAREAL F_latt = 1.0; // Shape transform for the crystal. 
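/* evaluate the lattice transform F_latt according to xtal_shape:
   SQUARE uses a product of sincg() terms over h, k, l; ROUND uses sinc3() of the
   fractional-index radius; GAUSS applies an exponential falloff; TOPHAT applies
   a hard cutoff on hrad_sqr*fudge. */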
CUDAREAL hrad_sqr = 0.0; if (s_xtal_shape == SQUARE) { /* xtal is a paralelpiped */ if (Na > 1) { // F_latt *= sincgrad(h, s_Na); F_latt *= sincg(M_PI * h, s_Na); } if (Nb > 1) { // F_latt *= sincgrad(k, s_Nb); F_latt *= sincg(M_PI * k, s_Nb); } if (Nc > 1) { // F_latt *= sincgrad(l, s_Nc); F_latt *= sincg(M_PI * l, s_Nc); } } else { /* handy radius in reciprocal space, squared */ hrad_sqr = (h - h0) * (h - h0) * Na * Na + (k - k0) * (k - k0) * Nb * Nb + (l - l0) * (l - l0) * Nc * Nc; } if (s_xtal_shape == ROUND) { /* use sinc3 for elliptical xtal shape, correcting for sqrt of volume ratio between cube and sphere */ F_latt = Na * Nb * Nc * 0.723601254558268 * sinc3(M_PI * sqrt(hrad_sqr * fudge)); } if (s_xtal_shape == GAUSS) { /* fudge the radius so that volume and FWHM are similar to square_xtal spots */ F_latt = Na * Nb * Nc * exp(-(hrad_sqr / 0.63 * fudge)); } if (s_xtal_shape == TOPHAT) { /* make a flat-top spot of same height and volume as square_xtal spots */ F_latt = Na * Nb * Nc * (hrad_sqr * fudge < 0.3969); } /* no need to go further if result will be zero? */ if (F_latt == 0.0 && water_size == 0.0) continue; /* find nearest point on Ewald sphere surface? */ if (integral_form) { /* need to calculate reciprocal matrix */ /* various cross products */ CUDAREAL a_cross_b[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL b_cross_c[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL c_cross_a[] = { 0.0, 0.0, 0.0, 0.0 }; cross_product(a, b, a_cross_b); cross_product(b, c, b_cross_c); cross_product(c, a, c_cross_a); /* new reciprocal-space cell vectors */ CUDAREAL a_star[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL b_star[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL c_star[] = { 0.0, 0.0, 0.0, 0.0 }; vector_scale(b_cross_c, a_star, 1e20 / V_cell); vector_scale(c_cross_a, b_star, 1e20 / V_cell); vector_scale(a_cross_b, c_star, 1e20 / V_cell); /* reciprocal-space coordinates of nearest relp */ CUDAREAL relp[] = { 0.0, 0.0, 0.0, 0.0 }; relp[1] = h0 * a_star[1] + k0 * b_star[1] + l0 * c_star[1]; relp[2] = h0 * a_star[2] + k0 * b_star[2] + l0 * c_star[2]; relp[3] = h0 * a_star[3] + k0 * b_star[3] + l0 * c_star[3]; // d_star = magnitude(relp) /* reciprocal-space coordinates of center of Ewald sphere */ CUDAREAL Ewald0[] = { 0.0, 0.0, 0.0, 0.0 }; Ewald0[1] = -incident[1] / lambda / 1e10; Ewald0[2] = -incident[2] / lambda / 1e10; Ewald0[3] = -incident[3] / lambda / 1e10; // 1/lambda = magnitude(Ewald0) /* distance from Ewald sphere in lambda=1 units */ CUDAREAL dEwald0[] = { 0.0, 0.0, 0.0, 0.0 }; dEwald0[1] = relp[1] - Ewald0[1]; dEwald0[2] = relp[2] - Ewald0[2]; dEwald0[3] = relp[3] - Ewald0[3]; magnitude(dEwald0); CUDAREAL d_r = dEwald0[0] - 1.0; /* unit vector of diffracted ray through relp */ CUDAREAL diffracted0[] = { 0.0, 0.0, 0.0, 0.0 }; unitize(dEwald0, diffracted0); /* intersection with detector plane */ CUDAREAL xd = dot_product_ldg(fdet_vector, diffracted0); CUDAREAL yd = dot_product_ldg(sdet_vector, diffracted0); CUDAREAL zd = dot_product_ldg(odet_vector, diffracted0); /* where does the central direct-beam hit */ CUDAREAL xd0 = dot_product_ldg(fdet_vector, incident); CUDAREAL yd0 = dot_product_ldg(sdet_vector, incident); CUDAREAL zd0 = dot_product_ldg(odet_vector, incident); /* convert to mm coordinates */ CUDAREAL Fdet0 = distance * (xd / zd) + Xbeam; CUDAREAL Sdet0 = distance * (yd / zd) + Ybeam; //printf("GOTHERE %g %g %g %g\n",Fdet,Sdet,Fdet0,Sdet0); CUDAREAL test = exp(-((Fdet - Fdet0) * (Fdet - Fdet0) + (Sdet - Sdet0) * (Sdet - Sdet0) + d_r * d_r) / 1e-8); } // end of integral form /* structure factor of the unit cell */ 
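/* unit-cell structure factor: either tricubic interpolation over the 4x4x4
   neighborhood of (h,k,l) when interpolation is enabled and in range, or a
   nearest-neighbor lookup via quickFcell_ldg(), which falls back to default_F
   outside the stored HKL range. */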
CUDAREAL F_cell = default_F; if (s_interpolate) { int h0_flr = floor(h); int k0_flr = floor(k); int l0_flr = floor(l); if (((h - s_h_min + 3) > s_h_range) || (h - 2 < s_h_min) || ((k - s_k_min + 3) > s_k_range) || (k - 2 < s_k_min) || ((l - s_l_min + 3) > s_l_range) || (l - 2 < s_l_min)) { // if (babble) { // babble = 0; // printf("WARNING: out of range for three point interpolation: h,k,l,h0,k0,l0: %g,%g,%g,%d,%d,%d \n", h, k, l, h0, // k0, l0); // printf("WARNING: further warnings will not be printed! "); // } F_cell = quickFcell_ldg(s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, h0, k0, l0, s_h_range, s_k_range, s_l_range, default_F, Fhkl); } else { /* integer versions of nearest HKL indicies */ int h_interp[] = { 0.0, 0.0, 0.0, 0.0 }; int k_interp[] = { 0.0, 0.0, 0.0, 0.0 }; int l_interp[] = { 0.0, 0.0, 0.0, 0.0 }; h_interp[0] = h0_flr - 1; h_interp[1] = h0_flr; h_interp[2] = h0_flr + 1; h_interp[3] = h0_flr + 2; k_interp[0] = k0_flr - 1; k_interp[1] = k0_flr; k_interp[2] = k0_flr + 1; k_interp[3] = k0_flr + 2; l_interp[0] = l0_flr - 1; l_interp[1] = l0_flr; l_interp[2] = l0_flr + 1; l_interp[3] = l0_flr + 2; /* polin function needs doubles */ CUDAREAL h_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL k_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; CUDAREAL l_interp_d[] = { 0.0, 0.0, 0.0, 0.0 }; h_interp_d[0] = (CUDAREAL) h_interp[0]; h_interp_d[1] = (CUDAREAL) h_interp[1]; h_interp_d[2] = (CUDAREAL) h_interp[2]; h_interp_d[3] = (CUDAREAL) h_interp[3]; k_interp_d[0] = (CUDAREAL) k_interp[0]; k_interp_d[1] = (CUDAREAL) k_interp[1]; k_interp_d[2] = (CUDAREAL) k_interp[2]; k_interp_d[3] = (CUDAREAL) k_interp[3]; l_interp_d[0] = (CUDAREAL) l_interp[0]; l_interp_d[1] = (CUDAREAL) l_interp[1]; l_interp_d[2] = (CUDAREAL) l_interp[2]; l_interp_d[3] = (CUDAREAL) l_interp[3]; /* now populate the "y" values (nearest four structure factors in each direction) */ CUDAREAL sub_Fhkl[4][4][4]; int i1, i2, i3; for (i1 = 0; i1 < 4; i1++) { for (i2 = 0; i2 < 4; i2++) { for (i3 = 0; i3 < 4; i3++) { sub_Fhkl[i1][i2][i3] = __ldg( &Fhkl[flatten3dindex(h_interp[i1] - s_h_min, k_interp[i2] - s_k_min, l_interp[i3] - s_l_min, s_h_range, s_k_range, s_l_range)]); } } } /* run the tricubic polynomial interpolation */ polin3(h_interp_d, k_interp_d, l_interp_d, sub_Fhkl, h, k, l, &F_cell); } } else { // if (!interpolate) { // if (hkls && (h0 <= hklParams[1]) && (h0 >= hklParams[0]) && (k0 <= hklParams[4]) && (k0 >= hklParams[3]) && (l0 <= hklParams[7]) && (l0 >= hklParams[6])) { // /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - hklParams[0], k0 - hklParams[3], l0 - hklParams[6], hklParams[2], hklParams[5], hklParams[8])]); // } else { // F_cell = default_F; // usually zero // } // } F_cell = quickFcell_ldg(s_hkls, s_h_max, s_h_min, s_k_max, s_k_min, s_l_max, s_l_min, h0, k0, l0, s_h_range, s_k_range, s_l_range, default_F, Fhkl); // if (s_hkls && (h0 <= s_h_max) && (h0 >= s_h_min) && (k0 <= s_k_max) && (k0 >= s_k_min) && (l0 <= s_l_max) && (l0 >= s_l_min)) { // /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - s_h_min, k0 - s_k_min, l0 - s_l_min, s_h_range, s_k_range, s_l_range)]); //// F_cell = __ldg(&Fhkl[flatten3dindex(h0 - __ldg(&FhklParams->h_min), k0 - __ldg(&FhklParams->k_min), l0 - __ldg(&FhklParams->l_min), s_h_range, s_k_range, s_l_range)]); //// F_cell = __ldg(&Fhkl[flatten3dindex(h0 - FhklParams->h_min, k0 - FhklParams->k_min, l0 - FhklParams->l_min, FhklParams->h_range, FhklParams->k_range, FhklParams->l_range)]); // } } /* now we have the 
structure factor for this pixel */ /* convert amplitudes into intensity (photons per steradian) */ I += F_cell * F_cell * F_latt * F_latt * source_fraction * capture_fraction * omega_pixel; omega_sub_reduction += omega_pixel; } /* end of mosaic loop */ } /* end of phi loop */ } /* end of source loop */ } /* end of detector thickness loop */ } /* end of sub-pixel y loop */ } /* end of sub-pixel x loop */ const double photons = I_bg + (r_e_sqr * spot_scale * fluence * polar * I) / steps; floatimage[j] = photons; omega_reduction[j] = omega_sub_reduction; // shared contention max_I_x_reduction[j] = max_I_x_sub_reduction; max_I_y_reduction[j] = max_I_y_sub_reduction; rangemap[j] = true; } } __device__ __inline__ CUDAREAL quickFcell_ldg(int hkls, int h_max, int h_min, int k_max, int k_min, int l_max, int l_min, int h0, int k0, int l0, int h_range, int k_range, int l_range, CUDAREAL defaultF, const CUDAREAL * __restrict__ Fhkl) { if (hkls && (h0 <= h_max) && (h0 >= h_min) && (k0 <= k_max) && (k0 >= k_min) && (l0 <= l_max) && (l0 >= l_min)) { /* just take nearest-neighbor */ // F_cell = __ldg(&Fhkl[flatten3dindex(h0 - s_h_min, k0 - s_k_min, l0 - s_l_min, s_h_range, s_k_range, s_l_range)]); return __ldg(&Fhkl[flatten3dindex(h0 - h_min, k0 - k_min, l0 - l_min, h_range, k_range, l_range)]); } else { return defaultF; // usually zero } } __device__ __inline__ int flatten3dindex(int x, int y, int z, int x_range, int y_range, int z_range) { return x * y_range * z_range + y * z_range + z; } /* rotate a point about a unit vector axis */ __device__ CUDAREAL *rotate_axis(const CUDAREAL * __restrict__ v, CUDAREAL * newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi) { const CUDAREAL sinphi = sin(phi); const CUDAREAL cosphi = cos(phi); const CUDAREAL a1 = axis[1]; const CUDAREAL a2 = axis[2]; const CUDAREAL a3 = axis[3]; const CUDAREAL v1 = v[1]; const CUDAREAL v2 = v[2]; const CUDAREAL v3 = v[3]; const CUDAREAL dot = (a1 * v1 + a2 * v2 + a3 * v3) * (1.0 - cosphi); newv[1] = a1 * dot + v1 * cosphi + (-a3 * v2 + a2 * v3) * sinphi; newv[2] = a2 * dot + v2 * cosphi + (+a3 * v1 - a1 * v3) * sinphi; newv[3] = a3 * dot + v3 * cosphi + (-a2 * v1 + a1 * v2) * sinphi; return newv; } /* rotate a point about a unit vector axis */ __device__ CUDAREAL *rotate_axis_ldg(const CUDAREAL * __restrict__ v, CUDAREAL * newv, const CUDAREAL * __restrict__ axis, const CUDAREAL phi) { const CUDAREAL sinphi = sin(phi); const CUDAREAL cosphi = cos(phi); const CUDAREAL a1 = __ldg(&axis[1]); const CUDAREAL a2 = __ldg(&axis[2]); const CUDAREAL a3 = __ldg(&axis[3]); const CUDAREAL v1 = __ldg(&v[1]); const CUDAREAL v2 = __ldg(&v[2]); const CUDAREAL v3 = __ldg(&v[3]); const CUDAREAL dot = (a1 * v1 + a2 * v2 + a3 * v3) * (1.0 - cosphi); newv[1] = a1 * dot + v1 * cosphi + (-a3 * v2 + a2 * v3) * sinphi; newv[2] = a2 * dot + v2 * cosphi + (+a3 * v1 - a1 * v3) * sinphi; newv[3] = a3 * dot + v3 * cosphi + (-a2 * v1 + a1 * v2) * sinphi; return newv; } /* make provided vector a unit vector */ __device__ CUDAREAL unitize(CUDAREAL * vector, CUDAREAL * new_unit_vector) { CUDAREAL v1 = vector[1]; CUDAREAL v2 = vector[2]; CUDAREAL v3 = vector[3]; // CUDAREAL mag = sqrt(v1 * v1 + v2 * v2 + v3 * v3); CUDAREAL mag = norm3d(v1, v2, v3); if (mag != 0.0) { /* normalize it */ new_unit_vector[0] = mag; new_unit_vector[1] = v1 / mag; new_unit_vector[2] = v2 / mag; new_unit_vector[3] = v3 / mag; } else { /* can't normalize, report zero vector */ new_unit_vector[0] = 0.0; new_unit_vector[1] = 0.0; new_unit_vector[2] = 0.0; new_unit_vector[3] = 0.0; } 
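	/* the pre-normalization magnitude is returned (0.0 for a zero vector); vectors
	   here use 1-based components with element 0 reserved for the magnitude. */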
return mag; } /* vector cross product where vector magnitude is 0th element */ __device__ CUDAREAL *cross_product(CUDAREAL * x, CUDAREAL * y, CUDAREAL * z) { z[1] = x[2] * y[3] - x[3] * y[2]; z[2] = x[3] * y[1] - x[1] * y[3]; z[3] = x[1] * y[2] - x[2] * y[1]; z[0] = 0.0; return z; } /* vector inner product where vector magnitude is 0th element */ __device__ CUDAREAL dot_product(const CUDAREAL * x, const CUDAREAL * y) { return x[1] * y[1] + x[2] * y[2] + x[3] * y[3]; } __device__ CUDAREAL dot_product_ldg(const CUDAREAL * __restrict__ x, CUDAREAL * y) { return __ldg(&x[1]) * y[1] + __ldg(&x[2]) * y[2] + __ldg(&x[3]) * y[3]; } /* measure magnitude of provided vector */ __device__ void magnitude(CUDAREAL *vector) { /* measure the magnitude */ vector[0] = sqrt(vector[1] * vector[1] + vector[2] * vector[2] + vector[3] * vector[3]); } /* scale magnitude of provided vector */ __device__ CUDAREAL vector_scale(CUDAREAL *vector, CUDAREAL *new_vector, CUDAREAL scale) { new_vector[1] = scale * vector[1]; new_vector[2] = scale * vector[2]; new_vector[3] = scale * vector[3]; magnitude(new_vector); return new_vector[0]; } /* rotate a vector using a 9-element unitary matrix */ __device__ void rotate_umat_ldg(CUDAREAL * v, CUDAREAL *newv, const CUDAREAL * __restrict__ umat) { /* for convenience, assign matrix x-y coordinate */ CUDAREAL uxx = __ldg(&umat[0]); CUDAREAL uxy = __ldg(&umat[1]); CUDAREAL uxz = __ldg(&umat[2]); CUDAREAL uyx = __ldg(&umat[3]); CUDAREAL uyy = __ldg(&umat[4]); CUDAREAL uyz = __ldg(&umat[5]); CUDAREAL uzx = __ldg(&umat[6]); CUDAREAL uzy = __ldg(&umat[7]); CUDAREAL uzz = __ldg(&umat[8]); CUDAREAL v1 = v[1]; CUDAREAL v2 = v[2]; CUDAREAL v3 = v[3]; /* rotate the vector (x=1,y=2,z=3) */ newv[1] = uxx * v1 + uxy * v2 + uxz * v3; newv[2] = uyx * v1 + uyy * v2 + uyz * v3; newv[3] = uzx * v1 + uzy * v2 + uzz * v3; } /* Fourier transform of a grating */ __device__ CUDAREAL sincg(CUDAREAL x, CUDAREAL N) { if (x != 0.0) return sin(x * N) / sin(x); return N; } __device__ CUDAREAL sincgrad(CUDAREAL x, CUDAREAL N) { if (x != 0.0) return sinpi(x * N) / sinpi(x); return N; } /* Fourier transform of a sphere */ __device__ CUDAREAL sinc3(CUDAREAL x) { if (x != 0.0) return 3.0 * (sin(x) / x - cos(x)) / (x * x); return 1.0; } __device__ void polint(CUDAREAL *xa, CUDAREAL *ya, CUDAREAL x, CUDAREAL *y) { CUDAREAL x0, x1, x2, x3; x0 = (x - xa[1]) * (x - xa[2]) * (x - xa[3]) * ya[0] / ((xa[0] - xa[1]) * (xa[0] - xa[2]) * (xa[0] - xa[3])); x1 = (x - xa[0]) * (x - xa[2]) * (x - xa[3]) * ya[1] / ((xa[1] - xa[0]) * (xa[1] - xa[2]) * (xa[1] - xa[3])); x2 = (x - xa[0]) * (x - xa[1]) * (x - xa[3]) * ya[2] / ((xa[2] - xa[0]) * (xa[2] - xa[1]) * (xa[2] - xa[3])); x3 = (x - xa[0]) * (x - xa[1]) * (x - xa[2]) * ya[3] / ((xa[3] - xa[0]) * (xa[3] - xa[1]) * (xa[3] - xa[2])); *y = x0 + x1 + x2 + x3; } __device__ void polin2(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL ya[4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL *y) { int j; CUDAREAL ymtmp[4]; for (j = 1; j <= 4; j++) { polint(x2a, ya[j - 1], x2, &ymtmp[j - 1]); } polint(x1a, ymtmp, x1, y); } __device__ void polin3(CUDAREAL *x1a, CUDAREAL *x2a, CUDAREAL *x3a, CUDAREAL ya[4][4][4], CUDAREAL x1, CUDAREAL x2, CUDAREAL x3, CUDAREAL *y) { int j; CUDAREAL ymtmp[4]; for (j = 1; j <= 4; j++) { polin2(x2a, x3a, &ya[j - 1][0], x2, x3, &ymtmp[j - 1]); } polint(x1a, ymtmp, x1, y); } /* polarization factor */ __device__ CUDAREAL polarization_factor(CUDAREAL kahn_factor, CUDAREAL *incident, CUDAREAL *diffracted, const CUDAREAL * __restrict__ axis) { CUDAREAL cos2theta, cos2theta_sqr, 
sin2theta_sqr; CUDAREAL psi = 0.0; CUDAREAL E_in[4], B_in[4], E_out[4], B_out[4]; // these are already unitized before entering this loop. Optimize this out. // unitize(incident, incident); // unitize(diffracted, diffracted); /* component of diffracted unit vector along incident beam unit vector */ cos2theta = dot_product(incident, diffracted); cos2theta_sqr = cos2theta * cos2theta; sin2theta_sqr = 1 - cos2theta_sqr; if (kahn_factor != 0.0) { /* tricky bit here is deciding which direciton the E-vector lies in for each source here we assume it is closest to the "axis" defined above */ CUDAREAL unitAxis[] = { axis[0], axis[1], axis[2], axis[3] }; // this is already unitized. Optimize this out. unitize(unitAxis, unitAxis); /* cross product to get "vertical" axis that is orthogonal to the cannonical "polarization" */ cross_product(unitAxis, incident, B_in); /* make it a unit vector */ unitize(B_in, B_in); /* cross product with incident beam to get E-vector direction */ cross_product(incident, B_in, E_in); /* make it a unit vector */ unitize(E_in, E_in); /* get components of diffracted ray projected onto the E-B plane */ E_out[0] = dot_product(diffracted, E_in); B_out[0] = dot_product(diffracted, B_in); /* compute the angle of the diffracted ray projected onto the incident E-B plane */ psi = -atan2(B_out[0], E_out[0]); } /* correction for polarized incident beam */ return 0.5 * (1.0 + cos2theta_sqr - kahn_factor * cos(2 * psi) * sin2theta_sqr); }
d2c9509bcc9540b6ccf29cc5ea82d230223192e0.hip
// !!! This is a file automatically generated by hipify!!! // System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" // setting the number of trials in the monte carlo simulation: #ifndef NUMTRIALS #define NUMTRIALS ( 1024*1024 ) #endif #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE ) // ranges for the random numbers: const float XCMIN = 0.0; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; // function prototypes: float Ranf( float, float ); int Ranf( int, int ); void TimeOfDaySeed( ); __global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits ) { unsigned int wgNumber = blockIdx.x; unsigned int wgDimension = blockDim.x; unsigned int threadNum = threadIdx.x; unsigned int gid = wgNumber*wgDimension + threadNum; // all the monte carlo stuff goes in here // if we make it all the way through, then Hits[gid] = 1 // randomize the location and radius of the circle: float xc = Xcs[gid]; float yc = Ycs[gid]; float r = Rs[gid]; float tn = tanf( (float)( (M_PI/180.) * 30. ) ); Hits[gid] = 0; // solve for the intersection using the quadratic formula: float a = 1. + tn*tn; float b = -2.*( xc + yc*tn ); float c = xc*xc + yc*yc - r*r; float d = b*b - 4.*a*c; // cascading if-statements: // if you used "continue;" in project #1, change to this style because, // if there is no for-loop, then there is nowhere to continue to //If d is less than 0., then the circle was completely missed. (Case A) Continue on to the next trial in the for-loop. if( d>=0 ) { d = sqrt( d ); float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection //If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B) Continue on to the next trial in the for-loop. if( tmin>=0 ) { // where does it intersect the circle? float xcir = tmin; float ycir = tmin*tn; // get the unitized normal vector at the point of intersection: float nx = xcir - xc; float ny = ycir - yc; float n = sqrt( nx*nx + ny*ny ); nx /= n; // unit vector ny /= n; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt( inx*inx + iny*iny ); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx*nx + iny*ny; float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence` float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = ( 0. - ycir ) / outy; if( t >= 0. 
) { Hits[gid] = 1; } } } } // main program: int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float *hXcs = new float[NUMTRIALS]; float *hYcs = new float[NUMTRIALS]; float * hRs = new float[NUMTRIALS]; int *hHits = new int[NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hXcs[n] = Ranf( XCMIN, XCMAX ); hYcs[n] = Ranf( YCMIN, YCMAX ); hRs[n] = Ranf( RMIN, RMAX ); } // allocate device memory: float *dXcs, *dYcs, *dRs; int *dHits; dim3 dimsXcs( NUMTRIALS, 1, 1 ); dim3 dimsYcs( NUMTRIALS, 1, 1 ); dim3 dimsRs( NUMTRIALS, 1, 1 ); dim3 dimsHits( NUMTRIALS, 1, 1 ); hipError_t status; status = hipMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) ); checkCudaErrors( status ); // copy host memory to the device: status = hipMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid(NUMBLOCKS, 1, 1 ); // create and start timer hipDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: hipEvent_t start, stop; status = hipEventCreate( &start ); checkCudaErrors( status ); status = hipEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = hipEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads) , 0, 0, dXcs, dYcs, dRs, dHits ); // record the stop event: status = hipEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = hipEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = hipEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double trialsPerSecond = (float)NUMTRIALS / secondsTotal; double megaTrialsPerSecond = trialsPerSecond / 1000000.; fprintf( stderr, "BLOCKSIZE = %10d\n", BLOCKSIZE ); fprintf( stderr, "Number of Trials = %10d, MegaTrials/Second = %10.4lf\n", NUMTRIALS, megaTrialsPerSecond ); // copy result from the device to the host: status = hipMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), hipMemcpyDeviceToHost ); checkCudaErrors( status ); hipDeviceSynchronize( ); // compute the probability: int numHits = 0; for(int i = 0; i < NUMTRIALS; i++ ) { numHits += hHits[i]; } float probability = 100.f * (float)numHits / (float)NUMTRIALS; fprintf(stderr, "\nProbability = %6.3f %%\n", probability ); // clean up memory: delete [ ] hXcs; delete [ ] hYcs; delete [ ] hRs; delete [ ] hHits; status = hipFree( dXcs ); status = hipFree( dYcs ); status = hipFree( dRs ); status = hipFree( dHits ); checkCudaErrors( status ); return 0; } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. 
	return low + t * ( high - low );
}

int Ranf( int ilow, int ihigh )
{
	float low = (float)ilow;
	float high = ceil( (float)ihigh );

	return (int) Ranf(low,high);
}

void TimeOfDaySeed( )
{
	struct tm y2k = { 0 };
	y2k.tm_hour = 0;   y2k.tm_min = 0; y2k.tm_sec = 0;
	y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1;

	time_t timer;
	time( &timer );
	double seconds = difftime( timer, mktime(&y2k) );
	unsigned int seed = (unsigned int)( 1000.*seconds );	// milliseconds
	srand( seed );
}
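// Build note (assumptions, paths are placeholders): this file expects a
// HIP-compatible copy of the CUDA samples' helper headers (helper_functions.h,
// helper_cuda.h) on the include path, and <time.h> for time()/mktime()/difftime()
// if it is not pulled in transitively. One plausible invocation:
//   hipcc -I<path-to-helper-headers> -DNUMTRIALS=1048576 -DBLOCKSIZE=64 thisfile.hip -o montecarlo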
d2c9509bcc9540b6ccf29cc5ea82d230223192e0.cu
// System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" // setting the number of trials in the monte carlo simulation: #ifndef NUMTRIALS #define NUMTRIALS ( 1024*1024 ) #endif #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE ) // ranges for the random numbers: const float XCMIN = 0.0; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; // function prototypes: float Ranf( float, float ); int Ranf( int, int ); void TimeOfDaySeed( ); __global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits ) { unsigned int wgNumber = blockIdx.x; unsigned int wgDimension = blockDim.x; unsigned int threadNum = threadIdx.x; unsigned int gid = wgNumber*wgDimension + threadNum; // all the monte carlo stuff goes in here // if we make it all the way through, then Hits[gid] = 1 // randomize the location and radius of the circle: float xc = Xcs[gid]; float yc = Ycs[gid]; float r = Rs[gid]; float tn = tanf( (float)( (M_PI/180.) * 30. ) ); Hits[gid] = 0; // solve for the intersection using the quadratic formula: float a = 1. + tn*tn; float b = -2.*( xc + yc*tn ); float c = xc*xc + yc*yc - r*r; float d = b*b - 4.*a*c; // cascading if-statements: // if you used "continue;" in project #1, change to this style because, // if there is no for-loop, then there is nowhere to continue to //If d is less than 0., then the circle was completely missed. (Case A) Continue on to the next trial in the for-loop. if( d>=0 ) { d = sqrt( d ); float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection //If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B) Continue on to the next trial in the for-loop. if( tmin>=0 ) { // where does it intersect the circle? float xcir = tmin; float ycir = tmin*tn; // get the unitized normal vector at the point of intersection: float nx = xcir - xc; float ny = ycir - yc; float n = sqrt( nx*nx + ny*ny ); nx /= n; // unit vector ny /= n; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt( inx*inx + iny*iny ); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx*nx + iny*ny; float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence` float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = ( 0. - ycir ) / outy; if( t >= 0. 
) { Hits[gid] = 1; } } } } // main program: int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float *hXcs = new float[NUMTRIALS]; float *hYcs = new float[NUMTRIALS]; float * hRs = new float[NUMTRIALS]; int *hHits = new int[NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hXcs[n] = Ranf( XCMIN, XCMAX ); hYcs[n] = Ranf( YCMIN, YCMAX ); hRs[n] = Ranf( RMIN, RMAX ); } // allocate device memory: float *dXcs, *dYcs, *dRs; int *dHits; dim3 dimsXcs( NUMTRIALS, 1, 1 ); dim3 dimsYcs( NUMTRIALS, 1, 1 ); dim3 dimsRs( NUMTRIALS, 1, 1 ); dim3 dimsHits( NUMTRIALS, 1, 1 ); cudaError_t status; status = cudaMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) ); checkCudaErrors( status ); // copy host memory to the device: status = cudaMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid(NUMBLOCKS, 1, 1 ); // create and start timer cudaDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: cudaEvent_t start, stop; status = cudaEventCreate( &start ); checkCudaErrors( status ); status = cudaEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = cudaEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: MonteCarlo<<< grid, threads >>>( dXcs, dYcs, dRs, dHits ); // record the stop event: status = cudaEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = cudaEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = cudaEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double trialsPerSecond = (float)NUMTRIALS / secondsTotal; double megaTrialsPerSecond = trialsPerSecond / 1000000.; fprintf( stderr, "BLOCKSIZE = %10d\n", BLOCKSIZE ); fprintf( stderr, "Number of Trials = %10d, MegaTrials/Second = %10.4lf\n", NUMTRIALS, megaTrialsPerSecond ); // copy result from the device to the host: status = cudaMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), cudaMemcpyDeviceToHost ); checkCudaErrors( status ); cudaDeviceSynchronize( ); // compute the probability: int numHits = 0; for(int i = 0; i < NUMTRIALS; i++ ) { numHits += hHits[i]; } float probability = 100.f * (float)numHits / (float)NUMTRIALS; fprintf(stderr, "\nProbability = %6.3f %%\n", probability ); // clean up memory: delete [ ] hXcs; delete [ ] hYcs; delete [ ] hRs; delete [ ] hHits; status = cudaFree( dXcs ); status = cudaFree( dYcs ); status = cudaFree( dRs ); status = cudaFree( dHits ); checkCudaErrors( status ); return 0; } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. 
	return low + t * ( high - low );
}

int Ranf( int ilow, int ihigh )
{
	float low = (float)ilow;
	float high = ceil( (float)ihigh );

	return (int) Ranf(low,high);
}

void TimeOfDaySeed( )
{
	struct tm y2k = { 0 };
	y2k.tm_hour = 0;   y2k.tm_min = 0; y2k.tm_sec = 0;
	y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1;

	time_t timer;
	time( &timer );
	double seconds = difftime( timer, mktime(&y2k) );
	unsigned int seed = (unsigned int)( 1000.*seconds );	// milliseconds
	srand( seed );
}
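// Note: NUMBLOCKS is NUMTRIALS / BLOCKSIZE with integer division, so NUMTRIALS
// should be a multiple of BLOCKSIZE; otherwise the tail trials are never computed
// and their Hits entries are read back uninitialized. Both macros can be overridden
// at compile time, e.g. (assuming the CUDA samples' helper headers are on the
// include path, path below is a placeholder):
//   nvcc -I<cuda-samples>/common/inc -DBLOCKSIZE=128 d2c9509bcc9540b6ccf29cc5ea82d230223192e0.cu -o montecarlo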
13d7925c2938cd684abd07b388df04564b752608.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2008-2011 Yung-Yu Chen <[email protected]>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "cueuler.h" #ifdef __HIPCC__ __global__ void cuda_bound_nonrefl_soln(exedata *exd, int nbnd, int *facn) { int ibnd = blockDim.x * blockIdx.x + threadIdx.x; #else int bound_nonrefl_soln(exedata *exd, int nbnd, int *facn) { int ibnd; #endif // pointers. int *pfacn, *pfccls; double *pisol, *pisoln, *pjsoln; // iterators. int ifc, icl, jcl, ieq; #ifdef __HIPCC__ if (ibnd < nbnd) { pfacn = facn + ibnd*BFREL; #else pfacn = facn; for (ibnd=0; ibnd<nbnd; ibnd++) { #endif ifc = pfacn[0]; pfccls = exd->fccls + ifc*FCREL; icl = pfccls[0]; jcl = pfccls[1]; // set solutions. pisol = exd->sol + icl*NEQ; pisoln = exd->soln + icl*NEQ; pjsoln = exd->soln + jcl*NEQ; for (ieq=0; ieq<NEQ; ieq++) { pjsoln[ieq] = pisoln[ieq] + exd->taylor*(pisol[ieq] - pisoln[ieq]); }; #ifndef __HIPCC__ // advance boundary face. pfacn += BFREL; }; return 0; }; #else }; }; extern "C" int bound_nonrefl_soln(int nthread, void *gexc, int nbnd, void *gfacn) { int nblock = (nbnd + nthread-1) / nthread; hipLaunchKernelGGL(( cuda_bound_nonrefl_soln), dim3(nblock), dim3(nthread), 0, 0, (exedata *)gexc, nbnd, (int *)gfacn); hipDeviceSynchronize(); return 0; }; #endif #ifdef __HIPCC__ __global__ void cuda_bound_nonrefl_dsoln(exedata *exd, int nbnd, int *facn) { int ibnd = blockDim.x * blockIdx.x + threadIdx.x; #else int bound_nonrefl_dsoln(exedata *exd, int nbnd, int *facn) { int ibnd; #endif // pointers. int *pfacn, *pfccls; double *pidsol, *pidsoln, *pjdsoln, *pdsol, *pdsoln; double *pfcnml; #if NDIM == 3 int *pfcnds; double *pndcrd, *pfccnd; // scalars. double len; #endif // arrays. double dif[NDIM]; double vec[NEQ][NDIM]; double mat[NDIM][NDIM], matinv[NDIM][NDIM]; // iterators. int ifc, icl, jcl, ieq; #ifdef __HIPCC__ if (ibnd < nbnd) { pfacn = facn + ibnd*BFREL; #else pfacn = facn; for (ibnd=0; ibnd<nbnd; ibnd++) { #endif ifc = pfacn[0]; pfccls = exd->fccls + ifc*FCREL; icl = pfccls[0]; jcl = pfccls[1]; #if NDIM == 3 pfcnds = exd->fcnds + ifc*(FCMND+1); #endif pidsol = exd->dsol + icl*NEQ*NDIM; pidsoln = exd->dsoln + icl*NEQ*NDIM; pjdsoln = exd->dsoln + jcl*NEQ*NDIM; // coordinate transformation and set transformed vectors. 
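        /* The rows of mat[][] form an orthonormal frame attached to the face:
         * row 0 is the face normal; in 3D, row 1 is the unit vector from the
         * face center to its first node and row 2 is their cross product (in
         * 2D the tangent is the rotated normal).  matinv is the transpose,
         * i.e. the inverse of this orthonormal matrix.  Gradients are rotated
         * into this frame, the normal component is zeroed, and the result is
         * rotated back to set the ghost-cell gradient. */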
pfcnml = exd->fcnml + ifc*NDIM; #if NDIM == 3 pfccnd = exd->fccnd + ifc*NDIM; #endif mat[0][0] = matinv[0][0] = pfcnml[0]; mat[0][1] = matinv[1][0] = pfcnml[1]; #if NDIM == 3 mat[0][2] = matinv[2][0] = pfcnml[2]; pndcrd = exd->ndcrd + pfcnds[1]*NDIM; mat[1][0] = pndcrd[0] - pfccnd[0]; mat[1][1] = pndcrd[1] - pfccnd[1]; mat[1][2] = pndcrd[2] - pfccnd[2]; len = sqrt(mat[1][0]*mat[1][0] + mat[1][1]*mat[1][1] + mat[1][2]*mat[1][2]); mat[1][0] = matinv[0][1] = mat[1][0]/len; mat[1][1] = matinv[1][1] = mat[1][1]/len; mat[1][2] = matinv[2][1] = mat[1][2]/len; mat[2][0] = matinv[0][2] = mat[0][1]*mat[1][2] - mat[0][2]*mat[1][1]; mat[2][1] = matinv[1][2] = mat[0][2]*mat[1][0] - mat[0][0]*mat[1][2]; mat[2][2] = matinv[2][2] = mat[0][0]*mat[1][1] - mat[0][1]*mat[1][0]; #else mat[1][0] = matinv[0][1] = pfcnml[1]; mat[1][1] = matinv[1][1] = -pfcnml[0]; #endif pdsol = pidsol; pdsoln = pidsoln; for (ieq=0; ieq<NEQ; ieq++) { vec[ieq][0] = 0.0; // set perpendicular gradient to zero. dif[0] = pdsoln[0] + exd->taylor*(pdsol[0] - pdsoln[0]); dif[1] = pdsoln[1] + exd->taylor*(pdsol[1] - pdsoln[1]); #if NDIM == 3 dif[2] = pdsoln[2] + exd->taylor*(pdsol[2] - pdsoln[2]); vec[ieq][1] = mat[1][0]*dif[0] + mat[1][1]*dif[1] + mat[1][2]*dif[2]; vec[ieq][2] = mat[2][0]*dif[0] + mat[2][1]*dif[1] + mat[2][2]*dif[2]; #else vec[ieq][1] = mat[1][0]*dif[0] + mat[1][1]*dif[1]; #endif pdsol += NDIM; pdsoln += NDIM; }; // inversely transform the coordinate and set ghost gradient. pdsoln = pjdsoln; for (ieq=0; ieq<NEQ; ieq++) { #if NDIM == 3 pdsoln[0] = matinv[0][0]*vec[ieq][0] + matinv[0][1]*vec[ieq][1] + matinv[0][2]*vec[ieq][2]; pdsoln[1] = matinv[1][0]*vec[ieq][0] + matinv[1][1]*vec[ieq][1] + matinv[1][2]*vec[ieq][2]; pdsoln[2] = matinv[2][0]*vec[ieq][0] + matinv[2][1]*vec[ieq][1] + matinv[2][2]*vec[ieq][2]; #else pdsoln[0] = matinv[0][0]*vec[ieq][0] + matinv[0][1]*vec[ieq][1]; pdsoln[1] = matinv[1][0]*vec[ieq][0] + matinv[1][1]*vec[ieq][1]; #endif pdsoln += NDIM; }; #ifndef __HIPCC__ // advance boundary face. pfacn += BFREL; }; return 0; }; #else }; }; extern "C" int bound_nonrefl_dsoln(int nthread, void *gexc, int nbnd, void *gfacn) { dim3 nblock = (nbnd + nthread-1) / nthread; hipLaunchKernelGGL(( cuda_bound_nonrefl_dsoln), dim3(nblock), dim3(nthread), 0, 0, (exedata *)gexc, nbnd, (int *)gfacn); hipDeviceSynchronize(); return 0; }; #endif // vim: set ts=4 et:
13d7925c2938cd684abd07b388df04564b752608.cu
/* * Copyright (C) 2008-2011 Yung-Yu Chen <[email protected]>. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "cueuler.h" #ifdef __CUDACC__ __global__ void cuda_bound_nonrefl_soln(exedata *exd, int nbnd, int *facn) { int ibnd = blockDim.x * blockIdx.x + threadIdx.x; #else int bound_nonrefl_soln(exedata *exd, int nbnd, int *facn) { int ibnd; #endif // pointers. int *pfacn, *pfccls; double *pisol, *pisoln, *pjsoln; // iterators. int ifc, icl, jcl, ieq; #ifdef __CUDACC__ if (ibnd < nbnd) { pfacn = facn + ibnd*BFREL; #else pfacn = facn; for (ibnd=0; ibnd<nbnd; ibnd++) { #endif ifc = pfacn[0]; pfccls = exd->fccls + ifc*FCREL; icl = pfccls[0]; jcl = pfccls[1]; // set solutions. pisol = exd->sol + icl*NEQ; pisoln = exd->soln + icl*NEQ; pjsoln = exd->soln + jcl*NEQ; for (ieq=0; ieq<NEQ; ieq++) { pjsoln[ieq] = pisoln[ieq] + exd->taylor*(pisol[ieq] - pisoln[ieq]); }; #ifndef __CUDACC__ // advance boundary face. pfacn += BFREL; }; return 0; }; #else }; }; extern "C" int bound_nonrefl_soln(int nthread, void *gexc, int nbnd, void *gfacn) { int nblock = (nbnd + nthread-1) / nthread; cuda_bound_nonrefl_soln<<<nblock, nthread>>>((exedata *)gexc, nbnd, (int *)gfacn); cudaThreadSynchronize(); return 0; }; #endif #ifdef __CUDACC__ __global__ void cuda_bound_nonrefl_dsoln(exedata *exd, int nbnd, int *facn) { int ibnd = blockDim.x * blockIdx.x + threadIdx.x; #else int bound_nonrefl_dsoln(exedata *exd, int nbnd, int *facn) { int ibnd; #endif // pointers. int *pfacn, *pfccls; double *pidsol, *pidsoln, *pjdsoln, *pdsol, *pdsoln; double *pfcnml; #if NDIM == 3 int *pfcnds; double *pndcrd, *pfccnd; // scalars. double len; #endif // arrays. double dif[NDIM]; double vec[NEQ][NDIM]; double mat[NDIM][NDIM], matinv[NDIM][NDIM]; // iterators. int ifc, icl, jcl, ieq; #ifdef __CUDACC__ if (ibnd < nbnd) { pfacn = facn + ibnd*BFREL; #else pfacn = facn; for (ibnd=0; ibnd<nbnd; ibnd++) { #endif ifc = pfacn[0]; pfccls = exd->fccls + ifc*FCREL; icl = pfccls[0]; jcl = pfccls[1]; #if NDIM == 3 pfcnds = exd->fcnds + ifc*(FCMND+1); #endif pidsol = exd->dsol + icl*NEQ*NDIM; pidsoln = exd->dsoln + icl*NEQ*NDIM; pjdsoln = exd->dsoln + jcl*NEQ*NDIM; // coordinate transformation and set transformed vectors. 
pfcnml = exd->fcnml + ifc*NDIM; #if NDIM == 3 pfccnd = exd->fccnd + ifc*NDIM; #endif mat[0][0] = matinv[0][0] = pfcnml[0]; mat[0][1] = matinv[1][0] = pfcnml[1]; #if NDIM == 3 mat[0][2] = matinv[2][0] = pfcnml[2]; pndcrd = exd->ndcrd + pfcnds[1]*NDIM; mat[1][0] = pndcrd[0] - pfccnd[0]; mat[1][1] = pndcrd[1] - pfccnd[1]; mat[1][2] = pndcrd[2] - pfccnd[2]; len = sqrt(mat[1][0]*mat[1][0] + mat[1][1]*mat[1][1] + mat[1][2]*mat[1][2]); mat[1][0] = matinv[0][1] = mat[1][0]/len; mat[1][1] = matinv[1][1] = mat[1][1]/len; mat[1][2] = matinv[2][1] = mat[1][2]/len; mat[2][0] = matinv[0][2] = mat[0][1]*mat[1][2] - mat[0][2]*mat[1][1]; mat[2][1] = matinv[1][2] = mat[0][2]*mat[1][0] - mat[0][0]*mat[1][2]; mat[2][2] = matinv[2][2] = mat[0][0]*mat[1][1] - mat[0][1]*mat[1][0]; #else mat[1][0] = matinv[0][1] = pfcnml[1]; mat[1][1] = matinv[1][1] = -pfcnml[0]; #endif pdsol = pidsol; pdsoln = pidsoln; for (ieq=0; ieq<NEQ; ieq++) { vec[ieq][0] = 0.0; // set perpendicular gradient to zero. dif[0] = pdsoln[0] + exd->taylor*(pdsol[0] - pdsoln[0]); dif[1] = pdsoln[1] + exd->taylor*(pdsol[1] - pdsoln[1]); #if NDIM == 3 dif[2] = pdsoln[2] + exd->taylor*(pdsol[2] - pdsoln[2]); vec[ieq][1] = mat[1][0]*dif[0] + mat[1][1]*dif[1] + mat[1][2]*dif[2]; vec[ieq][2] = mat[2][0]*dif[0] + mat[2][1]*dif[1] + mat[2][2]*dif[2]; #else vec[ieq][1] = mat[1][0]*dif[0] + mat[1][1]*dif[1]; #endif pdsol += NDIM; pdsoln += NDIM; }; // inversely transform the coordinate and set ghost gradient. pdsoln = pjdsoln; for (ieq=0; ieq<NEQ; ieq++) { #if NDIM == 3 pdsoln[0] = matinv[0][0]*vec[ieq][0] + matinv[0][1]*vec[ieq][1] + matinv[0][2]*vec[ieq][2]; pdsoln[1] = matinv[1][0]*vec[ieq][0] + matinv[1][1]*vec[ieq][1] + matinv[1][2]*vec[ieq][2]; pdsoln[2] = matinv[2][0]*vec[ieq][0] + matinv[2][1]*vec[ieq][1] + matinv[2][2]*vec[ieq][2]; #else pdsoln[0] = matinv[0][0]*vec[ieq][0] + matinv[0][1]*vec[ieq][1]; pdsoln[1] = matinv[1][0]*vec[ieq][0] + matinv[1][1]*vec[ieq][1]; #endif pdsoln += NDIM; }; #ifndef __CUDACC__ // advance boundary face. pfacn += BFREL; }; return 0; }; #else }; }; extern "C" int bound_nonrefl_dsoln(int nthread, void *gexc, int nbnd, void *gfacn) { dim3 nblock = (nbnd + nthread-1) / nthread; cuda_bound_nonrefl_dsoln<<<nblock, nthread>>>((exedata *)gexc, nbnd, (int *)gfacn); cudaThreadSynchronize(); return 0; }; #endif // vim: set ts=4 et:
41b3eb7ed7d78d716d8fcb9dccde17f8567e6599.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //first cuda program //Hitender Prakash #include <stdio.h> #include <stdlib.h> //define gpu kernel __global__ void square(double *d_out, double *d_in){ int pos=threadIdx.x; d_out[pos]=d_in[pos]*d_in[pos]; } int main(int argc, char **argv){ if(argc <2 ||argc >2){ printf("\nUsage: square <size of array>"); exit(0); } int siz=atoi(argv[1]); double *d_in, *d_out, *h_in, *h_out; h_in=(double *)malloc(siz*sizeof(double)); h_out=(double *)malloc(siz*sizeof(double)); for(int i=0;i<siz;i++){ h_in[i]=i+1.0; h_out[i]=0.0; } //allocate space on GPU hipMalloc((void**)&d_in, (size_t)siz*sizeof(double)); int err= hipGetLastError(); hipMalloc((void**)&d_out, (size_t)siz*sizeof(double)); //copy from host to device hipMemcpy(d_in, h_in, siz*sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( square), dim3(1),dim3(siz), 0, 0, d_out,d_in); hipMemcpy(h_out, d_out, siz*sizeof(double), hipMemcpyDeviceToHost); printf("\nBelow are the processed square values: "); for(int i=0;i<siz;i++){ printf("\n%lf ----> %lf",h_in[i],h_out[i]); } printf("\nLast cuda error in malloc: %d",err); printf("\n"); return 0; }
41b3eb7ed7d78d716d8fcb9dccde17f8567e6599.cu
//first cuda program //Hitender Prakash #include <stdio.h> #include <stdlib.h> //define gpu kernel __global__ void square(double *d_out, double *d_in){ int pos=threadIdx.x; d_out[pos]=d_in[pos]*d_in[pos]; } int main(int argc, char **argv){ if(argc <2 ||argc >2){ printf("\nUsage: square <size of array>"); exit(0); } int siz=atoi(argv[1]); double *d_in, *d_out, *h_in, *h_out; h_in=(double *)malloc(siz*sizeof(double)); h_out=(double *)malloc(siz*sizeof(double)); for(int i=0;i<siz;i++){ h_in[i]=i+1.0; h_out[i]=0.0; } //allocate space on GPU cudaMalloc((void**)&d_in, (size_t)siz*sizeof(double)); int err= cudaGetLastError(); cudaMalloc((void**)&d_out, (size_t)siz*sizeof(double)); //copy from host to device cudaMemcpy(d_in, h_in, siz*sizeof(double), cudaMemcpyHostToDevice); square<<<1,siz>>>(d_out,d_in); cudaMemcpy(h_out, d_out, siz*sizeof(double), cudaMemcpyDeviceToHost); printf("\nBelow are the processed square values: "); for(int i=0;i<siz;i++){ printf("\n%lf ----> %lf",h_in[i],h_out[i]); } printf("\nLast cuda error in malloc: %d",err); printf("\n"); return 0; }
2e887c7f1284d43289fb5b3ff020ded6e661856f.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2017 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "negative_log_likelihood_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "../negative_log_likelihood_layer.h" #include "../neural_network_exception.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void negative_log_likelihood_upd_kernel( float * __restrict output, const float * __restrict predicted, const float * __restrict actual, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float actual_val = actual[input_offset]; float predicted_val = predicted[input_offset]; err -= (actual_val > 0.0F) ? actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F; feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) #ifdef __CUDACC_VER_MAJOR__ #if __CUDACC_VER_MAJOR__ < 9 err += __shfl_down(err, tx); #else err += __shfl_down_sync(0xFFFFFFFF, err, tx); #endif #endif int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) #ifdef __CUDACC_VER_MAJOR__ #if __CUDACC_VER_MAJOR__ < 9 err += __shfl_down(err, tx); #else err += __shfl_down_sync(0xFFFFFFFF, err, tx); #endif #endif } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } template<bool add_update_to_destination> __global__ void negative_log_likelihood_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, float scale, int elem_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; if (elem_id < elem_count) { float actual_val = target_input_neurons[elem_id]; float predicted_val = deriv_input_neurons[elem_id]; float gradient = (actual_val > 0.0F) ? 
__fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F; if (add_update_to_destination) output[elem_id] += scale * gradient; else output[elem_id] = scale * gradient; } } template<bool add_update_to_destination> __global__ void negative_log_likelihood_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, const float * __restrict scale_mask, float scale, int elem_count_per_feature_map, int input_feature_map_count, int entry_count) { int neuron_id = blockDim.x * blockIdx.x + threadIdx.x; int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((neuron_id < elem_count_per_feature_map) && (feature_map_id < input_feature_map_count) && (entry_id < entry_count)) { int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; float mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id]; float gradient = 0.0F; if (mask != 0.0F) { float actual_val = target_input_neurons[elem_id]; float predicted_val = deriv_input_neurons[elem_id]; gradient = (actual_val > 0.0F) ? __fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F; gradient *= scale * mask; } if (add_update_to_destination) output[elem_id] += gradient; else output[elem_id] = gradient; } } void negative_log_likelihood_layer_updater_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); hipLaunchKernelGGL(( negative_log_likelihood_upd_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id, *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], scale, entry_count); } void negative_log_likelihood_layer_updater_cuda::enqueue_backward_data_propagation( hipStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr 
temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { if (input_neurons_buffers.size() > 2) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); if (add_update_to_destination) hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], *input_neurons_buffers[2], scale, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); else hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], *input_neurons_buffers[2], scale, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); } else { int elem_count = entry_count * input_elem_count_per_entry_list[0]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); if (add_update_to_destination) hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], scale, elem_count); else hipLaunchKernelGGL(( negative_log_likelihood_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], scale, elem_count); } } void negative_log_likelihood_layer_updater_cuda::updater_configured() { if (actions.find(layer_action(layer_action::backward_data, 1)) != actions.end()) throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for targets"); if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end()) throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for scale mask"); std::shared_ptr<const negative_log_likelihood_layer> layer_derived = std::dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema); scale = layer_derived->scale; } bool negative_log_likelihood_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } int negative_log_likelihood_layer_updater_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
2e887c7f1284d43289fb5b3ff020ded6e661856f.cu
/* * Copyright 2011-2017 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "negative_log_likelihood_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "../negative_log_likelihood_layer.h" #include "../neural_network_exception.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void negative_log_likelihood_upd_kernel( float * __restrict output, const float * __restrict predicted, const float * __restrict actual, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float actual_val = actual[input_offset]; float predicted_val = predicted[input_offset]; err -= (actual_val > 0.0F) ? actual_val * __logf(max(predicted_val, 1.0e-20F)) : 0.0F; feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) #ifdef __CUDACC_VER_MAJOR__ #if __CUDACC_VER_MAJOR__ < 9 err += __shfl_down(err, tx); #else err += __shfl_down_sync(0xFFFFFFFF, err, tx); #endif #endif int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) #ifdef __CUDACC_VER_MAJOR__ #if __CUDACC_VER_MAJOR__ < 9 err += __shfl_down(err, tx); #else err += __shfl_down_sync(0xFFFFFFFF, err, tx); #endif #endif } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } template<bool add_update_to_destination> __global__ void negative_log_likelihood_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, float scale, int elem_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; if (elem_id < elem_count) { float actual_val = target_input_neurons[elem_id]; float predicted_val = deriv_input_neurons[elem_id]; float gradient = (actual_val > 0.0F) ? 
__fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F; if (add_update_to_destination) output[elem_id] += scale * gradient; else output[elem_id] = scale * gradient; } } template<bool add_update_to_destination> __global__ void negative_log_likelihood_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, const float * __restrict scale_mask, float scale, int elem_count_per_feature_map, int input_feature_map_count, int entry_count) { int neuron_id = blockDim.x * blockIdx.x + threadIdx.x; int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((neuron_id < elem_count_per_feature_map) && (feature_map_id < input_feature_map_count) && (entry_id < entry_count)) { int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; float mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id]; float gradient = 0.0F; if (mask != 0.0F) { float actual_val = target_input_neurons[elem_id]; float predicted_val = deriv_input_neurons[elem_id]; gradient = (actual_val > 0.0F) ? __fdividef(actual_val, max(predicted_val, 1.0e-20F)) : 0.0F; gradient *= scale * mask; } if (add_update_to_destination) output[elem_id] += gradient; else output[elem_id] = gradient; } } void negative_log_likelihood_layer_updater_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_fixed_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); negative_log_likelihood_upd_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>( *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], scale, entry_count); } void negative_log_likelihood_layer_updater_cuda::enqueue_backward_data_propagation( cudaStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, 
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { if (input_neurons_buffers.size() > 2) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); if (add_update_to_destination) negative_log_likelihood_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], *input_neurons_buffers[2], scale, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); else negative_log_likelihood_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], *input_neurons_buffers[2], scale, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); } else { int elem_count = entry_count * input_elem_count_per_entry_list[0]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); if (add_update_to_destination) negative_log_likelihood_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], scale, elem_count); else negative_log_likelihood_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[0], *input_neurons_buffers[1], scale, elem_count); } } void negative_log_likelihood_layer_updater_cuda::updater_configured() { if (actions.find(layer_action(layer_action::backward_data, 1)) != actions.end()) throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for targets"); if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end()) throw neural_network_exception("negative_log_likelihood_layer_updater_cuda cannot do backward propagation for scale mask"); std::shared_ptr<const negative_log_likelihood_layer> layer_derived = std::dynamic_pointer_cast<const negative_log_likelihood_layer>(layer_schema); scale = layer_derived->scale; } bool negative_log_likelihood_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } int negative_log_likelihood_layer_updater_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
b7c8da4cef33cdf51f568703e69f866393813fa4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include "gMF_bf_grid_engine.h" #include "gMF_bf_grid_engine_kernel.h" #include <fstream> using namespace std; using namespace gMF; gMF::bf_grid_engine::bf_grid_engine(Vector2i size_xy, Vector3i size_rgb, float sigma_xy, float sigma_rgb, int dim) { bgrid = new bf_grid(size_xy, size_rgb, sigma_xy, sigma_rgb, dim); xy_data = new Float2Image(size_xy, true, true); rgb_data = new Float3Image(size_xy, true, true); this->img_size = size_xy;; this->dim = dim; this->sigma_xy = sigma_xy; this->sigma_rgb = sigma_rgb; } gMF::bf_grid_engine::~bf_grid_engine() { delete bgrid; } void gMF::bf_grid_engine::filter_distribution(const UChar3Image* in_img, const float* in_array, float* out_array, int dim) { prepare_reference_img(in_img); splat(in_array, dim); blur(); slice(out_array); } void gMF::bf_grid_engine::splat(const float* in_array, int dim) { bgrid->set_zero(); float** grid_ptr = bgrid->grid->GetData(MEMORYDEVICE_CUDA); float* data_array_ptr = bgrid->data_array->GetData(MEMORYDEVICE_CUDA); //dim3 blockSize(bgrid->size_xy.x, bgrid->size_xy.y); //dim3 gridSize(bgrid->size_rgb.r, bgrid->size_rgb.g, bgrid->size_rgb.b); dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); } void gMF::bf_grid_engine::blur() { } void gMF::bf_grid_engine::slice(float* out_array) { } void gMF::bf_grid_engine::prepare_reference_img(const UChar3Image * in_img) { const Vector3u* in_img_ptr = in_img->GetData(MEMORYDEVICE_CUDA); Vector2f* out_xy_ptr = xy_data->GetData(MEMORYDEVICE_CUDA); Vector3f* out_rgb_ptr = rgb_data->GetData(MEMORYDEVICE_CUDA); dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); hipLaunchKernelGGL(( prepare_reference_img_device), dim3(gridSize),dim3(blockSize), 0, 0, in_img_ptr, out_xy_ptr, out_rgb_ptr, img_size, sigma_xy, sigma_rgb); }
b7c8da4cef33cdf51f568703e69f866393813fa4.cu
#pragma once #include "gMF_bf_grid_engine.h" #include "gMF_bf_grid_engine_kernel.h" #include <fstream> using namespace std; using namespace gMF; gMF::bf_grid_engine::bf_grid_engine(Vector2i size_xy, Vector3i size_rgb, float sigma_xy, float sigma_rgb, int dim) { bgrid = new bf_grid(size_xy, size_rgb, sigma_xy, sigma_rgb, dim); xy_data = new Float2Image(size_xy, true, true); rgb_data = new Float3Image(size_xy, true, true); this->img_size = size_xy;; this->dim = dim; this->sigma_xy = sigma_xy; this->sigma_rgb = sigma_rgb; } gMF::bf_grid_engine::~bf_grid_engine() { delete bgrid; } void gMF::bf_grid_engine::filter_distribution(const UChar3Image* in_img, const float* in_array, float* out_array, int dim) { prepare_reference_img(in_img); splat(in_array, dim); blur(); slice(out_array); } void gMF::bf_grid_engine::splat(const float* in_array, int dim) { bgrid->set_zero(); float** grid_ptr = bgrid->grid->GetData(MEMORYDEVICE_CUDA); float* data_array_ptr = bgrid->data_array->GetData(MEMORYDEVICE_CUDA); //dim3 blockSize(bgrid->size_xy.x, bgrid->size_xy.y); //dim3 gridSize(bgrid->size_rgb.r, bgrid->size_rgb.g, bgrid->size_rgb.b); dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); } void gMF::bf_grid_engine::blur() { } void gMF::bf_grid_engine::slice(float* out_array) { } void gMF::bf_grid_engine::prepare_reference_img(const UChar3Image * in_img) { const Vector3u* in_img_ptr = in_img->GetData(MEMORYDEVICE_CUDA); Vector2f* out_xy_ptr = xy_data->GetData(MEMORYDEVICE_CUDA); Vector3f* out_rgb_ptr = rgb_data->GetData(MEMORYDEVICE_CUDA); dim3 blockSize(BLOCK_DIM, BLOCK_DIM); dim3 gridSize((int)ceil((float)img_size.x / (float)blockSize.x), (int)ceil((float)img_size.y / (float)blockSize.y)); prepare_reference_img_device<<<gridSize,blockSize>>>(in_img_ptr, out_xy_ptr, out_rgb_ptr, img_size, sigma_xy, sigma_rgb); }
5907b64025bb76a97bccb29df727363031ad3720.hip
// !!! This is a file automatically generated by hipify!!! #ifdef USE_LEGACY_DSLASH #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <typeinfo> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <dslash.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> #include <dslash_policy.cuh> namespace quda { namespace twistedclover { #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_TWISTED_CLOVER_DIRAC #include <tmc_dslash_def.h> // Twisted Clover kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace twisted_clover using namespace twistedclover; #ifdef GPU_TWISTED_CLOVER_DIRAC template <typename sFloat, typename gFloat, typename cFloat> class TwistedCloverDslashCuda : public SharedDslashCuda { private: const QudaTwistCloverDslashType dslashType; double a, b, c, d; const FullClover &clover; const FullClover &cloverInv; protected: unsigned int sharedBytesPerThread() const { if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } } public: TwistedCloverDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const FullClover &clover, const FullClover &cloverInv, //const cFloat *clover, const float *cNorm, const cFloat *cloverInv, const float *cNrm2, int cl_stride, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa, const double mu, const double epsilon, const double k, const int parity, const int dagger, const int *commOverride) : SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), clover(clover), cloverInv(cloverInv), dslashType(dslashType) { QudaPrecision clover_prec = bindTwistedCloverTex(clover, cloverInv, parity, dslashParam); if (in->Precision() != clover_prec) errorQuda("Mixing clover and spinor precision not supported"); #ifndef DYNAMIC_CLOVER if (clover.stride != cloverInv.stride) errorQuda("clover and cloverInv must have matching strides (%d != %d)", clover.stride, cloverInv.stride); #endif a = kappa; b = mu; c = epsilon; d = k; dslashParam.twist_a = 0.0; dslashParam.twist_b = 0.0; dslashParam.a = kappa; dslashParam.a_f = kappa; dslashParam.b = mu; dslashParam.b_f = mu; dslashParam.cl_stride = clover.stride; dslashParam.fl_stride = in->VolumeCB(); } virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); unbindTwistedCloverTex(clover); } void apply(const hipStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif #ifndef USE_TEXTURE_OBJECTS if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x); #endif // USE_TEXTURE_OBJECTS TuneParam tp = tuneLaunch(*this, getTuning(), 
getVerbosity()); setParam(); dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w; for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i]; switch(dslashType){ case QUDA_DEG_CLOVER_TWIST_INV_DSLASH: DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_CLOVER_TWIST_INV: DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY: DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; default: errorQuda("Invalid twisted clover dslash type"); } } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); switch (dslashType) { case QUDA_DEG_CLOVER_TWIST_INV_DSLASH: #ifndef DYNAMIC_CLOVER strcat(key.aux,",CloverTwistInvDslash"); #else strcat(key.aux,",CloverTwistInvDynDslash"); #endif break; case QUDA_DEG_DSLASH_CLOVER_TWIST_INV: #ifndef DYNAMIC_CLOVER strcat(key.aux,",Dslash"); #else strcat(key.aux,",DynDslash"); #endif break; case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY: #ifndef DYNAMIC_CLOVER strcat(key.aux,",DslashCloverTwist"); #else strcat(key.aux,",DynDslashCloverTwist"); #endif break; default: errorQuda("Unsupported twisted-dslash type %d", dslashType); } return key; } long long flops() const { int clover_flops = 504 + 48; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: // clover flops are done in the interior kernel flops += clover_flops * in->VolumeCB(); break; } return flops; } long long bytes() const { bool isFixed = (in->Precision() == sizeof(short) || in->Precision() == sizeof(char)) ? true : false; int clover_bytes = 72 * in->Precision() + (isFixed ? 
2*sizeof(float) : 0); long long bytes = DslashCuda::bytes(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: bytes += clover_bytes*in->VolumeCB(); break; } return bytes; } }; #endif // GPU_TWISTED_CLOVER_DIRAC void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu, const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile) { #ifdef GPU_TWISTED_CLOVER_DIRAC const_cast<cudaColorSpinorField*>(in)->createComms(1); DslashCuda *dslash = nullptr; if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new TwistedCloverDslashCuda<double2,double2,double2> (out, gauge, *clover, *cloverInv, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new TwistedCloverDslashCuda<float4,float4,float4> (out, gauge, *clover, *cloverInv, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new TwistedCloverDslashCuda<short4,short4,short4> (out, gauge, *clover, *cloverInv, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } int ghost_threads[4] = {0}; int bulk_threads = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? in->Volume() : in->Volume() / 2; for (int i=0;i<4;i++) ghost_threads[i] = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? in->GhostFace()[i] : in->GhostFace()[i] / 2; dslash::DslashPolicyTune<DslashCuda> dslash_policy( *dslash, const_cast<cudaColorSpinorField *>(in), bulk_threads, ghost_threads, profile); dslash_policy.apply(0); delete dslash; #else errorQuda("Twisted clover dslash has not been built"); #endif } } #endif
5907b64025bb76a97bccb29df727363031ad3720.cu
#ifdef USE_LEGACY_DSLASH #include <cstdlib> #include <cstdio> #include <string> #include <iostream> #include <typeinfo> #include <color_spinor_field.h> #include <clover_field.h> // these control the Wilson-type actions #ifdef GPU_WILSON_DIRAC //#define DIRECT_ACCESS_LINK //#define DIRECT_ACCESS_WILSON_SPINOR //#define DIRECT_ACCESS_WILSON_ACCUM //#define DIRECT_ACCESS_WILSON_INTER //#define DIRECT_ACCESS_WILSON_PACK_SPINOR //#define DIRECT_ACCESS_CLOVER #endif // GPU_WILSON_DIRAC #include <quda_internal.h> #include <dslash_quda.h> #include <dslash.h> #include <sys/time.h> #include <blas_quda.h> #include <inline_ptx.h> #include <dslash_policy.cuh> namespace quda { namespace twistedclover { #include <dslash_constants.h> #include <dslash_textures.h> #include <dslash_index.cuh> // Enable shared memory dslash for Fermi architecture //#define SHARED_WILSON_DSLASH //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access #ifdef GPU_TWISTED_CLOVER_DIRAC #include <tmc_dslash_def.h> // Twisted Clover kernels #endif #ifndef DSLASH_SHARED_FLOATS_PER_THREAD #define DSLASH_SHARED_FLOATS_PER_THREAD 0 #endif #include <dslash_quda.cuh> } // end namespace twisted_clover using namespace twistedclover; #ifdef GPU_TWISTED_CLOVER_DIRAC template <typename sFloat, typename gFloat, typename cFloat> class TwistedCloverDslashCuda : public SharedDslashCuda { private: const QudaTwistCloverDslashType dslashType; double a, b, c, d; const FullClover &clover; const FullClover &cloverInv; protected: unsigned int sharedBytesPerThread() const { if (dslashParam.kernel_type == INTERIOR_KERNEL) { int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float)); return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size; } else { return 0; } } public: TwistedCloverDslashCuda(cudaColorSpinorField *out, const GaugeField &gauge, const FullClover &clover, const FullClover &cloverInv, //const cFloat *clover, const float *cNorm, const cFloat *cloverInv, const float *cNrm2, int cl_stride, const cudaColorSpinorField *in, const cudaColorSpinorField *x, const QudaTwistCloverDslashType dslashType, const double kappa, const double mu, const double epsilon, const double k, const int parity, const int dagger, const int *commOverride) : SharedDslashCuda(out, in, x, gauge, parity, dagger, commOverride), clover(clover), cloverInv(cloverInv), dslashType(dslashType) { QudaPrecision clover_prec = bindTwistedCloverTex(clover, cloverInv, parity, dslashParam); if (in->Precision() != clover_prec) errorQuda("Mixing clover and spinor precision not supported"); #ifndef DYNAMIC_CLOVER if (clover.stride != cloverInv.stride) errorQuda("clover and cloverInv must have matching strides (%d != %d)", clover.stride, cloverInv.stride); #endif a = kappa; b = mu; c = epsilon; d = k; dslashParam.twist_a = 0.0; dslashParam.twist_b = 0.0; dslashParam.a = kappa; dslashParam.a_f = kappa; dslashParam.b = mu; dslashParam.b_f = mu; dslashParam.cl_stride = clover.stride; dslashParam.fl_stride = in->VolumeCB(); } virtual ~TwistedCloverDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); unbindTwistedCloverTex(clover); } void apply(const cudaStream_t &stream) { #ifdef SHARED_WILSON_DSLASH if (dslashParam.kernel_type == EXTERIOR_KERNEL_X) errorQuda("Shared dslash does not yet support X-dimension partitioning"); #endif #ifndef USE_TEXTURE_OBJECTS if (dslashParam.kernel_type == INTERIOR_KERNEL) bindSpinorTex<sFloat>(in, out, x); #endif // USE_TEXTURE_OBJECTS TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); setParam(); dslashParam.block[0] = tp.aux.x; 
dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w; for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i]; switch(dslashType){ case QUDA_DEG_CLOVER_TWIST_INV_DSLASH: DSLASH(twistedCloverInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_CLOVER_TWIST_INV: DSLASH(twistedCloverDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY: DSLASH(twistedCloverDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam); break; default: errorQuda("Invalid twisted clover dslash type"); } } TuneKey tuneKey() const { TuneKey key = DslashCuda::tuneKey(); switch (dslashType) { case QUDA_DEG_CLOVER_TWIST_INV_DSLASH: #ifndef DYNAMIC_CLOVER strcat(key.aux,",CloverTwistInvDslash"); #else strcat(key.aux,",CloverTwistInvDynDslash"); #endif break; case QUDA_DEG_DSLASH_CLOVER_TWIST_INV: #ifndef DYNAMIC_CLOVER strcat(key.aux,",Dslash"); #else strcat(key.aux,",DynDslash"); #endif break; case QUDA_DEG_DSLASH_CLOVER_TWIST_XPAY: #ifndef DYNAMIC_CLOVER strcat(key.aux,",DslashCloverTwist"); #else strcat(key.aux,",DynDslashCloverTwist"); #endif break; default: errorQuda("Unsupported twisted-dslash type %d", dslashType); } return key; } long long flops() const { int clover_flops = 504 + 48; long long flops = DslashCuda::flops(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: // clover flops are done in the interior kernel flops += clover_flops * in->VolumeCB(); break; } return flops; } long long bytes() const { bool isFixed = (in->Precision() == sizeof(short) || in->Precision() == sizeof(char)) ? true : false; int clover_bytes = 72 * in->Precision() + (isFixed ? 2*sizeof(float) : 0); long long bytes = DslashCuda::bytes(); switch(dslashParam.kernel_type) { case EXTERIOR_KERNEL_X: case EXTERIOR_KERNEL_Y: case EXTERIOR_KERNEL_Z: case EXTERIOR_KERNEL_T: case EXTERIOR_KERNEL_ALL: break; case INTERIOR_KERNEL: case KERNEL_POLICY: bytes += clover_bytes*in->VolumeCB(); break; } return bytes; } }; #endif // GPU_TWISTED_CLOVER_DIRAC void twistedCloverDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge, const FullClover *clover, const FullClover *cloverInv, const cudaColorSpinorField *in, const int parity, const int dagger, const cudaColorSpinorField *x, const QudaTwistCloverDslashType type, const double &kappa, const double &mu, const double &epsilon, const double &k, const int *commOverride, TimeProfile &profile) { #ifdef GPU_TWISTED_CLOVER_DIRAC const_cast<cudaColorSpinorField*>(in)->createComms(1); DslashCuda *dslash = nullptr; if (in->Precision() == QUDA_DOUBLE_PRECISION) { dslash = new TwistedCloverDslashCuda<double2,double2,double2> (out, gauge, *clover, *cloverInv, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_SINGLE_PRECISION) { dslash = new TwistedCloverDslashCuda<float4,float4,float4> (out, gauge, *clover, *cloverInv, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } else if (in->Precision() == QUDA_HALF_PRECISION) { dslash = new TwistedCloverDslashCuda<short4,short4,short4> (out, gauge, *clover, *cloverInv, in, x, type, kappa, mu, epsilon, k, parity, dagger, commOverride); } int ghost_threads[4] = {0}; int bulk_threads = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? 
in->Volume() : in->Volume() / 2; for (int i=0;i<4;i++) ghost_threads[i] = (in->TwistFlavor() == QUDA_TWIST_SINGLET) ? in->GhostFace()[i] : in->GhostFace()[i] / 2; dslash::DslashPolicyTune<DslashCuda> dslash_policy( *dslash, const_cast<cudaColorSpinorField *>(in), bulk_threads, ghost_threads, profile); dslash_policy.apply(0); delete dslash; #else errorQuda("Twisted clover dslash has not been built"); #endif } } #endif
c0e2a40cda019ffa5fe07a687c3fe7198128d551.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/bias_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void BiasForward(const int n, const Dtype* in, const Dtype* bias, const int bias_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { // index / inner_dim gives which channel this element belongs to (across all samples) // % bias_dim gives the actual channel index. const int bias_index = (index / inner_dim) % bias_dim; out[index] = in[index] + bias[bias_index]; } } template <typename Dtype> void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // N*C*H*W const int count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bias_data = ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); // LOG(INFO)<<"bias_data"<<bias_data->shape_string(); Dtype* top_data = top[0]->mutable_gpu_data(); // Here we find out whether the data layout is optimal. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(HIP_KERNEL_NAME(BiasForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data); } template <typename Dtype> void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // propagate_down[0]=0: the data does not need backpropagation //LOG(INFO)<<"entering bias backward propagate_down[0]"<<propagate_down[0]; // the two should be equal here //LOG(INFO)<<"bottom[0] != top[0]"<<bottom[0] != top[0]; if (propagate_down[0] && bottom[0] != top[0]) { const Dtype* top_diff = top[0]->gpu_diff(); //LOG(INFO)<<"top_diff"<<top_diff->shape_string(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(bottom[0]->count(), top_diff, bottom_diff); } // in-place, we don't need to do anything with the data diff. // Understood. const bool bias_param = (bottom.size() == 1); //propagate_down[1]=0 // this->param_propagate_down_[0]=1 //LOG(INFO)<<"propagate_down[1]"<<propagate_down[1]; //LOG(INFO)<<"this->param_propagate_down_[0]"<<this->param_propagate_down_[0]; if ((!bias_param && propagate_down[1]) || (bias_param && this->param_propagate_down_[0])) { //LOG(INFO)<<"bias_param"<<bias_param; const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1]) ->mutable_gpu_diff(); bool accum = bias_param; // 32 samples // the bias dimension equals the number of channels //LOG(INFO)<<"bias_dim_"<<bias_dim_; // dim_ == N*H*W = 100352 // LOG(INFO)<<"dim_"<<dim_; for (int n = 0; n < outer_dim_; ++n) { // gemv of the (C x H*W) top_diff block with the (H*W) all-ones bias_multiplier_ caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1), top_diff, bias_multiplier_.gpu_data(), Dtype(accum), bias_diff); top_diff += dim_; accum = true; } } } INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer); } // namespace caffe
c0e2a40cda019ffa5fe07a687c3fe7198128d551.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layers/bias_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void BiasForward(const int n, const Dtype* in, const Dtype* bias, const int bias_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { // index / inner_dim gives which channel this element belongs to (across all samples) // % bias_dim gives the actual channel index. const int bias_index = (index / inner_dim) % bias_dim; out[index] = in[index] + bias[bias_index]; } } template <typename Dtype> void BiasLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // N*C*H*W const int count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bias_data = ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); // LOG(INFO)<<"bias_data"<<bias_data->shape_string(); Dtype* top_data = top[0]->mutable_gpu_data(); // Here we find out whether the data layout is optimal. BiasForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>> ( count, bottom_data, bias_data, bias_dim_, inner_dim_, top_data); } template <typename Dtype> void BiasLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // propagate_down[0]=0: the data does not need backpropagation //LOG(INFO)<<"entering bias backward propagate_down[0]"<<propagate_down[0]; // the two should be equal here //LOG(INFO)<<"bottom[0] != top[0]"<<bottom[0] != top[0]; if (propagate_down[0] && bottom[0] != top[0]) { const Dtype* top_diff = top[0]->gpu_diff(); //LOG(INFO)<<"top_diff"<<top_diff->shape_string(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(bottom[0]->count(), top_diff, bottom_diff); } // in-place, we don't need to do anything with the data diff. // Understood. const bool bias_param = (bottom.size() == 1); //propagate_down[1]=0 // this->param_propagate_down_[0]=1 //LOG(INFO)<<"propagate_down[1]"<<propagate_down[1]; //LOG(INFO)<<"this->param_propagate_down_[0]"<<this->param_propagate_down_[0]; if ((!bias_param && propagate_down[1]) || (bias_param && this->param_propagate_down_[0])) { //LOG(INFO)<<"bias_param"<<bias_param; const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bias_diff = (bias_param ? this->blobs_[0].get() : bottom[1]) ->mutable_gpu_diff(); bool accum = bias_param; // 32 samples // the bias dimension equals the number of channels //LOG(INFO)<<"bias_dim_"<<bias_dim_; // dim_ == N*H*W = 100352 // LOG(INFO)<<"dim_"<<dim_; for (int n = 0; n < outer_dim_; ++n) { // gemv of the (C x H*W) top_diff block with the (H*W) all-ones bias_multiplier_ caffe_gpu_gemv(CblasNoTrans, bias_dim_, inner_dim_, Dtype(1), top_diff, bias_multiplier_.gpu_data(), Dtype(accum), bias_diff); top_diff += dim_; accum = true; } } } INSTANTIATE_LAYER_GPU_FUNCS(BiasLayer); } // namespace caffe
e52dc14cecafe54abf35e144f2b4672943aac227.hip
// !!! This is a file automatically generated by hipify!!! #include "cupoch/visualization/shader/texture_phong_shader.h" #include "cupoch/geometry/image.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/utility/console.h" #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { GLenum GetFormat(const geometry::Geometry &geometry) { auto it = gl_helper::texture_format_map_.find( ((const geometry::TriangleMesh &)geometry).texture_.num_of_channels_); if (it == gl_helper::texture_format_map_.end()) { utility::LogWarning("Unknown texture format, abort!"); return false; } return it->second; } GLenum GetType(const geometry::Geometry &geometry) { auto it = gl_helper::texture_type_map_.find( ((const geometry::TriangleMesh &)geometry).texture_.bytes_per_channel_); if (it == gl_helper::texture_type_map_.end()) { utility::LogWarning("Unknown texture type, abort!"); return false; } return it->second; } struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, const Eigen::Vector2f* triangle_uvs, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), triangle_uvs_(triangle_uvs), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const Eigen::Vector2f* triangle_uvs_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector2f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; if (shade_option_ == RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertices_[vi], triangle_normals_[i], triangle_uvs_[k]); } else { return thrust::make_tuple(vertices_[vi], vertex_normals_[vi], triangle_uvs_[k]); } } }; } bool TexturePhongShader::Compile() { if (CompileShaders(texture_phong_vertex_shader, NULL, texture_phong_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); vertex_uv_ = glGetAttribLocation(program_, "vertex_uv"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); light_position_world_ = glGetUniformLocation(program_, "light_position_world_4"); light_color_ = glGetUniformLocation(program_, "light_color_4"); light_diffuse_power_ = glGetUniformLocation(program_, "light_diffuse_power_4"); light_specular_power_ = glGetUniformLocation(program_, "light_specular_power_4"); light_specular_shininess_ = glGetUniformLocation(program_, "light_specular_shininess_4"); light_ambient_ = glGetUniformLocation(program_, "light_ambient"); diffuse_texture_ = glGetUniformLocation(program_, "diffuse_texture"); return true; } void TexturePhongShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool TexturePhongShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already 
geometry, we first unbind it. // We use GL_STATIC_DRAW. When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); const size_t num_texture_height = GetTextureHeight(geometry); const size_t num_texture_width = GetTextureWidth(geometry); glGenTextures(1, &diffuse_texture_buffer_); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glTexImage2D(GL_TEXTURE_2D, 0, format, num_texture_width, num_texture_height, 0, format, type, 0); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_uv_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], vertex_uv_buffer_, hipGraphicsMapFlagsNone)); glGenBuffers(1, &diffuse_texture_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); size_t texture_size = GetTextureSize(geometry); glBufferData(GL_PIXEL_UNPACK_BUFFER, texture_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[3], diffuse_texture_pixel_buffer_, hipGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; Eigen::Vector2f* raw_uvs_ptr; uint8_t* raw_render_texture_ptr; size_t n_bytes; cudaSafeCall(hipGraphicsMapResources(4, cuda_graphics_resources_)); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[2])); cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_render_texture_ptr, &n_bytes, cuda_graphics_resources_[3])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr); 
thrust::device_ptr<uint8_t> dev_texture_ptr = thrust::device_pointer_cast(raw_render_texture_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr, dev_uvs_ptr, dev_texture_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(4); bound_ = true; return true; } bool TexturePhongShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetTextureHeight(geometry); const size_t num_data_width = GetTextureWidth(geometry); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glUniformMatrix4fv(light_position_world_, 1, GL_FALSE, light_position_world_data_.data()); glUniformMatrix4fv(light_color_, 1, GL_FALSE, light_color_data_.data()); glUniform4fv(light_diffuse_power_, 1, light_diffuse_power_data_.data()); glUniform4fv(light_specular_power_, 1, light_specular_power_data_.data()); glUniform4fv(light_specular_shininess_, 1, light_specular_shininess_data_.data()); glUniform4fv(light_ambient_, 1, light_ambient_data_.data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, format, type, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(diffuse_texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_uv_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); glDisableVertexAttribArray(vertex_uv_); return true; } void TexturePhongShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[1])); cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[2])); cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[3])); } glDeleteBuffers(1, &diffuse_texture_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); glDeleteBuffers(1, &vertex_uv_buffer_); glDeleteTextures(1, &diffuse_texture_buffer_); bound_ = false; } } void TexturePhongShader::SetLighting(const ViewControl &view, const RenderOption &option) { const auto &box = view.GetBoundingBox(); light_position_world_data_.setOnes(); light_color_data_.setOnes(); for (int i = 0; i < 4; i++) { light_position_world_data_.block<3, 1>(0, i) = box.GetCenter().cast<GLfloat>() + (float)box.GetMaxExtent() * ((float)option.light_position_relative_[i](0) * view.GetRight() + 
(float)option.light_position_relative_[i](1) * view.GetUp() + (float)option.light_position_relative_[i](2) * view.GetFront()); light_color_data_.block<3, 1>(0, i) = option.light_color_[i].cast<GLfloat>(); } if (option.light_on_) { light_diffuse_power_data_ = Eigen::Vector4f(option.light_diffuse_power_).cast<GLfloat>(); light_specular_power_data_ = Eigen::Vector4f(option.light_specular_power_).cast<GLfloat>(); light_specular_shininess_data_ = Eigen::Vector4f(option.light_specular_shininess_) .cast<GLfloat>(); light_ambient_data_.block<3, 1>(0, 0) = option.light_ambient_color_.cast<GLfloat>(); light_ambient_data_(3) = 1.0f; } else { light_diffuse_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_shininess_data_ = gl_helper::GLVector4f::Ones(); light_ambient_data_ = gl_helper::GLVector4f(1.0f, 1.0f, 1.0f, 1.0f); } } bool TexturePhongShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } SetLighting(view, option); return true; } bool TexturePhongShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals, thrust::device_ptr<Eigen::Vector2f> &uvs, thrust::device_ptr<uint8_t> &texture_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), thrust::raw_pointer_cast(mesh.triangle_uvs_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals, uvs), func); thrust::copy(mesh.texture_.data_.begin(), mesh.texture_.data_.end(), texture_image); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t TexturePhongShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } size_t TexturePhongShaderForTriangleMesh::GetTextureSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh 
&)geometry).texture_.data_.size(); } size_t TexturePhongShaderForTriangleMesh::GetTextureHeight(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.height_; } size_t TexturePhongShaderForTriangleMesh::GetTextureWidth(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.width_; }
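// ---------------------------------------------------------------------------
// [Editor's note - illustrative sketch, not part of the original file above]
// PrepareBinding in the shader above expands per-triangle vertex data by
// writing a (position, normal, uv) tuple per output element through a zipped
// output iterator (cupoch's make_tuple_iterator helper). The helper itself is
// not reproduced here; this minimal standalone example shows the same pattern
// with plain thrust::make_zip_iterator. All names below are hypothetical.
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>

struct expand_example_functor {
    const float3* vertices_;
    const int* triangles_;   // 3 vertex indices per triangle, flattened
    __device__ thrust::tuple<float3, int> operator()(size_t k) const {
        int vi = triangles_[k];                      // vertex index of corner k
        return thrust::make_tuple(vertices_[vi], vi);
    }
};

void expand_example(const thrust::device_vector<float3>& vertices,
                    const thrust::device_vector<int>& triangles,
                    thrust::device_vector<float3>& out_points,
                    thrust::device_vector<int>& out_indices) {
    size_t n = triangles.size();                     // one output element per corner
    out_points.resize(n);
    out_indices.resize(n);
    expand_example_functor func{thrust::raw_pointer_cast(vertices.data()),
                                thrust::raw_pointer_cast(triangles.data())};
    // each result tuple is scattered into the two output vectors at once
    thrust::transform(thrust::make_counting_iterator<size_t>(0),
                      thrust::make_counting_iterator<size_t>(n),
                      thrust::make_zip_iterator(
                          thrust::make_tuple(out_points.begin(), out_indices.begin())),
                      func);
}
// ---------------------------------------------------------------------------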
e52dc14cecafe54abf35e144f2b4672943aac227.cu
#include "cupoch/visualization/shader/texture_phong_shader.h" #include "cupoch/geometry/image.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/trianglemesh.h" #include "cupoch/visualization/shader/shader.h" #include "cupoch/visualization/utility/color_map.h" #include "cupoch/utility/console.h" #include <cuda_runtime.h> #include <cuda_gl_interop.h> using namespace cupoch; using namespace cupoch::visualization; using namespace cupoch::visualization::glsl; namespace { GLenum GetFormat(const geometry::Geometry &geometry) { auto it = gl_helper::texture_format_map_.find( ((const geometry::TriangleMesh &)geometry).texture_.num_of_channels_); if (it == gl_helper::texture_format_map_.end()) { utility::LogWarning("Unknown texture format, abort!"); return false; } return it->second; } GLenum GetType(const geometry::Geometry &geometry) { auto it = gl_helper::texture_type_map_.find( ((const geometry::TriangleMesh &)geometry).texture_.bytes_per_channel_); if (it == gl_helper::texture_type_map_.end()) { utility::LogWarning("Unknown texture type, abort!"); return false; } return it->second; } struct copy_trianglemesh_functor { copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals, const int* triangles, const Eigen::Vector3f* triangle_normals, const Eigen::Vector2f* triangle_uvs, RenderOption::MeshShadeOption shade_option) : vertices_(vertices), vertex_normals_(vertex_normals), triangles_(triangles), triangle_normals_(triangle_normals), triangle_uvs_(triangle_uvs), shade_option_(shade_option) {}; const Eigen::Vector3f* vertices_; const Eigen::Vector3f* vertex_normals_; const int* triangles_; const Eigen::Vector3f* triangle_normals_; const Eigen::Vector2f* triangle_uvs_; const RenderOption::MeshShadeOption shade_option_; __device__ thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector2f> operator() (size_t k) const { int i = k / 3; int vi = triangles_[k]; if (shade_option_ == RenderOption::MeshShadeOption::FlatShade) { return thrust::make_tuple(vertices_[vi], triangle_normals_[i], triangle_uvs_[k]); } else { return thrust::make_tuple(vertices_[vi], vertex_normals_[vi], triangle_uvs_[k]); } } }; } bool TexturePhongShader::Compile() { if (CompileShaders(texture_phong_vertex_shader, NULL, texture_phong_fragment_shader) == false) { PrintShaderWarning("Compiling shaders failed."); return false; } vertex_position_ = glGetAttribLocation(program_, "vertex_position"); vertex_normal_ = glGetAttribLocation(program_, "vertex_normal"); vertex_uv_ = glGetAttribLocation(program_, "vertex_uv"); MVP_ = glGetUniformLocation(program_, "MVP"); V_ = glGetUniformLocation(program_, "V"); M_ = glGetUniformLocation(program_, "M"); light_position_world_ = glGetUniformLocation(program_, "light_position_world_4"); light_color_ = glGetUniformLocation(program_, "light_color_4"); light_diffuse_power_ = glGetUniformLocation(program_, "light_diffuse_power_4"); light_specular_power_ = glGetUniformLocation(program_, "light_specular_power_4"); light_specular_shininess_ = glGetUniformLocation(program_, "light_specular_shininess_4"); light_ambient_ = glGetUniformLocation(program_, "light_ambient"); diffuse_texture_ = glGetUniformLocation(program_, "diffuse_texture"); return true; } void TexturePhongShader::Release() { UnbindGeometry(true); ReleaseProgram(); } bool TexturePhongShader::BindGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { // If there is already geometry, we first unbind it. // We use GL_STATIC_DRAW. 
When geometry changes, we clear buffers and // rebind the geometry. Note that this approach is slow. If the geometry is // changing per frame, consider implementing a new ShaderWrapper using // GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object // Streaming mechanisms. UnbindGeometry(); // Prepare data to be passed to GPU const size_t num_data_size = GetDataSize(geometry); const size_t num_texture_height = GetTextureHeight(geometry); const size_t num_texture_width = GetTextureWidth(geometry); glGenTextures(1, &diffuse_texture_buffer_); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glTexImage2D(GL_TEXTURE_2D, 0, format, num_texture_width, num_texture_height, 0, format, type, 0); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Create buffers and bind the geometry glGenBuffers(1, &vertex_position_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_normal_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &vertex_uv_buffer_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], vertex_uv_buffer_, cudaGraphicsMapFlagsNone)); glGenBuffers(1, &diffuse_texture_pixel_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); size_t texture_size = GetTextureSize(geometry); glBufferData(GL_PIXEL_UNPACK_BUFFER, texture_size, 0, GL_STATIC_DRAW); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[3], diffuse_texture_pixel_buffer_, cudaGraphicsMapFlagsNone)); Eigen::Vector3f* raw_points_ptr; Eigen::Vector3f* raw_normals_ptr; Eigen::Vector2f* raw_uvs_ptr; uint8_t* raw_render_texture_ptr; size_t n_bytes; cudaSafeCall(cudaGraphicsMapResources(4, cuda_graphics_resources_)); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[2])); cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_render_texture_ptr, &n_bytes, cuda_graphics_resources_[3])); thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr); thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr); thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr); thrust::device_ptr<uint8_t> dev_texture_ptr = 
thrust::device_pointer_cast(raw_render_texture_ptr); if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr, dev_uvs_ptr, dev_texture_ptr) == false) { PrintShaderWarning("Binding failed when preparing data."); return false; } Unmap(4); bound_ = true; return true; } bool TexturePhongShader::RenderGeometry(const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (PrepareRendering(geometry, option, view) == false) { PrintShaderWarning("Rendering failed during preparation."); return false; } const size_t num_data_height = GetTextureHeight(geometry); const size_t num_data_width = GetTextureWidth(geometry); GLenum format = GetFormat(geometry); GLenum type = GetType(geometry); glUseProgram(program_); glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data()); glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data()); glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data()); glUniformMatrix4fv(light_position_world_, 1, GL_FALSE, light_position_world_data_.data()); glUniformMatrix4fv(light_color_, 1, GL_FALSE, light_color_data_.data()); glUniform4fv(light_diffuse_power_, 1, light_diffuse_power_data_.data()); glUniform4fv(light_specular_power_, 1, light_specular_power_data_.data()); glUniform4fv(light_specular_shininess_, 1, light_specular_shininess_data_.data()); glUniform4fv(light_ambient_, 1, light_ambient_data_.data()); glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, diffuse_texture_pixel_buffer_); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, num_data_width, num_data_height, format, type, 0); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glUniform1i(diffuse_texture_, 0); glEnableVertexAttribArray(vertex_position_); glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_); glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_normal_); glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_); glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL); glEnableVertexAttribArray(vertex_uv_); glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_); glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL); glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_); glDisableVertexAttribArray(vertex_position_); glDisableVertexAttribArray(vertex_normal_); glDisableVertexAttribArray(vertex_uv_); return true; } void TexturePhongShader::UnbindGeometry(bool finalize) { if (bound_) { if (!finalize) { cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[0])); cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[1])); cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[2])); cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[3])); } glDeleteBuffers(1, &diffuse_texture_buffer_); glDeleteBuffers(1, &vertex_position_buffer_); glDeleteBuffers(1, &vertex_normal_buffer_); glDeleteBuffers(1, &vertex_uv_buffer_); glDeleteTextures(1, &diffuse_texture_buffer_); bound_ = false; } } void TexturePhongShader::SetLighting(const ViewControl &view, const RenderOption &option) { const auto &box = view.GetBoundingBox(); light_position_world_data_.setOnes(); light_color_data_.setOnes(); for (int i = 0; i < 4; i++) { light_position_world_data_.block<3, 1>(0, i) = box.GetCenter().cast<GLfloat>() + (float)box.GetMaxExtent() * ((float)option.light_position_relative_[i](0) * view.GetRight() + (float)option.light_position_relative_[i](1) * view.GetUp() + 
(float)option.light_position_relative_[i](2) * view.GetFront()); light_color_data_.block<3, 1>(0, i) = option.light_color_[i].cast<GLfloat>(); } if (option.light_on_) { light_diffuse_power_data_ = Eigen::Vector4f(option.light_diffuse_power_).cast<GLfloat>(); light_specular_power_data_ = Eigen::Vector4f(option.light_specular_power_).cast<GLfloat>(); light_specular_shininess_data_ = Eigen::Vector4f(option.light_specular_shininess_) .cast<GLfloat>(); light_ambient_data_.block<3, 1>(0, 0) = option.light_ambient_color_.cast<GLfloat>(); light_ambient_data_(3) = 1.0f; } else { light_diffuse_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_power_data_ = gl_helper::GLVector4f::Zero(); light_specular_shininess_data_ = gl_helper::GLVector4f::Ones(); light_ambient_data_ = gl_helper::GLVector4f(1.0f, 1.0f, 1.0f, 1.0f); } } bool TexturePhongShaderForTriangleMesh::PrepareRendering( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } if (option.mesh_show_back_face_) { glDisable(GL_CULL_FACE); } else { glEnable(GL_CULL_FACE); } glEnable(GL_DEPTH_TEST); glDepthFunc(GLenum(option.GetGLDepthFunc())); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); if (option.mesh_show_wireframe_) { glEnable(GL_POLYGON_OFFSET_FILL); glPolygonOffset(1.0, 1.0); } else { glDisable(GL_POLYGON_OFFSET_FILL); } SetLighting(view, option); return true; } bool TexturePhongShaderForTriangleMesh::PrepareBinding( const geometry::Geometry &geometry, const RenderOption &option, const ViewControl &view, thrust::device_ptr<Eigen::Vector3f> &points, thrust::device_ptr<Eigen::Vector3f> &normals, thrust::device_ptr<Eigen::Vector2f> &uvs, thrust::device_ptr<uint8_t> &texture_image) { if (geometry.GetGeometryType() != geometry::Geometry::GeometryType::TriangleMesh) { PrintShaderWarning("Rendering type is not geometry::TriangleMesh."); return false; } const geometry::TriangleMesh &mesh = (const geometry::TriangleMesh &)geometry; if (mesh.HasTriangles() == false) { PrintShaderWarning("Binding failed with empty triangle mesh."); return false; } if (mesh.HasTriangleNormals() == false || mesh.HasVertexNormals() == false) { PrintShaderWarning("Binding failed because mesh has no normals."); PrintShaderWarning("Call ComputeVertexNormals() before binding."); return false; } copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()), thrust::raw_pointer_cast(mesh.vertex_normals_.data()), (int*)(thrust::raw_pointer_cast(mesh.triangles_.data())), thrust::raw_pointer_cast(mesh.triangle_normals_.data()), thrust::raw_pointer_cast(mesh.triangle_uvs_.data()), option.mesh_shade_option_); thrust::transform(thrust::make_counting_iterator<size_t>(0), thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3), make_tuple_iterator(points, normals, uvs), func); thrust::copy(mesh.texture_.data_.begin(), mesh.texture_.data_.end(), texture_image); draw_arrays_mode_ = GL_TRIANGLES; draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3); return true; } size_t TexturePhongShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3; } size_t TexturePhongShaderForTriangleMesh::GetTextureSize(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.data_.size(); } size_t 
TexturePhongShaderForTriangleMesh::GetTextureHeight(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.height_; } size_t TexturePhongShaderForTriangleMesh::GetTextureWidth(const geometry::Geometry &geometry) const { return ((const geometry::TriangleMesh &)geometry).texture_.width_; }
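// ---------------------------------------------------------------------------
// [Editor's note - illustrative sketch, not part of the original files above]
// BindGeometry/UnbindGeometry follow the standard CUDA-OpenGL interop life
// cycle: register a GL buffer once, map it while CUDA writes into it, unmap
// before GL draws from it, and unregister before the buffer is deleted. A
// condensed version of that life cycle, assuming a current GL context and a
// loader such as GLEW are already initialized:
#include <GL/glew.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

void interop_lifecycle_example(size_t n_bytes) {
    GLuint vbo = 0;
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, n_bytes, 0, GL_STATIC_DRAW);      // allocate storage only
    glBindBuffer(GL_ARRAY_BUFFER, 0);

    cudaGraphicsResource_t resource = nullptr;
    cudaGraphicsGLRegisterBuffer(&resource, vbo, cudaGraphicsMapFlagsNone);  // register once

    void* dev_ptr = nullptr;
    size_t mapped_bytes = 0;
    cudaGraphicsMapResources(1, &resource);                         // CUDA owns the buffer now
    cudaGraphicsResourceGetMappedPointer(&dev_ptr, &mapped_bytes, resource);
    // ... launch kernels or thrust algorithms that write through dev_ptr ...
    cudaGraphicsUnmapResources(1, &resource);                       // hand it back to OpenGL

    cudaGraphicsUnregisterResource(resource);                       // before glDeleteBuffers
    glDeleteBuffers(1, &vbo);
}
// ---------------------------------------------------------------------------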
6a29f8ae2348f242632ecb7af3d2ecbd06cfa7e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by ameen on 07/05/20. // #include "sql_delete.cuh" #define NUM_THREADS 512 __global__ void deleteKernel(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs, int numRows, bool *flag_d) { void *res; int resType = 1; int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS; unsigned int start = rowsPerBlock * threadIdx.x; unsigned int end = rowsPerBlock * (threadIdx.x + 1); void *tempRow = malloc(rowSize); void *row; bool flag; for (unsigned int i = start; i < end; ++i) { if (i >= numRows) break; row = (char *)data + i * rowSize; eval(row, offset, types, exprs, res, resType); flag = false; if (resType == RESTYPE_INT) { flag = *(int *) res != 0; } else if (resType == RESTYPE_FLT) { flag = *(float *) res != 0; } free(res); if (!flag) continue; flag_d[i] = flag; } } void sql_delete::execute(std::string &query) { hsql::SQLParserResult *result = hsql::SQLParser::parseSQLString(query); std::vector<std::string> columnNames; std::string tableName; if (result->isValid()) { const auto *stmt = (const hsql::DeleteStatement *) result->getStatement(0); tableName = stmt->tableName; std::vector<myExpr> flattenedExpr; Data d(tableName); exprToVec(stmt->expr, flattenedExpr, d.mdata.columns, d); hipSetDevice(0); hipDeviceReset(); int rowSize = d.mdata.rowSize; void *data = malloc(d.chunkSize * rowSize); void *data_d; int numCols = d.mdata.columns.size(); ColType *type_d; hipMalloc(&type_d, sizeof(ColType) * numCols); hipMemcpy(type_d, &d.mdata.datatypes[0], sizeof(ColType) * numCols, hipMemcpyHostToDevice); myExpr *where_d; hipMalloc(&where_d, sizeof(myExpr) * flattenedExpr.size()); hipMemcpy(where_d, &flattenedExpr[0], sizeof(myExpr) * flattenedExpr.size(), hipMemcpyHostToDevice); int *offsets = (int *) malloc(sizeof(int) * (numCols + 1)); offsets[0] = 0; //d.mdata.datatypes[0].size; for (int i = 1; i <= numCols; i++) { offsets[i] = offsets[i - 1] + d.mdata.datatypes[i - 1].size; } int *offsets_d; hipMalloc(&offsets_d, sizeof(int) * (numCols + 1)); hipMemcpy(offsets_d, offsets, sizeof(int) * (numCols + 1), hipMemcpyHostToDevice); int numRows = d.read(data); hipMalloc(&data_d, d.chunkSize * rowSize); bool *flag = (bool *)malloc(numRows * sizeof(bool)); bool *flag_d; hipMalloc(&flag_d,numRows * sizeof(bool)); d.mdata.rowCount = 0; while (numRows > 0) { hipMemcpy(data_d, data, rowSize * numRows, hipMemcpyHostToDevice); hipLaunchKernelGGL(( deleteKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, rowSize, offsets_d, numCols, type_d, where_d, numRows, flag_d); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("Error at %d: %s\n", __LINE__, hipGetErrorString(err)); } // hipMemcpy(data, data_d, rowSize * numRows, hipMemcpyDeviceToHost); // d.write(data, numRows * d.mdata.rowSize); hipMemcpy(flag, flag_d, numRows * sizeof(bool), hipMemcpyDeviceToHost); for (int k=0;k<numRows;k++) if(flag[k]) d.writeRow((char *)data+k*rowSize); numRows = d.read(data); } d.mdata.commit(); //write to file after checking flag // Free all the data free(data); free(offsets); free(flag); hipFree(data_d); hipFree(type_d); hipFree(where_d); hipFree(offsets_d); hipFree(flag_d); } else { printf("QUERY is invalid\n"); } }
6a29f8ae2348f242632ecb7af3d2ecbd06cfa7e1.cu
// // Created by ameen on 07/05/20. // #include "sql_delete.cuh" #define NUM_THREADS 512 __global__ void deleteKernel(void *data, int rowSize, int *offset, int offsetSize, ColType *types, myExpr *exprs, int numRows, bool *flag_d) { void *res; int resType = 1; int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS; unsigned int start = rowsPerBlock * threadIdx.x; unsigned int end = rowsPerBlock * (threadIdx.x + 1); void *tempRow = malloc(rowSize); void *row; bool flag; for (unsigned int i = start; i < end; ++i) { if (i >= numRows) break; row = (char *)data + i * rowSize; eval(row, offset, types, exprs, res, resType); flag = false; if (resType == RESTYPE_INT) { flag = *(int *) res != 0; } else if (resType == RESTYPE_FLT) { flag = *(float *) res != 0; } free(res); if (!flag) continue; flag_d[i] = flag; } } void sql_delete::execute(std::string &query) { hsql::SQLParserResult *result = hsql::SQLParser::parseSQLString(query); std::vector<std::string> columnNames; std::string tableName; if (result->isValid()) { const auto *stmt = (const hsql::DeleteStatement *) result->getStatement(0); tableName = stmt->tableName; std::vector<myExpr> flattenedExpr; Data d(tableName); exprToVec(stmt->expr, flattenedExpr, d.mdata.columns, d); cudaSetDevice(0); cudaDeviceReset(); int rowSize = d.mdata.rowSize; void *data = malloc(d.chunkSize * rowSize); void *data_d; int numCols = d.mdata.columns.size(); ColType *type_d; cudaMalloc(&type_d, sizeof(ColType) * numCols); cudaMemcpy(type_d, &d.mdata.datatypes[0], sizeof(ColType) * numCols, cudaMemcpyHostToDevice); myExpr *where_d; cudaMalloc(&where_d, sizeof(myExpr) * flattenedExpr.size()); cudaMemcpy(where_d, &flattenedExpr[0], sizeof(myExpr) * flattenedExpr.size(), cudaMemcpyHostToDevice); int *offsets = (int *) malloc(sizeof(int) * (numCols + 1)); offsets[0] = 0; //d.mdata.datatypes[0].size; for (int i = 1; i <= numCols; i++) { offsets[i] = offsets[i - 1] + d.mdata.datatypes[i - 1].size; } int *offsets_d; cudaMalloc(&offsets_d, sizeof(int) * (numCols + 1)); cudaMemcpy(offsets_d, offsets, sizeof(int) * (numCols + 1), cudaMemcpyHostToDevice); int numRows = d.read(data); cudaMalloc(&data_d, d.chunkSize * rowSize); bool *flag = (bool *)malloc(numRows * sizeof(bool)); bool *flag_d; cudaMalloc(&flag_d,numRows * sizeof(bool)); d.mdata.rowCount = 0; while (numRows > 0) { cudaMemcpy(data_d, data, rowSize * numRows, cudaMemcpyHostToDevice); deleteKernel<<<1, NUM_THREADS>>>(data_d, rowSize, offsets_d, numCols, type_d, where_d, numRows, flag_d); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("Error at %d: %s\n", __LINE__, cudaGetErrorString(err)); } // cudaMemcpy(data, data_d, rowSize * numRows, cudaMemcpyDeviceToHost); // d.write(data, numRows * d.mdata.rowSize); cudaMemcpy(flag, flag_d, numRows * sizeof(bool), cudaMemcpyDeviceToHost); for (int k=0;k<numRows;k++) if(flag[k]) d.writeRow((char *)data+k*rowSize); numRows = d.read(data); } d.mdata.commit(); //write to file after checking flag // Free all the data free(data); free(offsets); free(flag); cudaFree(data_d); cudaFree(type_d); cudaFree(where_d); cudaFree(offsets_d); cudaFree(flag_d); } else { printf("QUERY is invalid\n"); } }
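// ---------------------------------------------------------------------------
// [Editor's note - illustrative sketch, not part of the original files above]
// deleteKernel above is launched with a single block and hands each thread a
// contiguous chunk of rows, so only NUM_THREADS threads ever run and
// neighbouring threads touch rows that are far apart. A common alternative is
// a grid-stride loop, which scales across many blocks and keeps adjacent
// threads on adjacent rows (better coalescing). The predicate below is a
// hypothetical stand-in for the eval() expression interpreter.
#include <cuda_runtime.h>

__device__ bool row_matches_example(const char* row) {
    // placeholder predicate: keep rows whose first int column is non-zero
    return *reinterpret_cast<const int*>(row) != 0;
}

__global__ void flagRowsGridStride(const char* data, int rowSize, int numRows,
                                   bool* flags) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numRows;
         i += gridDim.x * blockDim.x) {
        flags[i] = row_matches_example(data + (size_t)i * rowSize);
    }
}

// launch with enough blocks to cover numRows, e.g.
//   int threads = 512;
//   int blocks = (numRows + threads - 1) / threads;
//   flagRowsGridStride<<<blocks, threads>>>((const char*)data_d, rowSize, numRows, flag_d);
// ---------------------------------------------------------------------------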
dfa206dfe184cb07c98dc7861765f867a9a976b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include <iostream> #include <cutil_inline.h> #include <shrQATest.h> #include "Timer.h" using namespace std; const double sqrt_2 = 1.4142135; __global__ void haar_horizontal(float input[], float output [], int o_width, int w) { int x_index = blockIdx.x*blockDim.x+threadIdx.x; int y_index = blockIdx.y*blockDim.y+threadIdx.y; if(x_index>=(w+1)/2 || y_index>=w) return; int i_thread_id = y_index*o_width + 2*x_index; int o_thread_id = y_index*o_width + x_index; const double sqrt_2 = 1.4142135; output[o_thread_id] = (input[i_thread_id]+input[i_thread_id+1])/sqrt_2; output[o_thread_id+w/2] = (input[i_thread_id]-input[i_thread_id+1])/sqrt_2; } __global__ void haar_vertical(float input[], float output [], int o_width, int w) { int x_index = blockIdx.x*blockDim.x+threadIdx.x; int y_index = blockIdx.y*blockDim.y+threadIdx.y; if(y_index>=(w+1)/2 || x_index>=w) return; int p1 = 2*y_index*o_width + x_index; int p2 = (2*y_index+1)*o_width + x_index; int p3 = y_index*o_width + x_index; const double sqrt_2 = 1.4142135; output[p3] = (input[p1]+input[p2])/sqrt_2; output[p3+o_width*w/2] = (input[p1]-input[p2])/sqrt_2; } void haar(float input[], float output [], int o_width, int o_height) { float* d_input; float* d_output; int widthstep = o_width*sizeof(float); hipMalloc(&d_input, widthstep*o_height); hipMalloc(&d_output, widthstep*o_height); hipMemcpy(d_input, input, widthstep*o_height, hipMemcpyHostToDevice); dim3 blocksize(16,16); dim3 gridsize; int w = o_width; gridsize.x=(w+blocksize.x-1)/blocksize.x; gridsize.y=(w+blocksize.y-1)/blocksize.y; while(w>1) { hipLaunchKernelGGL(( haar_horizontal), dim3(gridsize),dim3(blocksize), 0, 0, d_input,d_output,o_width,w); hipLaunchKernelGGL(( haar_vertical), dim3(gridsize),dim3(blocksize), 0, 0, d_output,d_input,o_width,w); w /= 2; } hipMemcpy(output,d_input,widthstep*o_height,hipMemcpyDeviceToHost); hipFree(d_input); hipFree(d_output); } void haar2d_gpu(float* input, int size) { int w = size; float* output = new float[size*size]; haar(input, output, w, w); } void printMatrix(float** mat, int size) { ofstream fout("gpu.txt"); for(int i = 0; i < size; i++) { for(int j = 0; j < size; j++) fout << mat[i][j] << " "; fout << endl; } fout << endl; fout.close(); } void printVector(float* vec, int size) { ofstream fout("gpu.txt"); for(int i = 0; i < size; i++) { for(int j = 0; j < size; j++, i++) fout << vec[i] << " "; fout << endl; } fout << endl; fout.close(); } void haar1d_cpu(float *vec, int n, int w) { int i=0; float *vecp = new float[n]; for(i=0;i<n;i++) vecp[i] = 0; w/=2; for(i=0;i<w;i++) { vecp[i] = (vec[2*i] + vec[2*i+1])/sqrt_2; vecp[i+w] = (vec[2*i] - vec[2*i+1])/sqrt_2; } for(i=0;i<(w*2);i++) vec[i] = vecp[i]; delete [] vecp; } void haar2d_cpu(float **matrix, int size) { float *temp_col = new float[size]; int i = 0, j = 0; int w = size; while(w>1) { for(i=0;i<w;i++) haar1d_cpu(matrix[i], w, w); for(i=0;i<w;i++) { for(j=0;j<w;j++) temp_col[j] = matrix[j][i]; haar1d_cpu(temp_col, w, w); for(j=0;j<w;j++) matrix[j][i] = temp_col[j]; } w/=2; } delete [] temp_col; } int main(int argc, char **argv) { Timer timer; ifstream fin; fin.open("img.txt"); if(! 
fin) cout <<"Input File Error!"; // Input matrix int size = 2048; float* vec = new float[size*size]; for (int i = 0; i < size*size; i++) fin >> vec[i]; fin.close(); // Haar transform with GPU timer.start(); haar2d_gpu(vec, size); timer.stop(); cout << "GPU Time: " << timer.getElapsedTimeInMilliSec() << " ms" << endl; //printVector(vec, size); float** mat = new float*[size]; for(int m = 0; m < size; m++) mat[m] = new float[size]; for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) mat[i][j] = vec[i*size+j]; // Haar transform with CPU timer.start(); haar2d_cpu(mat, size); timer.stop(); cout << "CPU Time: " << timer.getElapsedTimeInMilliSec() << " ms" << endl; //printMatrix(mat, size); cin.get(); return 0; }
dfa206dfe184cb07c98dc7861765f867a9a976b5.cu
#include <fstream> #include <iostream> #include <cutil_inline.h> #include <shrQATest.h> #include "Timer.h" using namespace std; const double sqrt_2 = 1.4142135; __global__ void haar_horizontal(float input[], float output [], int o_width, int w) { int x_index = blockIdx.x*blockDim.x+threadIdx.x; int y_index = blockIdx.y*blockDim.y+threadIdx.y; if(x_index>=(w+1)/2 || y_index>=w) return; int i_thread_id = y_index*o_width + 2*x_index; int o_thread_id = y_index*o_width + x_index; const double sqrt_2 = 1.4142135; output[o_thread_id] = (input[i_thread_id]+input[i_thread_id+1])/sqrt_2; output[o_thread_id+w/2] = (input[i_thread_id]-input[i_thread_id+1])/sqrt_2; } __global__ void haar_vertical(float input[], float output [], int o_width, int w) { int x_index = blockIdx.x*blockDim.x+threadIdx.x; int y_index = blockIdx.y*blockDim.y+threadIdx.y; if(y_index>=(w+1)/2 || x_index>=w) return; int p1 = 2*y_index*o_width + x_index; int p2 = (2*y_index+1)*o_width + x_index; int p3 = y_index*o_width + x_index; const double sqrt_2 = 1.4142135; output[p3] = (input[p1]+input[p2])/sqrt_2; output[p3+o_width*w/2] = (input[p1]-input[p2])/sqrt_2; } void haar(float input[], float output [], int o_width, int o_height) { float* d_input; float* d_output; int widthstep = o_width*sizeof(float); cudaMalloc(&d_input, widthstep*o_height); cudaMalloc(&d_output, widthstep*o_height); cudaMemcpy(d_input, input, widthstep*o_height, cudaMemcpyHostToDevice); dim3 blocksize(16,16); dim3 gridsize; int w = o_width; gridsize.x=(w+blocksize.x-1)/blocksize.x; gridsize.y=(w+blocksize.y-1)/blocksize.y; while(w>1) { haar_horizontal<<<gridsize,blocksize>>>(d_input,d_output,o_width,w); haar_vertical<<<gridsize,blocksize>>>(d_output,d_input,o_width,w); w /= 2; } cudaMemcpy(output,d_input,widthstep*o_height,cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(d_output); } void haar2d_gpu(float* input, int size) { int w = size; float* output = new float[size*size]; haar(input, output, w, w); } void printMatrix(float** mat, int size) { ofstream fout("gpu.txt"); for(int i = 0; i < size; i++) { for(int j = 0; j < size; j++) fout << mat[i][j] << " "; fout << endl; } fout << endl; fout.close(); } void printVector(float* vec, int size) { ofstream fout("gpu.txt"); for(int i = 0; i < size; i++) { for(int j = 0; j < size; j++, i++) fout << vec[i] << " "; fout << endl; } fout << endl; fout.close(); } void haar1d_cpu(float *vec, int n, int w) { int i=0; float *vecp = new float[n]; for(i=0;i<n;i++) vecp[i] = 0; w/=2; for(i=0;i<w;i++) { vecp[i] = (vec[2*i] + vec[2*i+1])/sqrt_2; vecp[i+w] = (vec[2*i] - vec[2*i+1])/sqrt_2; } for(i=0;i<(w*2);i++) vec[i] = vecp[i]; delete [] vecp; } void haar2d_cpu(float **matrix, int size) { float *temp_col = new float[size]; int i = 0, j = 0; int w = size; while(w>1) { for(i=0;i<w;i++) haar1d_cpu(matrix[i], w, w); for(i=0;i<w;i++) { for(j=0;j<w;j++) temp_col[j] = matrix[j][i]; haar1d_cpu(temp_col, w, w); for(j=0;j<w;j++) matrix[j][i] = temp_col[j]; } w/=2; } delete [] temp_col; } int main(int argc, char **argv) { Timer timer; ifstream fin; fin.open("img.txt"); if(! 
fin) cout <<"Input File Error!"; // Input matrix int size = 2048; float* vec = new float[size*size]; for (int i = 0; i < size*size; i++) fin >> vec[i]; fin.close(); // Haar transform with GPU timer.start(); haar2d_gpu(vec, size); timer.stop(); cout << "GPU Time: " << timer.getElapsedTimeInMilliSec() << " ms" << endl; //printVector(vec, size); float** mat = new float*[size]; for(int m = 0; m < size; m++) mat[m] = new float[size]; for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) mat[i][j] = vec[i*size+j]; // Haar transform with CPU timer.start(); haar2d_cpu(mat, size); timer.stop(); cout << "CPU Time: " << timer.getElapsedTimeInMilliSec() << " ms" << endl; //printMatrix(mat, size); cin.get(); return 0; }
d531012593fd1b0dddb73b9e02479f3857c8b267.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// NOTE (editor): the headers below were missing from this listing; <opencv2/opencv.hpp>
// is assumed to provide Mat/imread/cvtColor and the CV_* constants (OpenCV 2.x-style API).
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <opencv2/opencv.hpp>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(hipError_t err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    if (hipSuccess != err)
    {
        fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
        exit(-1);
    }
#endif
    return;
}
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    hipError_t err = hipGetLastError();
    if (hipSuccess != err)
    {
        fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
        exit(-1);
    }
    err = hipDeviceSynchronize();
    if (hipSuccess != err)
    {
        fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, hipGetErrorString(err));
        exit(-1);
    }
#endif
    return;
}
__global__ void rgb_2_grey(uchar* const greyImage, const uchar4* const rgbImage, int rows, int columns)
{
    int rgb_x = blockIdx.x * blockDim.x + threadIdx.x; //x coordinate of pixel
    int rgb_y = blockIdx.y * blockDim.y + threadIdx.y; //y coordinate of pixel
    // a thread is out of range if it exceeds EITHER dimension, so the guard must use logical OR
    if ((rgb_x >= columns) || (rgb_y >= rows)) {
        return;
    }
    int rgb_ab = rgb_y*columns + rgb_x; //absolute pixel position
    uchar4 rgb_Img = rgbImage[rgb_ab];
    greyImage[rgb_ab] = uchar((float(rgb_Img.x))*0.299f + (float(rgb_Img.y))*0.587f + (float(rgb_Img.z))*0.114f);
}
using namespace cv;
using namespace std;
void Proc_Img(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage);
void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols);
void Save_Img();
Mat img_RGB;
Mat img_Grey;
uchar4 *d_rgbImg;
uchar *d_greyImg;
int main()
{
    uchar4* h_rgbImg;
    //uchar4* d_rgbImge=0;
    uchar* h_greyImg;
    //uchar* d_greyImge=0;
    Proc_Img(&h_rgbImg, &h_greyImg, &d_rgbImg, &d_greyImg);
    RGB_2_Greyscale(d_greyImg, d_rgbImg, img_RGB.rows, img_RGB.cols);
    Save_Img();
    return 0;
}
void Proc_Img(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage){
    hipFree(0);
    CudaCheckError();
    //loads image into a matrix object along with the colors in BGR format (must convert to rgb).
    Mat img = imread("cinque_terre_small.jpg", CV_LOAD_IMAGE_COLOR);
    if (img.empty()){
        cerr << "couldn't open file ..." << "cinque_terre_small.jpg" << endl;
        exit(1);
    }
    //converts color type from BGR to RGB
    cvtColor(img, img_RGB, CV_BGR2RGBA);
    //allocate memory for new greyscale image.
    //img.rows returns the range of pixels in y, img.cols returns range of pixels in x
    //CV_8UC1 means 8 bit unsigned(non-negative) single channel of color, aka greyscale.
    //all three of the parameters allow the create function in the Mat class to determine how much memory to allocate
    img_Grey.create(img.rows, img.cols, CV_8UC1);
    //creates rgb and greyscale image arrays
    *h_RGBImage = (uchar4*)img_RGB.ptr<uchar>(0); //.ptr is a method in the mat class that returns a pointer to the first element of the matrix.
    *h_greyImage = (uchar*)img_Grey.ptr<uchar>(0); //this is just like a regular array/pointer mem address to first element of the array. This is templated
    //in this case the compiler runs the function for returning pointer of type unsigned char. for rgb image it is
    //cast to uchar4 struct to hold r,g, and b values.
const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //amount of pixels //allocate memory on gpu hipMalloc(d_RGBImage, sizeof(uchar4) * num_pix); //bites of 1 uchar4 times # of pixels gives number of bites necessary for array CudaCheckError(); hipMalloc(d_greyImage, sizeof(uchar) * num_pix);//bites of uchar times # pixels gives number of bites necessary for array CudaCheckError(); hipMemset(*d_greyImage, 0, sizeof(uchar) * num_pix); CudaCheckError(); //copy array into allocated space hipMemcpy(*d_RGBImage, *h_RGBImage, sizeof(uchar4)*num_pix, hipMemcpyHostToDevice); CudaCheckError(); d_rgbImg = *d_RGBImage; d_greyImg = *d_greyImage; } void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols){ const int BS = 16; const dim3 blockSize(BS, BS); const dim3 gridSize((num_Cols / BS) + 1, (num_Rows / BS) + 1); hipLaunchKernelGGL(( rgb_2_grey) , dim3(gridSize), dim3(blockSize), 0, 0, d_greyImage, d_RGBImage, num_Rows, num_Cols); hipDeviceSynchronize(); CudaCheckError(); } void Save_Img(){ const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); hipMemcpy(img_Grey.ptr<uchar>(0), d_greyImg, sizeof(uchar)*num_pix, hipMemcpyDeviceToHost); CudaCheckError(); imwrite("result.jpg", img_Grey); hipFree(d_rgbImg); hipFree(d_greyImg); }
d531012593fd1b0dddb73b9e02479f3857c8b267.cu
// NOTE (editor): the headers below were missing from this listing; <opencv2/opencv.hpp>
// is assumed to provide Mat/imread/cvtColor and the CV_* constants (OpenCV 2.x-style API).
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <opencv2/opencv.hpp>
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall(cudaError err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    if (cudaSuccess != err)
    {
        fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif
    return;
}
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
        exit(-1);
    }
    err = cudaDeviceSynchronize();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif
    return;
}
__global__ void rgb_2_grey(uchar* const greyImage, const uchar4* const rgbImage, int rows, int columns)
{
    int rgb_x = blockIdx.x * blockDim.x + threadIdx.x; //x coordinate of pixel
    int rgb_y = blockIdx.y * blockDim.y + threadIdx.y; //y coordinate of pixel
    // a thread is out of range if it exceeds EITHER dimension, so the guard must use logical OR
    if ((rgb_x >= columns) || (rgb_y >= rows)) {
        return;
    }
    int rgb_ab = rgb_y*columns + rgb_x; //absolute pixel position
    uchar4 rgb_Img = rgbImage[rgb_ab];
    greyImage[rgb_ab] = uchar((float(rgb_Img.x))*0.299f + (float(rgb_Img.y))*0.587f + (float(rgb_Img.z))*0.114f);
}
using namespace cv;
using namespace std;
void Proc_Img(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage);
void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols);
void Save_Img();
Mat img_RGB;
Mat img_Grey;
uchar4 *d_rgbImg;
uchar *d_greyImg;
int main()
{
    uchar4* h_rgbImg;
    //uchar4* d_rgbImge=0;
    uchar* h_greyImg;
    //uchar* d_greyImge=0;
    Proc_Img(&h_rgbImg, &h_greyImg, &d_rgbImg, &d_greyImg);
    RGB_2_Greyscale(d_greyImg, d_rgbImg, img_RGB.rows, img_RGB.cols);
    Save_Img();
    return 0;
}
void Proc_Img(uchar4** h_RGBImage, uchar** h_greyImage, uchar4 **d_RGBImage, uchar** d_greyImage){
    cudaFree(0);
    CudaCheckError();
    //loads image into a matrix object along with the colors in BGR format (must convert to rgb).
    Mat img = imread("cinque_terre_small.jpg", CV_LOAD_IMAGE_COLOR);
    if (img.empty()){
        cerr << "couldn't open file ..." << "cinque_terre_small.jpg" << endl;
        exit(1);
    }
    //converts color type from BGR to RGB
    cvtColor(img, img_RGB, CV_BGR2RGBA);
    //allocate memory for new greyscale image.
    //img.rows returns the range of pixels in y, img.cols returns range of pixels in x
    //CV_8UC1 means 8 bit unsigned(non-negative) single channel of color, aka greyscale.
    //all three of the parameters allow the create function in the Mat class to determine how much memory to allocate
    img_Grey.create(img.rows, img.cols, CV_8UC1);
    //creates rgb and greyscale image arrays
    *h_RGBImage = (uchar4*)img_RGB.ptr<uchar>(0); //.ptr is a method in the mat class that returns a pointer to the first element of the matrix.
    *h_greyImage = (uchar*)img_Grey.ptr<uchar>(0); //this is just like a regular array/pointer mem address to first element of the array. This is templated
    //in this case the compiler runs the function for returning pointer of type unsigned char. for rgb image it is
    //cast to uchar4 struct to hold r,g, and b values.
const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); //amount of pixels //allocate memory on gpu cudaMalloc(d_RGBImage, sizeof(uchar4) * num_pix); //bites of 1 uchar4 times # of pixels gives number of bites necessary for array CudaCheckError(); cudaMalloc(d_greyImage, sizeof(uchar) * num_pix);//bites of uchar times # pixels gives number of bites necessary for array CudaCheckError(); cudaMemset(*d_greyImage, 0, sizeof(uchar) * num_pix); CudaCheckError(); //copy array into allocated space cudaMemcpy(*d_RGBImage, *h_RGBImage, sizeof(uchar4)*num_pix, cudaMemcpyHostToDevice); CudaCheckError(); d_rgbImg = *d_RGBImage; d_greyImg = *d_greyImage; } void RGB_2_Greyscale(uchar* const d_greyImage, uchar4* const d_RGBImage, size_t num_Rows, size_t num_Cols){ const int BS = 16; const dim3 blockSize(BS, BS); const dim3 gridSize((num_Cols / BS) + 1, (num_Rows / BS) + 1); rgb_2_grey <<<gridSize, blockSize>>>(d_greyImage, d_RGBImage, num_Rows, num_Cols); cudaDeviceSynchronize(); CudaCheckError(); } void Save_Img(){ const size_t num_pix = (img_RGB.rows) * (img_RGB.cols); cudaMemcpy(img_Grey.ptr<uchar>(0), d_greyImg, sizeof(uchar)*num_pix, cudaMemcpyDeviceToHost); CudaCheckError(); imwrite("result.jpg", img_Grey); cudaFree(d_rgbImg); cudaFree(d_greyImg); }
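// ---------------------------------------------------------------------------
// [Editor's note - illustrative sketch, not part of the original files above]
// The 2-D launch above rounds the grid up with (N / BS) + 1, which always adds
// one extra block per dimension. The usual ceil-division form adds an extra
// block only when needed, and every out-of-range thread exits when it falls
// outside EITHER dimension, as in the guard below:
#include <cuda_runtime.h>

__global__ void grey_guard_example(unsigned char* grey, const uchar4* rgb,
                                   int rows, int cols) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= cols || y >= rows) return;          // guard both dimensions
    uchar4 p = rgb[y * cols + x];
    grey[y * cols + x] =
        (unsigned char)(0.299f * p.x + 0.587f * p.y + 0.114f * p.z);
}

// launch with ceil division:
//   dim3 block(16, 16);
//   dim3 grid((cols + block.x - 1) / block.x, (rows + block.y - 1) / block.y);
//   grey_guard_example<<<grid, block>>>(d_grey, d_rgb, rows, cols);
// ---------------------------------------------------------------------------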
772bdf481ecb7d1bd25807fb6718cae512600071.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zjacobisetup.cu, normal z -> d, Sun Nov 20 20:20:40 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d __global__ void dvjacobisetup_gpu( int num_rows, int num_vecs, double *b, double *d, double *c, double *x) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ){ c[row+i*num_rows] = b[row+i*num_rows] / d[row]; x[row+i*num_rows] = c[row+i*num_rows]; } } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] c magma_d_matrix* c = D^(-1) * b @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_djacobisetup_vector_gpu( magma_int_t num_rows, magma_d_matrix b, magma_d_matrix d, magma_d_matrix c, magma_d_matrix *x, magma_queue_t queue ) { dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) ); int num_vecs = b.num_rows / num_rows; magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( dvjacobisetup_gpu), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c.dval, x->val ); return MAGMA_SUCCESS; } __global__ void djacobidiagscal_kernel( int num_rows, int num_vecs, double *b, double *d, double *c) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) c[row+i*num_rows] = b[row+i*num_rows] * d[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] c magma_d_matrix* c = D^(-1) * b @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobi_diagscal( magma_int_t num_rows, magma_d_matrix d, magma_d_matrix b, magma_d_matrix *c, magma_queue_t queue ) { dim3 grid( magma_ceildiv( num_rows, 512 )); int num_vecs = b.num_rows*b.num_cols/num_rows; magma_int_t threads = 512; hipLaunchKernelGGL(( djacobidiagscal_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), num_rows, num_vecs, b.dval, d.dval, c->val ); return MAGMA_SUCCESS; } __global__ void djacobiupdate_kernel( int num_rows, int num_cols, double *t, double *b, double *d, double *x) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i=0; i<num_cols; i++) x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row]; } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-t) where d is the diagonal of the system matrix A and t=Ax. 
Arguments --------- @param[in] t magma_d_matrix t = A*x @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobiupdate( magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix *x, magma_queue_t queue ) { dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( djacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval ); return MAGMA_SUCCESS; } __global__ void djacobispmvupdate_kernel( int num_rows, int num_cols, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double *t, double *b, double *d, double *x ) { int row = blockDim.x * blockIdx.x + threadIdx.x; int j; if(row<num_rows){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( int i=0; i<num_cols; i++){ for( j=start; j<end; j++){ dot += dval[ j ] * x[ dcolind[j]+i*num_rows ]; } x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row]; } } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-Ax) Arguments --------- @param[in] maxiter magma_int_t number of Jacobi iterations @param[in] A magma_d_matrix system matrix @param[in] t magma_d_matrix workspace @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobispmvupdate( magma_int_t maxiter, magma_d_matrix A, magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix *x, magma_queue_t queue ) { // local variables //double c_zero = MAGMA_D_ZERO; //double c_one = MAGMA_D_ONE; dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; for( magma_int_t i=0; i<maxiter; i++ ) { // distinct routines imply synchronization // magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x //hipLaunchKernelGGL(( djacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval ); // merged in one implies asynchronous update hipLaunchKernelGGL(( djacobispmvupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval ); } return MAGMA_SUCCESS; } __global__ void djacobispmvupdate_bw_kernel( int num_rows, int num_cols, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double *t, double *b, double *d, double *x ) { int row_tmp = blockDim.x * blockIdx.x + threadIdx.x; int row = num_rows-1 - row_tmp; int j; if( row>-1 ){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( int i=0; i<num_cols; i++){ for( j=start; j<end; j++){ dot += dval[ j ] * x[ dcolind[j]+i*num_rows ]; } x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row]; } } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-Ax) This kernel processes the thread blocks in reversed order. 
Arguments --------- @param[in] maxiter magma_int_t number of Jacobi iterations @param[in] A magma_d_matrix system matrix @param[in] t magma_d_matrix workspace @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobispmvupdate_bw( magma_int_t maxiter, magma_d_matrix A, magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix *x, magma_queue_t queue ) { // local variables //double c_zero = MAGMA_D_ZERO; //double c_one = MAGMA_D_ONE; dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; for( magma_int_t i=0; i<maxiter; i++ ) { // distinct routines imply synchronization // magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x //hipLaunchKernelGGL(( djacobiupdate_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval ); // merged in one implies asynchronous update hipLaunchKernelGGL(( djacobispmvupdate_bw_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval ); } return MAGMA_SUCCESS; } __global__ void djacobispmvupdateselect_kernel( int num_rows, int num_cols, int num_updates, magma_index_t * indices, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double *t, double *b, double *d, double *x, double *y ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int j; if( idx<num_updates){ int row = indices[ idx ]; printf(" "); //if( row < num_rows ){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( int i=0; i<num_cols; i++){ for( j=start; j<end; j++){ dot += dval[ j ] * x[ dcolind[j]+i*num_rows ]; } x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row]; //double add = (b[row+i*num_rows]-dot) * d[row]; //#if defined(PRECISION_s) //|| defined(PRECISION_d) // atomicAdd( x + row + i*num_rows, add ); //#endif // ( unsigned int* address, unsigned int val); //} } } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-Ax) This kernel allows for overlapping domains: the indices-array contains the locations that are updated. Locations may be repeated to simulate overlapping domains. Arguments --------- @param[in] maxiter magma_int_t number of Jacobi iterations @param[in] num_updates magma_int_t number of updates - length of the indices array @param[in] indices magma_index_t* indices, which entries of x to update @param[in] A magma_d_matrix system matrix @param[in] t magma_d_matrix workspace @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[in] tmp magma_d_matrix workspace @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobispmvupdateselect( magma_int_t maxiter, magma_int_t num_updates, magma_index_t *indices, magma_d_matrix A, magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix tmp, magma_d_matrix *x, magma_queue_t queue ) { // local variables //double c_zero = MAGMA_D_ZERO //double c_one = MAGMA_D_ONE; //magma_d_matrix swp; dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) ); for( magma_int_t i=0; i<maxiter; i++ ) { hipLaunchKernelGGL(( djacobispmvupdateselect_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval ); //swp.dval = x->dval; //x->dval = tmp.dval; //tmp.dval = swp.dval; } return MAGMA_SUCCESS; } __global__ void dftjacobicontractions_kernel( int num_rows, double * xkm2val, double * xkm1val, double * xkval, double * zval, double * cval ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx<num_rows ){ zval[idx] = MAGMA_D_MAKE( MAGMA_D_ABS( xkm1val[idx] - xkval[idx] ), 0.0); cval[ idx ] = MAGMA_D_MAKE( MAGMA_D_ABS( xkm2val[idx] - xkm1val[idx] ) / MAGMA_D_ABS( xkm1val[idx] - xkval[idx] ) ,0.0 ); } } /** Purpose ------- Computes the contraction coefficients c_i: c_i = z_i^{k-1} / z_i^{k} = | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} | Arguments --------- @param[in] xkm2 magma_d_matrix vector x^{k-2} @param[in] xkm1 magma_d_matrix vector x^{k-2} @param[in] xk magma_d_matrix vector x^{k-2} @param[out] z magma_d_matrix* ratio @param[out] c magma_d_matrix* contraction coefficients @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dftjacobicontractions( magma_d_matrix xkm2, magma_d_matrix xkm1, magma_d_matrix xk, magma_d_matrix *z, magma_d_matrix *c, magma_queue_t queue ) { dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( dftjacobicontractions_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval ); return MAGMA_SUCCESS; } __global__ void dftjacobiupdatecheck_kernel( int num_rows, double delta, double * xold, double * xnew, double * zprev, double * cval, magma_int_t *flag_t, magma_int_t *flag_fp ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx<num_rows ){ double t1 = delta * MAGMA_D_ABS(cval[idx]); double vkv = 1.0; for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){ vkv = vkv*2; } double xold_l = xold[idx]; double xnew_l = xnew[idx]; double znew = MAGMA_D_MAKE( max( MAGMA_D_ABS( xold_l - xnew_l), 1e-15), 0.0 ); double znr = zprev[idx] / znew; double t2 = MAGMA_D_ABS( znr - cval[idx] ); //% evaluate fp-cond magma_int_t fpcond = 0; if( MAGMA_D_ABS(znr)>vkv ){ fpcond = 1; } // % combine t-cond and fp-cond + flag_t == 1 magma_int_t cond = 0; if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){ cond = 1; } flag_fp[idx] = flag_fp[idx]+1; if( fpcond>0 ){ flag_fp[idx] = 0; } if( cond > 0 ){ flag_t[idx] = 0; zprev[idx] = znew; xold[idx] = xnew_l; } else { flag_t[idx] = 1; xnew[idx] = xold_l; } } } /** Purpose ------- Checks the Jacobi updates accorting to the condition in the ScaLA'15 paper. 
Arguments --------- @param[in] delta double threshold @param[in,out] xold magma_d_matrix* vector xold @param[in,out] xnew magma_d_matrix* vector xnew @param[in,out] zprev magma_d_matrix* vector z = | x_k-1 - x_k | @param[in] c magma_d_matrix contraction coefficients @param[in,out] flag_t magma_int_t threshold condition @param[in,out] flag_fp magma_int_t false positive condition @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dftjacobiupdatecheck( double delta, magma_d_matrix *xold, magma_d_matrix *xnew, magma_d_matrix *zprev, magma_d_matrix c, magma_int_t *flag_t, magma_int_t *flag_fp, magma_queue_t queue ) { dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; hipLaunchKernelGGL(( dftjacobiupdatecheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream(), xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval, flag_t, flag_fp ); return MAGMA_SUCCESS; }
772bdf481ecb7d1bd25807fb6718cae512600071.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zjacobisetup.cu, normal z -> d, Sun Nov 20 20:20:40 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d __global__ void dvjacobisetup_gpu( int num_rows, int num_vecs, double *b, double *d, double *c, double *x) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ){ c[row+i*num_rows] = b[row+i*num_rows] / d[row]; x[row+i*num_rows] = c[row+i*num_rows]; } } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] c magma_d_matrix* c = D^(-1) * b @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_djacobisetup_vector_gpu( magma_int_t num_rows, magma_d_matrix b, magma_d_matrix d, magma_d_matrix c, magma_d_matrix *x, magma_queue_t queue ) { dim3 grid( magma_ceildiv( num_rows, BLOCK_SIZE ) ); int num_vecs = b.num_rows / num_rows; magma_int_t threads = BLOCK_SIZE; dvjacobisetup_gpu<<< grid, threads, 0, queue->cuda_stream()>>> ( num_rows, num_vecs, b.dval, d.dval, c.dval, x->val ); return MAGMA_SUCCESS; } __global__ void djacobidiagscal_kernel( int num_rows, int num_vecs, double *b, double *d, double *c) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) c[row+i*num_rows] = b[row+i*num_rows] * d[row]; } } /** Purpose ------- Prepares the Jacobi Iteration according to x^(k+1) = D^(-1) * b - D^(-1) * (L+U) * x^k x^(k+1) = c - M * x^k. Returns the vector c. It calls a GPU kernel Arguments --------- @param[in] num_rows magma_int_t number of rows @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] c magma_d_matrix* c = D^(-1) * b @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobi_diagscal( magma_int_t num_rows, magma_d_matrix d, magma_d_matrix b, magma_d_matrix *c, magma_queue_t queue ) { dim3 grid( magma_ceildiv( num_rows, 512 )); int num_vecs = b.num_rows*b.num_cols/num_rows; magma_int_t threads = 512; djacobidiagscal_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( num_rows, num_vecs, b.dval, d.dval, c->val ); return MAGMA_SUCCESS; } __global__ void djacobiupdate_kernel( int num_rows, int num_cols, double *t, double *b, double *d, double *x) { int row = blockDim.x * blockIdx.x + threadIdx.x; if(row < num_rows ){ for( int i=0; i<num_cols; i++) x[row+i*num_rows] += (b[row+i*num_rows]-t[row+i*num_rows]) * d[row]; } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-t) where d is the diagonal of the system matrix A and t=Ax. Arguments --------- @param[in] t magma_d_matrix t = A*x @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobiupdate( magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix *x, magma_queue_t queue ) { dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; djacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval ); return MAGMA_SUCCESS; } __global__ void djacobispmvupdate_kernel( int num_rows, int num_cols, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double *t, double *b, double *d, double *x ) { int row = blockDim.x * blockIdx.x + threadIdx.x; int j; if(row<num_rows){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( int i=0; i<num_cols; i++){ for( j=start; j<end; j++){ dot += dval[ j ] * x[ dcolind[j]+i*num_rows ]; } x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row]; } } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-Ax) Arguments --------- @param[in] maxiter magma_int_t number of Jacobi iterations @param[in] A magma_d_matrix system matrix @param[in] t magma_d_matrix workspace @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobispmvupdate( magma_int_t maxiter, magma_d_matrix A, magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix *x, magma_queue_t queue ) { // local variables //double c_zero = MAGMA_D_ZERO; //double c_one = MAGMA_D_ONE; dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; for( magma_int_t i=0; i<maxiter; i++ ) { // distinct routines imply synchronization // magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x // djacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval ); // merged in one implies asynchronous update djacobispmvupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>> ( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval ); } return MAGMA_SUCCESS; } __global__ void djacobispmvupdate_bw_kernel( int num_rows, int num_cols, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double *t, double *b, double *d, double *x ) { int row_tmp = blockDim.x * blockIdx.x + threadIdx.x; int row = num_rows-1 - row_tmp; int j; if( row>-1 ){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( int i=0; i<num_cols; i++){ for( j=start; j<end; j++){ dot += dval[ j ] * x[ dcolind[j]+i*num_rows ]; } x[row+i*num_rows] += (b[row+i*num_rows]-dot) * d[row]; } } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-Ax) This kernel processes the thread blocks in reversed order. Arguments --------- @param[in] maxiter magma_int_t number of Jacobi iterations @param[in] A magma_d_matrix system matrix @param[in] t magma_d_matrix workspace @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobispmvupdate_bw( magma_int_t maxiter, magma_d_matrix A, magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix *x, magma_queue_t queue ) { // local variables //double c_zero = MAGMA_D_ZERO; //double c_one = MAGMA_D_ONE; dim3 grid( magma_ceildiv( t.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; for( magma_int_t i=0; i<maxiter; i++ ) { // distinct routines imply synchronization // magma_d_spmv( c_one, A, *x, c_zero, t, queue ); // t = A * x // djacobiupdate_kernel<<< grid, threads, 0, queue->cuda_stream()>>>( t.num_rows, t.num_cols, t.dval, b.dval, d.dval, x->dval ); // merged in one implies asynchronous update djacobispmvupdate_bw_kernel<<< grid, threads, 0, queue->cuda_stream()>>> ( t.num_rows, t.num_cols, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval ); } return MAGMA_SUCCESS; } __global__ void djacobispmvupdateselect_kernel( int num_rows, int num_cols, int num_updates, magma_index_t * indices, double * dval, magma_index_t * drowptr, magma_index_t * dcolind, double *t, double *b, double *d, double *x, double *y ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int j; if( idx<num_updates){ int row = indices[ idx ]; printf(" "); //if( row < num_rows ){ double dot = MAGMA_D_ZERO; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; for( int i=0; i<num_cols; i++){ for( j=start; j<end; j++){ dot += dval[ j ] * x[ dcolind[j]+i*num_rows ]; } x[row+i*num_rows] = x[row+i*num_rows] + (b[row+i*num_rows]-dot) * d[row]; //double add = (b[row+i*num_rows]-dot) * d[row]; //#if defined(PRECISION_s) //|| defined(PRECISION_d) // atomicAdd( x + row + i*num_rows, add ); //#endif // ( unsigned int* address, unsigned int val); //} } } } /** Purpose ------- Updates the iteration vector x for the Jacobi iteration according to x=x+d.*(b-Ax) This kernel allows for overlapping domains: the indices-array contains the locations that are updated. Locations may be repeated to simulate overlapping domains. Arguments --------- @param[in] maxiter magma_int_t number of Jacobi iterations @param[in] num_updates magma_int_t number of updates - length of the indices array @param[in] indices magma_index_t* indices, which entries of x to update @param[in] A magma_d_matrix system matrix @param[in] t magma_d_matrix workspace @param[in] b magma_d_matrix RHS b @param[in] d magma_d_matrix vector with diagonal entries @param[in] tmp magma_d_matrix workspace @param[out] x magma_d_matrix* iteration vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_djacobispmvupdateselect( magma_int_t maxiter, magma_int_t num_updates, magma_index_t *indices, magma_d_matrix A, magma_d_matrix t, magma_d_matrix b, magma_d_matrix d, magma_d_matrix tmp, magma_d_matrix *x, magma_queue_t queue ) { // local variables //double c_zero = MAGMA_D_ZERO //double c_one = MAGMA_D_ONE; //magma_d_matrix swp; dim3 grid( magma_ceildiv( num_updates, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; printf("num updates:%d %d %d\n", int(num_updates), int(threads), int(grid.x) ); for( magma_int_t i=0; i<maxiter; i++ ) { djacobispmvupdateselect_kernel<<< grid, threads, 0, queue->cuda_stream()>>> ( t.num_rows, t.num_cols, num_updates, indices, A.dval, A.drow, A.dcol, t.dval, b.dval, d.dval, x->dval, tmp.dval ); //swp.dval = x->dval; //x->dval = tmp.dval; //tmp.dval = swp.dval; } return MAGMA_SUCCESS; } __global__ void dftjacobicontractions_kernel( int num_rows, double * xkm2val, double * xkm1val, double * xkval, double * zval, double * cval ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx<num_rows ){ zval[idx] = MAGMA_D_MAKE( MAGMA_D_ABS( xkm1val[idx] - xkval[idx] ), 0.0); cval[ idx ] = MAGMA_D_MAKE( MAGMA_D_ABS( xkm2val[idx] - xkm1val[idx] ) / MAGMA_D_ABS( xkm1val[idx] - xkval[idx] ) ,0.0 ); } } /** Purpose ------- Computes the contraction coefficients c_i: c_i = z_i^{k-1} / z_i^{k} = | x_i^{k-1} - x_i^{k-2} | / | x_i^{k} - x_i^{k-1} | Arguments --------- @param[in] xkm2 magma_d_matrix vector x^{k-2} @param[in] xkm1 magma_d_matrix vector x^{k-2} @param[in] xk magma_d_matrix vector x^{k-2} @param[out] z magma_d_matrix* ratio @param[out] c magma_d_matrix* contraction coefficients @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dftjacobicontractions( magma_d_matrix xkm2, magma_d_matrix xkm1, magma_d_matrix xk, magma_d_matrix *z, magma_d_matrix *c, magma_queue_t queue ) { dim3 grid( magma_ceildiv( xk.num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; dftjacobicontractions_kernel<<< grid, threads, 0, queue->cuda_stream()>>> ( xkm2.num_rows, xkm2.dval, xkm1.dval, xk.dval, z->dval, c->dval ); return MAGMA_SUCCESS; } __global__ void dftjacobiupdatecheck_kernel( int num_rows, double delta, double * xold, double * xnew, double * zprev, double * cval, magma_int_t *flag_t, magma_int_t *flag_fp ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx<num_rows ){ double t1 = delta * MAGMA_D_ABS(cval[idx]); double vkv = 1.0; for( magma_int_t i=0; i<min( flag_fp[idx], 100 ); i++){ vkv = vkv*2; } double xold_l = xold[idx]; double xnew_l = xnew[idx]; double znew = MAGMA_D_MAKE( max( MAGMA_D_ABS( xold_l - xnew_l), 1e-15), 0.0 ); double znr = zprev[idx] / znew; double t2 = MAGMA_D_ABS( znr - cval[idx] ); //% evaluate fp-cond magma_int_t fpcond = 0; if( MAGMA_D_ABS(znr)>vkv ){ fpcond = 1; } // % combine t-cond and fp-cond + flag_t == 1 magma_int_t cond = 0; if( t2<t1 || (flag_t[idx]>0 && fpcond > 0 ) ){ cond = 1; } flag_fp[idx] = flag_fp[idx]+1; if( fpcond>0 ){ flag_fp[idx] = 0; } if( cond > 0 ){ flag_t[idx] = 0; zprev[idx] = znew; xold[idx] = xnew_l; } else { flag_t[idx] = 1; xnew[idx] = xold_l; } } } /** Purpose ------- Checks the Jacobi updates accorting to the condition in the ScaLA'15 paper. 
Arguments --------- @param[in] delta double threshold @param[in,out] xold magma_d_matrix* vector xold @param[in,out] xnew magma_d_matrix* vector xnew @param[in,out] zprev magma_d_matrix* vector z = | x_k-1 - x_k | @param[in] c magma_d_matrix contraction coefficients @param[in,out] flag_t magma_int_t threshold condition @param[in,out] flag_fp magma_int_t false positive condition @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dftjacobiupdatecheck( double delta, magma_d_matrix *xold, magma_d_matrix *xnew, magma_d_matrix *zprev, magma_d_matrix c, magma_int_t *flag_t, magma_int_t *flag_fp, magma_queue_t queue ) { dim3 grid( magma_ceildiv( xnew->num_rows, BLOCK_SIZE )); magma_int_t threads = BLOCK_SIZE; dftjacobiupdatecheck_kernel<<< grid, threads, 0, queue->cuda_stream()>>> ( xold->num_rows, delta, xold->dval, xnew->dval, zprev->dval, c.dval, flag_t, flag_fp ); return MAGMA_SUCCESS; }
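/* The kernels in the file above fuse the sparse matrix-vector product with the Jacobi update x = x + d.*(b - A*x), at the price of an asynchronous update (x is read and overwritten within the same sweep, as the file's own comment notes). Below is a minimal standalone sketch of that fused step for a single right-hand side and a CSR matrix; it is illustrative only, does not use MAGMA's types, and all names (jacobi_spmv_update_sketch, val, rowptr, colind, dinv) are made up for the example. */
__global__ void jacobi_spmv_update_sketch(
    int num_rows,
    const double *val, const int *rowptr, const int *colind,  // CSR matrix A
    const double *b,                                           // right-hand side
    const double *dinv,                                        // inverse diagonal entries of A
    double *x)                                                 // iterate, updated in place
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < num_rows) {
        double dot = 0.0;
        for (int j = rowptr[row]; j < rowptr[row + 1]; j++)
            dot += val[j] * x[colind[j]];          // (A*x)[row], reading the live iterate
        x[row] += (b[row] - dot) * dinv[row];      // x = x + D^{-1} * (b - A*x)
    }
}
/* Launched as jacobi_spmv_update_sketch<<<(num_rows + 511) / 512, 512>>>(...), this performs one sweep; double-buffering x would recover a strictly synchronous Jacobi iteration instead of the asynchronous merged update used above. */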
85f2c2da890f6a78c319033a54a11ee76d65c85a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <cstring> // needed for memset #include <tune_quda.h> #include <typeinfo> #include <quda_internal.h> #include <float_vector.h> #include <blas_quda.h> #include <color_spinor_field.h> #include <color_spinor_field_order.h> #define checkSpinor(a, b) \ { \ if (a.Precision() != b.Precision()) \ errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \ if (a.Length() != b.Length()) \ errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \ if (a.Stride() != b.Stride()) \ errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \ } #define checkLength(a, b) \ { \ if (a.Length() != b.Length()) \ errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \ if (a.Stride() != b.Stride()) \ errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \ } namespace quda { namespace blas { #define BLAS_SPINOR // do not include ghost functions in Spinor class to reduce parameter space overhead #include <texture.h> unsigned long long flops; unsigned long long bytes; void zero(ColorSpinorField &a) { if (typeid(a) == typeid(cudaColorSpinorField)) { static_cast<cudaColorSpinorField&>(a).zero(); } else { static_cast<cpuColorSpinorField&>(a).zero(); } } static hipStream_t *blasStream; static struct { const char *vol_str; const char *aux_str; char aux_tmp[TuneKey::aux_n]; } blasStrings; void initReduce(); void endReduce(); void init() { blasStream = &streams[Nstream-1]; initReduce(); } void end(void) { endReduce(); } hipStream_t* getStream() { return blasStream; } #include <blas_core.cuh> #include <blas_core.h> #include <blas_mixed_core.h> template <typename Float2, typename FloatN> struct BlasFunctor { //! pre-computation routine before the main loop virtual __device__ __host__ void init() { ; } //! where the reduction is usually computed and any auxiliary operations virtual __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) = 0; }; /** Functor to perform the operation y = a*x + b*y */ template <typename Float2, typename FloatN> struct axpby_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; axpby_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y = a.x*x + b.x*y; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 3; } //! flops per element }; void axpby(const double &a, ColorSpinorField &x, const double &b, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpby_,0,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, x, x); } else { blasCuda<axpby_,0,1,0,0>(make_double2(a, 0.0), make_double2(b, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation y += x */ template <typename Float2, typename FloatN> struct xpy_ : public BlasFunctor<Float2,FloatN> { xpy_(const Float2 &a, const Float2 &b, const Float2 &c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += x ; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 1; } //! 
flops per element }; void xpy(ColorSpinorField &x, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { mixed::blasCuda<xpy_,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } else { blasCuda<xpy_,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation y += a*x */ template <typename Float2, typename FloatN> struct axpy_ : public BlasFunctor<Float2,FloatN> { const Float2 a; axpy_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y = a.x*x + y; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 2; } //! flops per element }; void axpy(const double &a, ColorSpinorField &x, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpy_,0,1,0,0>(make_double2(a,0.0), make_double2(1.0,0.0), make_double2(0.0,0.0), x, y, x, x); } else { blasCuda<axpy_,0,1,0,0>(make_double2(a, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation z = x + a*y */ template <typename Float2, typename FloatN> struct xpayz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; xpayz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { z = x + a.x*y; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 2; } //! flops per element }; void xpay(ColorSpinorField &x, const double &a, ColorSpinorField &y) { blasCuda<xpayz_,0,0,1,0>(make_double2(a,0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, y, x); } void xpayz(ColorSpinorField &x, const double &a, ColorSpinorField &y, ColorSpinorField &z) { blasCuda<xpayz_,0,0,1,0>(make_double2(a,0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x); } /** Functor to perform the operation y -= x; */ template <typename Float2, typename FloatN> struct mxpy_ : public BlasFunctor<Float2,FloatN> { mxpy_(const Float2 &a, const Float2 &b, const Float2 &c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y -= x; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 1; } //! flops per element }; void mxpy(ColorSpinorField &x, ColorSpinorField &y) { blasCuda<mxpy_,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } /** Functor to perform the operation x *= a */ template <typename Float2, typename FloatN> struct ax_ : public BlasFunctor<Float2,FloatN> { const Float2 a; ax_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { x *= a.x; } static int streams() { return 2; } //! total number of input and output streams static int flops() { return 1; } //! 
flops per element }; void ax(const double &a, ColorSpinorField &x) { blasCuda<ax_,1,0,0,0>(make_double2(a, 0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, x, x, x); } /** Functor to perform the operation y += a * x (complex-valued) */ __device__ __host__ void _caxpy(const float2 &a, const float4 &x, float4 &y) { y.x += a.x*x.x; y.x -= a.y*x.y; y.y += a.y*x.x; y.y += a.x*x.y; y.z += a.x*x.z; y.z -= a.y*x.w; y.w += a.y*x.z; y.w += a.x*x.w; } __device__ __host__ void _caxpy(const float2 &a, const float2 &x, float2 &y) { y.x += a.x*x.x; y.x -= a.y*x.y; y.y += a.y*x.x; y.y += a.x*x.y; } __device__ __host__ void _caxpy(const double2 &a, const double2 &x, double2 &y) { y.x += a.x*x.x; y.x -= a.y*x.y; y.y += a.y*x.x; y.y += a.x*x.y; } template <typename Float2, typename FloatN> struct caxpy_ : public BlasFunctor<Float2,FloatN> { const Float2 a; caxpy_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 4; } //! flops per element }; void caxpy(const Complex &a, ColorSpinorField &x, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { mixed::blasCuda<caxpy_,0,1,0,0>(make_double2(real(a),imag(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } else { blasCuda<caxpy_,0,1,0,0>(make_double2(real(a),imag(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation y = a*x + b*y (complex-valued) */ __device__ __host__ void _caxpby(const float2 &a, const float4 &x, const float2 &b, float4 &y) { float4 yy; yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y; yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y; yy.z = a.x*x.z; yy.z -= a.y*x.w; yy.z += b.x*y.z; yy.z -= b.y*y.w; yy.w = a.y*x.z; yy.w += a.x*x.w; yy.w += b.y*y.z; yy.w += b.x*y.w; y = yy; } __device__ __host__ void _caxpby(const float2 &a, const float2 &x, const float2 &b, float2 &y) { float2 yy; yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y; yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y; y = yy; } __device__ __host__ void _caxpby(const double2 &a, const double2 &x, const double2 &b, double2 &y) { double2 yy; yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y; yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y; y = yy; } template <typename Float2, typename FloatN> struct caxpby_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpby_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpby(a, x, b, y); } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 7; } //! 
flops per element }; void caxpby(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y) { blasCuda<caxpby_,0,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0, 0.0), x, y, x, x); } /** Functor to performs the operation z[i] = x[i] + a*y[i] + b*z[i] */ __device__ __host__ void _cxpaypbz(const float4 &x, const float2 &a, const float4 &y, const float2 &b, float4 &z) { float4 zz; zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y; zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y; zz.z = x.z + a.x*y.z; zz.z -= a.y*y.w; zz.z += b.x*z.z; zz.z -= b.y*z.w; zz.w = x.w + a.y*y.z; zz.w += a.x*y.w; zz.w += b.y*z.z; zz.w += b.x*z.w; z = zz; } __device__ __host__ void _cxpaypbz(const float2 &x, const float2 &a, const float2 &y, const float2 &b, float2 &z) { float2 zz; zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y; zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y; z = zz; } __device__ __host__ void _cxpaypbz(const double2 &x, const double2 &a, const double2 &y, const double2 &b, double2 &z) { double2 zz; zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y; zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y; z = zz; } template <typename Float2, typename FloatN> struct cxpaypbz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; cxpaypbz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _cxpaypbz(x, a, y, b, z); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void cxpaypbz(ColorSpinorField &x, const Complex &a, ColorSpinorField &y, const Complex &b, ColorSpinorField &z) { blasCuda<cxpaypbz_,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0, 0.0), x, y, z, z); } /** Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i] */ template <typename Float2, typename FloatN> struct axpyBzpcx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; const Float2 c; axpyBzpcx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += a.x*x; x = b.x*z + c.x*x; } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 5; } //! flops per element }; void axpyBzpcx(const double &a, ColorSpinorField& x, ColorSpinorField& y, const double &b, ColorSpinorField& z, const double &c) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpyBzpcx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(c,0.0), x, y, z, x); } else { // swap arguments around blasCuda<axpyBzpcx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(c,0.0), x, y, z, x); } } /** Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i] */ template <typename Float2, typename FloatN> struct axpyZpbx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; axpyZpbx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += a.x*x; x = z + b.x*x; } static int streams() { return 5; } //! 
total number of input and output streams static int flops() { return 4; } //! flops per element }; void axpyZpbx(const double &a, ColorSpinorField& x, ColorSpinorField& y, ColorSpinorField& z, const double &b) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpyZpbx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, z, x); } else { // swap arguments around blasCuda<axpyZpbx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, z, x); } } /** Functor performing the operations y[i] = a*x[i] + y[i] and x[i] = b*z[i] + x[i] */ template <typename Float2, typename FloatN> struct caxpyBzpx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpyBzpx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(b, z, x); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpyBzpx(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, const Complex &b, ColorSpinorField &z) { if (x.Precision() != y.Precision()) { mixed::blasCuda<caxpyBzpx_,1,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } else { blasCuda<caxpyBzpx_,1,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } } /** Functor performing the operations y[i] = a*x[i] + y[i] and z[i] = b*x[i] + z[i] */ template <typename Float2, typename FloatN> struct caxpyBxpz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpyBxpz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(b, x, z); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpyBxpz(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, const Complex &b, ColorSpinorField &z) { if (x.Precision() != y.Precision()) { mixed::blasCuda<caxpyBxpz_,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } else { blasCuda<caxpyBxpz_,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } } /** Functor performing the operations z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i] */ template <typename Float2, typename FloatN> struct caxpbypzYmbw_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpbypzYmbw_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, z); _caxpy(b, y, z); _caxpy(-b, w, y); } static int streams() { return 6; } //! total number of input and output streams static int flops() { return 12; } //! 
flops per element }; void caxpbypzYmbw(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y, ColorSpinorField &z, ColorSpinorField &w) { blasCuda<caxpbypzYmbw_,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, w); } /** Functor performing the operation y[i] += a*b*x[i], x[i] *= a */ template <typename Float2, typename FloatN> struct cabxpyAx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; cabxpyAx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { x *= a.x; _caxpy(b, x, y); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 5; } //! flops per element }; void cabxpyAx(const double &a, const Complex &b, ColorSpinorField &x, ColorSpinorField &y) { // swap arguments around blasCuda<cabxpyAx_,1,1,0,0>(make_double2(a,0.0), make_double2(REAL(b),IMAG(b)), make_double2(0.0,0.0), x, y, x, x); } /** Functor performing the operation z[i] = a*x[i] + b*y[i] + z[i] */ template <typename Float2, typename FloatN> struct caxpbypz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpbypz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, z); _caxpy(b, y, z); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpbypz(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y, ColorSpinorField &z) { blasCuda<caxpbypz_,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)), make_double2(0.0,0.0), x, y, z, z); } /** Functor Performing the operation w[i] = a*x[i] + b*y[i] + c*z[i] + w[i] */ template <typename Float2, typename FloatN> struct caxpbypczpw_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; const Float2 c; caxpbypczpw_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, w); _caxpy(b, y, w); _caxpy(c, z, w); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 12; } //! flops per element }; void caxpbypczpw(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y, const Complex &c, ColorSpinorField &z, ColorSpinorField &w) { blasCuda<caxpbypczpw_,0,0,0,1>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)), make_double2(REAL(c),IMAG(c)), x, y, z, w); } /** double caxpyXmaz(c a, V x, V y, V z){} First performs the operation y[i] += a*x[i] Second performs the operator x[i] -= a*z[i] */ template <typename Float2, typename FloatN> struct caxpyxmaz_ : public BlasFunctor<Float2,FloatN> { Float2 a; caxpyxmaz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(-a, z, x); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! 
flops per element }; void caxpyXmaz(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { blasCuda<caxpyxmaz_,1,1,0,0>(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x); } /** double caxpyXmazMR(c a, V x, V y, V z){} First performs the operation y[i] += a*x[i] Second performs the operator x[i] -= a*z[i] */ template <typename Float2, typename FloatN> struct caxpyxmazMR_ : public BlasFunctor<Float2,FloatN> { Float2 a; double3 *Ar3; caxpyxmazMR_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), Ar3(static_cast<double3*>(blas::getDeviceReduceBuffer())) { ; } inline __device__ __host__ void init() { #ifdef __CUDA_ARCH__ typedef decltype(a.x) real; double3 result = __ldg(Ar3); a.y = a.x * (real)(result.y) * ((real)1.0 / (real)result.z); a.x = a.x * (real)(result.x) * ((real)1.0 / (real)result.z); #endif } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(-a, z, x); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpyXmazMR(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { if (!commAsyncReduction()) errorQuda("This kernel requires asynchronous reductions to be set"); if (x.Location() == QUDA_CPU_FIELD_LOCATION) errorQuda("This kernel cannot be run on CPU fields"); blasCuda<caxpyxmazMR_,1,1,0,0>(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x); } /** double tripleCGUpdate(d a, d b, V x, V y, V z, V w){} First performs the operation y[i] = y[i] + a*w[i] Second performs the operation z[i] = z[i] - a*x[i] Third performs the operation w[i] = z[i] + b*w[i] */ template <typename Float2, typename FloatN> struct tripleCGUpdate_ : public BlasFunctor<Float2,FloatN> { Float2 a, b; tripleCGUpdate_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += a.x*w; z -= a.x*x; w = z + b.x*w; } static int streams() { return 7; } //! total number of input and output streams static int flops() { return 6; } //! flops per element }; void tripleCGUpdate(const double &a, const double &b, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z, ColorSpinorField &w) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<tripleCGUpdate_,0,1,1,1>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, z, w); } else { blasCuda<tripleCGUpdate_,0,1,1,1>(make_double2(a, 0.0), make_double2(b, 0.0), make_double2(0.0, 0.0), x, y, z, w); } } } // namespace blas } // namespace quda
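/* The functors in the file above (axpby_, caxpy_, ...) only encode the per-element arithmetic; the actual traversal, texture loads, mixed-precision handling and autotuning live in blas_core.cuh / blas_mixed_core.h, which are not part of this file. As a rough illustration only (not QUDA's blasCuda), the axpby_ functor boils down to an elementwise kernel of the following shape for plain double2 data; the kernel name and launch shape are made up for the sketch. */
__global__ void axpby_sketch(double a, const double2 *x, double b, double2 *y, int length)
{
    // grid-stride loop over the field elements, y = a*x + b*y applied per real component
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < length; i += gridDim.x * blockDim.x) {
        double2 xi = x[i];
        double2 yi = y[i];
        yi.x = a * xi.x + b * yi.x;
        yi.y = a * xi.y + b * yi.y;
        y[i] = yi;
    }
}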
85f2c2da890f6a78c319033a54a11ee76d65c85a.cu
#include <stdlib.h> #include <stdio.h> #include <cstring> // needed for memset #include <tune_quda.h> #include <typeinfo> #include <quda_internal.h> #include <float_vector.h> #include <blas_quda.h> #include <color_spinor_field.h> #include <color_spinor_field_order.h> #define checkSpinor(a, b) \ { \ if (a.Precision() != b.Precision()) \ errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \ if (a.Length() != b.Length()) \ errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \ if (a.Stride() != b.Stride()) \ errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \ } #define checkLength(a, b) \ { \ if (a.Length() != b.Length()) \ errorQuda("lengths do not match: %lu %lu", a.Length(), b.Length()); \ if (a.Stride() != b.Stride()) \ errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \ } namespace quda { namespace blas { #define BLAS_SPINOR // do not include ghost functions in Spinor class to reduce parameter space overhead #include <texture.h> unsigned long long flops; unsigned long long bytes; void zero(ColorSpinorField &a) { if (typeid(a) == typeid(cudaColorSpinorField)) { static_cast<cudaColorSpinorField&>(a).zero(); } else { static_cast<cpuColorSpinorField&>(a).zero(); } } static cudaStream_t *blasStream; static struct { const char *vol_str; const char *aux_str; char aux_tmp[TuneKey::aux_n]; } blasStrings; void initReduce(); void endReduce(); void init() { blasStream = &streams[Nstream-1]; initReduce(); } void end(void) { endReduce(); } cudaStream_t* getStream() { return blasStream; } #include <blas_core.cuh> #include <blas_core.h> #include <blas_mixed_core.h> template <typename Float2, typename FloatN> struct BlasFunctor { //! pre-computation routine before the main loop virtual __device__ __host__ void init() { ; } //! where the reduction is usually computed and any auxiliary operations virtual __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) = 0; }; /** Functor to perform the operation y = a*x + b*y */ template <typename Float2, typename FloatN> struct axpby_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; axpby_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y = a.x*x + b.x*y; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 3; } //! flops per element }; void axpby(const double &a, ColorSpinorField &x, const double &b, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpby_,0,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, x, x); } else { blasCuda<axpby_,0,1,0,0>(make_double2(a, 0.0), make_double2(b, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation y += x */ template <typename Float2, typename FloatN> struct xpy_ : public BlasFunctor<Float2,FloatN> { xpy_(const Float2 &a, const Float2 &b, const Float2 &c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += x ; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 1; } //! 
flops per element }; void xpy(ColorSpinorField &x, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { mixed::blasCuda<xpy_,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } else { blasCuda<xpy_,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation y += a*x */ template <typename Float2, typename FloatN> struct axpy_ : public BlasFunctor<Float2,FloatN> { const Float2 a; axpy_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y = a.x*x + y; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 2; } //! flops per element }; void axpy(const double &a, ColorSpinorField &x, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpy_,0,1,0,0>(make_double2(a,0.0), make_double2(1.0,0.0), make_double2(0.0,0.0), x, y, x, x); } else { blasCuda<axpy_,0,1,0,0>(make_double2(a, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation z = x + a*y */ template <typename Float2, typename FloatN> struct xpayz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; xpayz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { z = x + a.x*y; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 2; } //! flops per element }; void xpay(ColorSpinorField &x, const double &a, ColorSpinorField &y) { blasCuda<xpayz_,0,0,1,0>(make_double2(a,0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, y, x); } void xpayz(ColorSpinorField &x, const double &a, ColorSpinorField &y, ColorSpinorField &z) { blasCuda<xpayz_,0,0,1,0>(make_double2(a,0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x); } /** Functor to perform the operation y -= x; */ template <typename Float2, typename FloatN> struct mxpy_ : public BlasFunctor<Float2,FloatN> { mxpy_(const Float2 &a, const Float2 &b, const Float2 &c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y -= x; } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 1; } //! flops per element }; void mxpy(ColorSpinorField &x, ColorSpinorField &y) { blasCuda<mxpy_,0,1,0,0>(make_double2(1.0, 0.0), make_double2(1.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } /** Functor to perform the operation x *= a */ template <typename Float2, typename FloatN> struct ax_ : public BlasFunctor<Float2,FloatN> { const Float2 a; ax_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { x *= a.x; } static int streams() { return 2; } //! total number of input and output streams static int flops() { return 1; } //! 
flops per element }; void ax(const double &a, ColorSpinorField &x) { blasCuda<ax_,1,0,0,0>(make_double2(a, 0.0), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, x, x, x); } /** Functor to perform the operation y += a * x (complex-valued) */ __device__ __host__ void _caxpy(const float2 &a, const float4 &x, float4 &y) { y.x += a.x*x.x; y.x -= a.y*x.y; y.y += a.y*x.x; y.y += a.x*x.y; y.z += a.x*x.z; y.z -= a.y*x.w; y.w += a.y*x.z; y.w += a.x*x.w; } __device__ __host__ void _caxpy(const float2 &a, const float2 &x, float2 &y) { y.x += a.x*x.x; y.x -= a.y*x.y; y.y += a.y*x.x; y.y += a.x*x.y; } __device__ __host__ void _caxpy(const double2 &a, const double2 &x, double2 &y) { y.x += a.x*x.x; y.x -= a.y*x.y; y.y += a.y*x.x; y.y += a.x*x.y; } template <typename Float2, typename FloatN> struct caxpy_ : public BlasFunctor<Float2,FloatN> { const Float2 a; caxpy_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 4; } //! flops per element }; void caxpy(const Complex &a, ColorSpinorField &x, ColorSpinorField &y) { if (x.Precision() != y.Precision()) { mixed::blasCuda<caxpy_,0,1,0,0>(make_double2(real(a),imag(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } else { blasCuda<caxpy_,0,1,0,0>(make_double2(real(a),imag(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, x, x); } } /** Functor to perform the operation y = a*x + b*y (complex-valued) */ __device__ __host__ void _caxpby(const float2 &a, const float4 &x, const float2 &b, float4 &y) { float4 yy; yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y; yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y; yy.z = a.x*x.z; yy.z -= a.y*x.w; yy.z += b.x*y.z; yy.z -= b.y*y.w; yy.w = a.y*x.z; yy.w += a.x*x.w; yy.w += b.y*y.z; yy.w += b.x*y.w; y = yy; } __device__ __host__ void _caxpby(const float2 &a, const float2 &x, const float2 &b, float2 &y) { float2 yy; yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y; yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y; y = yy; } __device__ __host__ void _caxpby(const double2 &a, const double2 &x, const double2 &b, double2 &y) { double2 yy; yy.x = a.x*x.x; yy.x -= a.y*x.y; yy.x += b.x*y.x; yy.x -= b.y*y.y; yy.y = a.y*x.x; yy.y += a.x*x.y; yy.y += b.y*y.x; yy.y += b.x*y.y; y = yy; } template <typename Float2, typename FloatN> struct caxpby_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpby_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpby(a, x, b, y); } static int streams() { return 3; } //! total number of input and output streams static int flops() { return 7; } //! 
flops per element }; void caxpby(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y) { blasCuda<caxpby_,0,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0, 0.0), x, y, x, x); } /** Functor to performs the operation z[i] = x[i] + a*y[i] + b*z[i] */ __device__ __host__ void _cxpaypbz(const float4 &x, const float2 &a, const float4 &y, const float2 &b, float4 &z) { float4 zz; zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y; zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y; zz.z = x.z + a.x*y.z; zz.z -= a.y*y.w; zz.z += b.x*z.z; zz.z -= b.y*z.w; zz.w = x.w + a.y*y.z; zz.w += a.x*y.w; zz.w += b.y*z.z; zz.w += b.x*z.w; z = zz; } __device__ __host__ void _cxpaypbz(const float2 &x, const float2 &a, const float2 &y, const float2 &b, float2 &z) { float2 zz; zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y; zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y; z = zz; } __device__ __host__ void _cxpaypbz(const double2 &x, const double2 &a, const double2 &y, const double2 &b, double2 &z) { double2 zz; zz.x = x.x + a.x*y.x; zz.x -= a.y*y.y; zz.x += b.x*z.x; zz.x -= b.y*z.y; zz.y = x.y + a.y*y.x; zz.y += a.x*y.y; zz.y += b.y*z.x; zz.y += b.x*z.y; z = zz; } template <typename Float2, typename FloatN> struct cxpaypbz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; cxpaypbz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _cxpaypbz(x, a, y, b, z); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void cxpaypbz(ColorSpinorField &x, const Complex &a, ColorSpinorField &y, const Complex &b, ColorSpinorField &z) { blasCuda<cxpaypbz_,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0, 0.0), x, y, z, z); } /** Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i] */ template <typename Float2, typename FloatN> struct axpyBzpcx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; const Float2 c; axpyBzpcx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += a.x*x; x = b.x*z + c.x*x; } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 5; } //! flops per element }; void axpyBzpcx(const double &a, ColorSpinorField& x, ColorSpinorField& y, const double &b, ColorSpinorField& z, const double &c) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpyBzpcx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(c,0.0), x, y, z, x); } else { // swap arguments around blasCuda<axpyBzpcx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(c,0.0), x, y, z, x); } } /** Functor performing the operations: y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i] */ template <typename Float2, typename FloatN> struct axpyZpbx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; axpyZpbx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += a.x*x; x = z + b.x*x; } static int streams() { return 5; } //! 
total number of input and output streams static int flops() { return 4; } //! flops per element }; void axpyZpbx(const double &a, ColorSpinorField& x, ColorSpinorField& y, ColorSpinorField& z, const double &b) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<axpyZpbx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, z, x); } else { // swap arguments around blasCuda<axpyZpbx_,1,1,0,0>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, z, x); } } /** Functor performing the operations y[i] = a*x[i] + y[i] and x[i] = b*z[i] + x[i] */ template <typename Float2, typename FloatN> struct caxpyBzpx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpyBzpx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(b, z, x); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpyBzpx(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, const Complex &b, ColorSpinorField &z) { if (x.Precision() != y.Precision()) { mixed::blasCuda<caxpyBzpx_,1,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } else { blasCuda<caxpyBzpx_,1,1,0,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } } /** Functor performing the operations y[i] = a*x[i] + y[i] and z[i] = b*x[i] + z[i] */ template <typename Float2, typename FloatN> struct caxpyBxpz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpyBxpz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(b, x, z); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpyBxpz(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, const Complex &b, ColorSpinorField &z) { if (x.Precision() != y.Precision()) { mixed::blasCuda<caxpyBxpz_,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } else { blasCuda<caxpyBxpz_,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, x); } } /** Functor performing the operations z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i] */ template <typename Float2, typename FloatN> struct caxpbypzYmbw_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpbypzYmbw_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, z); _caxpy(b, y, z); _caxpy(-b, w, y); } static int streams() { return 6; } //! total number of input and output streams static int flops() { return 12; } //! 
flops per element }; void caxpbypzYmbw(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y, ColorSpinorField &z, ColorSpinorField &w) { blasCuda<caxpbypzYmbw_,0,1,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b), IMAG(b)), make_double2(0.0,0.0), x, y, z, w); } /** Functor performing the operation y[i] += a*b*x[i], x[i] *= a */ template <typename Float2, typename FloatN> struct cabxpyAx_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; cabxpyAx_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { x *= a.x; _caxpy(b, x, y); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 5; } //! flops per element }; void cabxpyAx(const double &a, const Complex &b, ColorSpinorField &x, ColorSpinorField &y) { // swap arguments around blasCuda<cabxpyAx_,1,1,0,0>(make_double2(a,0.0), make_double2(REAL(b),IMAG(b)), make_double2(0.0,0.0), x, y, x, x); } /** Functor performing the operation z[i] = a*x[i] + b*y[i] + z[i] */ template <typename Float2, typename FloatN> struct caxpbypz_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; caxpbypz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, z); _caxpy(b, y, z); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpbypz(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y, ColorSpinorField &z) { blasCuda<caxpbypz_,0,0,1,0>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)), make_double2(0.0,0.0), x, y, z, z); } /** Functor Performing the operation w[i] = a*x[i] + b*y[i] + c*z[i] + w[i] */ template <typename Float2, typename FloatN> struct caxpbypczpw_ : public BlasFunctor<Float2,FloatN> { const Float2 a; const Float2 b; const Float2 c; caxpbypczpw_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b), c(c) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, w); _caxpy(b, y, w); _caxpy(c, z, w); } static int streams() { return 4; } //! total number of input and output streams static int flops() { return 12; } //! flops per element }; void caxpbypczpw(const Complex &a, ColorSpinorField &x, const Complex &b, ColorSpinorField &y, const Complex &c, ColorSpinorField &z, ColorSpinorField &w) { blasCuda<caxpbypczpw_,0,0,0,1>(make_double2(REAL(a),IMAG(a)), make_double2(REAL(b),IMAG(b)), make_double2(REAL(c),IMAG(c)), x, y, z, w); } /** double caxpyXmaz(c a, V x, V y, V z){} First performs the operation y[i] += a*x[i] Second performs the operator x[i] -= a*z[i] */ template <typename Float2, typename FloatN> struct caxpyxmaz_ : public BlasFunctor<Float2,FloatN> { Float2 a; caxpyxmaz_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(-a, z, x); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! 
flops per element }; void caxpyXmaz(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { blasCuda<caxpyxmaz_,1,1,0,0>(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x); } /** double caxpyXmazMR(c a, V x, V y, V z){} First performs the operation y[i] += a*x[i] Second performs the operator x[i] -= a*z[i] */ template <typename Float2, typename FloatN> struct caxpyxmazMR_ : public BlasFunctor<Float2,FloatN> { Float2 a; double3 *Ar3; caxpyxmazMR_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), Ar3(static_cast<double3*>(blas::getDeviceReduceBuffer())) { ; } inline __device__ __host__ void init() { #ifdef __CUDA_ARCH__ typedef decltype(a.x) real; double3 result = __ldg(Ar3); a.y = a.x * (real)(result.y) * ((real)1.0 / (real)result.z); a.x = a.x * (real)(result.x) * ((real)1.0 / (real)result.z); #endif } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { _caxpy(a, x, y); _caxpy(-a, z, x); } static int streams() { return 5; } //! total number of input and output streams static int flops() { return 8; } //! flops per element }; void caxpyXmazMR(const Complex &a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { if (!commAsyncReduction()) errorQuda("This kernel requires asynchronous reductions to be set"); if (x.Location() == QUDA_CPU_FIELD_LOCATION) errorQuda("This kernel cannot be run on CPU fields"); blasCuda<caxpyxmazMR_,1,1,0,0>(make_double2(REAL(a), IMAG(a)), make_double2(0.0, 0.0), make_double2(0.0, 0.0), x, y, z, x); } /** double tripleCGUpdate(d a, d b, V x, V y, V z, V w){} First performs the operation y[i] = y[i] + a*w[i] Second performs the operation z[i] = z[i] - a*x[i] Third performs the operation w[i] = z[i] + b*w[i] */ template <typename Float2, typename FloatN> struct tripleCGUpdate_ : public BlasFunctor<Float2,FloatN> { Float2 a, b; tripleCGUpdate_(const Float2 &a, const Float2 &b, const Float2 &c) : a(a), b(b) { ; } __device__ __host__ void operator()(FloatN &x, FloatN &y, FloatN &z, FloatN &w) { y += a.x*w; z -= a.x*x; w = z + b.x*w; } static int streams() { return 7; } //! total number of input and output streams static int flops() { return 6; } //! flops per element }; void tripleCGUpdate(const double &a, const double &b, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z, ColorSpinorField &w) { if (x.Precision() != y.Precision()) { // call hacked mixed precision kernel mixed::blasCuda<tripleCGUpdate_,0,1,1,1>(make_double2(a,0.0), make_double2(b,0.0), make_double2(0.0,0.0), x, y, z, w); } else { blasCuda<tripleCGUpdate_,0,1,1,1>(make_double2(a, 0.0), make_double2(b, 0.0), make_double2(0.0, 0.0), x, y, z, w); } } } // namespace blas } // namespace quda
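/* The complex updates above are written out by hand in real arithmetic (_caxpy, _caxpby, _cxpaypbz). A small standalone host check, compiled with nvcc, confirms that the double2 expansion of y += a*x matches std::complex arithmetic; this is illustrative only, not part of QUDA, and caxpy_host plus the sample values are made up. */
#include <complex>
#include <cstdio>
#include <cuda_runtime.h>   // double2 / make_double2

static void caxpy_host(const double2 &a, const double2 &x, double2 &y)
{
    y.x += a.x*x.x; y.x -= a.y*x.y;   // real part of a*x
    y.y += a.y*x.x; y.y += a.x*x.y;   // imaginary part of a*x
}

int main()
{
    double2 a = make_double2(0.3, -1.2), x = make_double2(2.0, 0.5), y = make_double2(-1.0, 4.0);
    std::complex<double> ref = std::complex<double>(-1.0, 4.0)
                             + std::complex<double>(0.3, -1.2) * std::complex<double>(2.0, 0.5);
    caxpy_host(a, x, y);
    std::printf("expanded: (%g, %g)  std::complex: (%g, %g)\n", y.x, y.y, ref.real(), ref.imag());
    return 0;   // both print (0.2, 1.75)
}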
df4f3f041c2ab5d2a5c6ef2cc51111ba9f8bb8b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <list> #include <algorithm> #include <iostream> #include <cstdlib> #include <chrono> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/sequence.h> using namespace std; using namespace std::chrono; #define SQUARE_SIDE_SIZE 8 #define WALL_PERCENTAGE 0.2 //To avoid no solution, max = 0.4 #define x_start 0 //min= 0, max = SQUARE_SIDE_SIZE-1 #define y_start 0 //min= 0, max = SQUARE_SIDE_SIZE-1 #define x_end 3 //min= 0, max = SQUARE_SIDE_SIZE-1 #define y_end 7 //min= 0, max = SQUARE_SIDE_SIZE-1 class point { public: point( int a = 0, int b = 0 ) { x = a; y = b; } bool operator ==( const point& o ) { return o.x == x && o.y == y; } point operator +( const point& o ) { return point( o.x + x, o.y + y ); } int x, y; }; class map { public: map() { float current_random_value; w = h = SQUARE_SIDE_SIZE; for( int r = 0; r < h; r++ ) for( int s = 0; s < w; s++ ){ if( !( (s ==x_start && r == y_start) || (s == x_end && r == y_end) )){ current_random_value = rand()/(float)RAND_MAX; m[s][r] = current_random_value < WALL_PERCENTAGE ? 1 : 0; } else m[s][r] = 0; // cout << "m[" << s << "][" << r <<"] = " << m[s][r] << endl; } // cout << endl; } int operator() ( int x, int y ) { return m[x][y]; } int m[SQUARE_SIDE_SIZE][SQUARE_SIDE_SIZE]; int w, h; }; class node { public: bool operator == (const node& o ) { return pos == o.pos; } bool operator == (const point& o ) { return pos == o; } bool operator < (const node& o ) { return dist + cost < o.dist + o.cost; } point pos, parent; int dist, cost; }; //Functions called from the GPU and executed by the GPU //-------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------- __device__ bool isValid( point& p ) { return ( p.x >-1 && p.y > -1 && p.x < SQUARE_SIDE_SIZE && p.y < SQUARE_SIDE_SIZE ); } __device__ int dev_calcDist( point& p, point& dev_end){ // need a better heuristic int x = dev_end.x - p.x, y = dev_end.y - p.y; return( x * x + y * y ); } //If an already-recorded node reaches the same point more cheaply we return true and forget the new path, otherwise we erase the old node and return false so the new path can be kept __device__ bool existPoint( point& p, int cost, list<node> dev_closed, list<node> dev_open) { list<node>::iterator i; i = thrust::find( dev_closed.begin(), dev_closed.end(), p ); if( i != dev_closed.end() ) { if( ( *i ).cost + ( *i ).dist < cost ) return true; else { dev_closed.erase( i ); return false; } } i = thrust::find( dev_open.begin(), dev_open.end(), p ); if( i != dev_open.end() ) { if( ( *i ).cost + ( *i ).dist < cost ) return true; else { dev_open.erase( i ); return false; } } return false; } //-------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------- class aStar { public: aStar() { neighbours[0] = point( -1, -1 ); neighbours[1] = point( 1, -1 ); neighbours[2] = point( -1, 1 ); neighbours[3] = point( 1, 1 ); neighbours[4] = point( 0, -1 ); neighbours[5] = point( -1, 0 ); neighbours[6] = point( 0, 1 ); neighbours[7] = point( 1, 0 ); } int calcDist( point& p ){ // need a better heuristic int x = end.x - p.x, y = end.y - p.y; return( x * x + y * y ); } __global__ void fillOpen(node* dev_n, point*
dev_neighbours, int* dev_map, bool* dev_found, list<node> dev_open, list<node> dev_closed) { int stepCost, nc, dist; point neighbour; int i = threadIdx.x + blockIdx.x * blockDim.x; //We investigate all neighbours // one can make diagonals have different cost stepCost = i < 4 ? 1 : 1; //The variable neigbours has the direct neighbours from index 0 to 3 and the diagonal neighbours from index 4 to 7 neighbour = dev_n->pos + dev_neighbours[i]; //The variable neighbours contains the relative moves from the current position to find the neighbours if( neighbour == end ) *dev_found = true; if( isValid( neighbour ) && dev_map[neighbour.x, neighbour.y] != 1 ) { //Here we inspect the new position if the position is in the map and the position isn't a wall nc = stepCost + dev_n->cost; dist = calcDist( neighbour ); if( !existPoint( neighbour, nc + dist , dev_open, dev_closed) ) { //If we don't have any path to the same point in open or closed where the cost is cheaper, we create a new node in open node m; m.cost = nc; m.dist = dist; m.pos = neighbour; m.parent = dev_n->pos; dev_open.push_back( m ); } } *dev_found = false; } /* You specify a beginning point, an end point, and a map where you want to find the cheapest way. It initializes all attributes from the object astar to keep these data in mind. We create the first node with parent 0 and current_pos the first position with a cost of zero. */ bool search( point& s, point& e, map& mp ) { //Allocate memory in the GPU point* dev_neighbours; point* dev_end; node* dev_n; int* dev_map; bool* host_found; bool* dev_found; list<node>* dev_open; list<node>* dev_closed; hipMalloc( (void**)&dev_neighbours, 8*sizeof(point) ); //Declare the neighbours variable for the GPU hipMalloc( (void**)&dev_end, sizeof(point) ); //Declare the end point for the GPU hipMalloc( (void**)&dev_map, SQUARE_SIDE_SIZE*SQUARE_SIDE_SIZE*sizeof(point) ); //Declare the end point for the GPU //Declare the current node that will be processed by the GPU hipMalloc( (void**)&dev_n, sizeof(node) ); //Declare the bool result in the GPU that is needed for our stop condition hipMalloc( (void**)&dev_found, sizeof(bool) ) //Copy values in the GPU's memory hipMemcpy( dev_end, &e, sizeof(point), hipMemcpyHostToDevice ); hipMemcpy( dev_neighbours, neighbours, 8*sizeof(point), hipMemcpyHostToDevice ); hipMemcpy( dev_map, mp.m, SQUARE_SIDE_SIZE*SQUARE_SIDE_SIZE*sizeof(int), hipMemcpyHostToDevice ); node n; end = e; start = s; m = mp; n.cost = 0; n.pos = s; n.parent = 0; n.dist = calcDist( s ); open.push_back( n ); while( !open.empty() ) { //Search stops when all nodes are closed, it means all ways have been inverstigated //open.sort(); node n = open.front(); //FIFO research open.pop_front(); //As we investigated the node, we can consider it closed (i.e. 
investigated) closed.push_back( n ); //So we fill the node in closed to keep it in memory //We update the value of the current node in the GPU's memory hipMemcpy( dev_n, &n, sizeof(node), hipMemcpyHostToDevice ); //Create device open and close list hipMalloc( (void**)&dev_open, open.size()*sizeof(node) ); hipMalloc( (void**)&dev_closed, closed.size()*sizeof(node) ); hipMemcpy( dev_open, open, open.size()*sizeof(node), hipMemcpyHostToDevice ); hipMemcpy( dev_closed, closed, closed.size()*sizeof(node), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( fillOpen), dim3(1),dim3(8), 0, 0, dev_n, dev_neighbours, dev_map, dev_found, dev_open, dev_closed); //We update CPU's open and closed lists using the one that were modified by the kernel hipMemcpy( open, dev_open, dev_open.size()*sizeof(node), hipMemcpyDeviceToHost ); hipMemcpy( closed, dev_closed, dev_closed.size()*sizeof(node), hipMemcpyDeviceToHost ); //We free GPU's open and closed lists hipFree(dev_open); hipFree(dev_closed); hipMemcpy( host_found, dev_found, sizeof(bool), hipMemcpyDeviceToHost ); if( *host_found ){ //Free GPU's memory hipFree(dev_found); hipFree(dev_n); hipFree(dev_end); hipFree(dev_neighbours); hipFree(dev_map); return true; } } //Free GPU's memory hipFree(dev_n); hipFree(dev_found); hipFree(dev_end); hipFree(dev_neighbours); hipFree(dev_map); return false; } /* Recreate the path from the closed list containing all the nodes that leads to the solution */ int path( list<point>& path ) { path.push_front( end ); //We last nodes first so at the end, the path list will be in the right order int cost = 1 + closed.back().cost; //We consider the last move to the end to cost 1 ???? path.push_front( closed.back().pos ); point parent = closed.back().parent; for( list<node>::reverse_iterator i = closed.rbegin(); i != closed.rend(); i++ ) { //We go through the entire close node list till we reach the start point if( ( *i ).pos == parent && !( ( *i ).pos == start ) ) { path.push_front( ( *i ).pos ); parent = ( *i ).parent; } } path.push_front( start ); return cost; } map m; point end, start; point neighbours[8]; list<node> open; list<node> closed; }; int main( int argc, char* argv[] ) { map m; point s(x_start,y_start), e(x_end,y_end); //s is the start e is the end aStar as; //Start point to measure executions time auto start = high_resolution_clock::now(); if( as.search( s, e, m ) ) { list<point> path; int c = as.path( path ); for( int y = -1; y < SQUARE_SIDE_SIZE+1; y++ ) { for( int x = -1; x < SQUARE_SIDE_SIZE+1; x++ ) { if( x < 0 || y < 0 || x > SQUARE_SIDE_SIZE-1 || y > SQUARE_SIDE_SIZE-1 || m( x, y ) == 1 ) cout << "w"; else { if( find( path.begin(), path.end(), point( x, y ) )!= path.end() ) cout << "x"; else cout << "."; } } cout << "\n"; } cout << "\nPath cost " << c << ": "; for( list<point>::iterator i = path.begin(); i != path.end(); i++ ) { cout<< "(" << ( *i ).x << ", " << ( *i ).y << ") "; } } cout << "\n\n"; // Stop point to measure executions time auto stop = high_resolution_clock::now(); // Display execution time auto duration = duration_cast<microseconds>(stop - start); cout << "CPU execution time = " << duration.count() << " microseconds" <<endl; return 0; }
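The fillOpen kernel above sets *dev_found = true when a neighbour matches the goal, but every thread then falls through to the unconditional *dev_found = false; at the end, so the flag is clobbered before the host reads it (host_found is also dereferenced without ever being allocated, and with SQUARE_SIDE_SIZE 8 the #define y_end 8 places the goal outside the 0..7 range its own comment allows). A common pattern is to clear the flag once on the host and let device code only ever set it; the HIP-flavoured sketch below illustrates that pattern with assumed names (markGoalReached, positions_x, positions_y) that are not part of the original program.

#include <hip/hip_runtime.h>

// Host side, before the launch: clear the device flag exactly once.
//   hipMemset(dev_found, 0, sizeof(bool));

// Device side: set the flag only when the goal is reached, never clear it.
__global__ void markGoalReached(const int *positions_x, const int *positions_y,
                                int goal_x, int goal_y, int n, bool *dev_found) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  if (positions_x[i] == goal_x && positions_y[i] == goal_y)
    *dev_found = true;  // racy but benign: every writer stores the same value
}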
df4f3f041c2ab5d2a5c6ef2cc51111ba9f8bb8b4.cu
#include <list> #include <algorithm> #include <iostream> #include <cstdlib> #include <chrono> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/sequence.h> using namespace std; using namespace std::chrono; #define SQUARE_SIDE_SIZE 8 #define WALL_PERCENTAGE 0.2 //To avoid no solution, max = 0.4 #define x_start 0 //min= 0, max = SQUARE_SIDE_SIZE-1 #define y_start 0 //min= 0, max = SQUARE_SIDE_SIZE-1 #define x_end 3 //min= 0, max = SQUARE_SIDE_SIZE-1 #define y_end 8 //min= 0, max = SQUARE_SIDE_SIZE-1 class point { public: point( int a = 0, int b = 0 ) { x = a; y = b; } bool operator ==( const point& o ) { return o.x == x && o.y == y; } point operator +( const point& o ) { return point( o.x + x, o.y + y ); } int x, y; }; class map { public: map() { float current_random_value; w = h = SQUARE_SIDE_SIZE; for( int r = 0; r < h; r++ ) for( int s = 0; s < w; s++ ){ if( !( (s ==x_start && r == y_start) || (s == x_end && r == y_end) )){ current_random_value = rand()/(float)RAND_MAX; m[s][r] = current_random_value < WALL_PERCENTAGE ? 1 : 0; } else m[s][r] = 0; // cout << "m[" << s << "][" << r <<"] = " << m[s][r] << endl; } // cout << endl; } int operator() ( int x, int y ) { return m[x][y]; } int m[SQUARE_SIDE_SIZE][SQUARE_SIDE_SIZE]; int w, h; }; class node { public: bool operator == (const node& o ) { return pos == o.pos; } bool operator == (const point& o ) { return pos == o; } bool operator < (const node& o ) { return dist + cost < o.dist + o.cost; } point pos, parent; int dist, cost; }; //Fonction called from the GPU and executed by the GPU //-------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------- __device__ bool isValid( point& p ) { return ( p.x >-1 && p.y > -1 && p.x < SQUARE_SIDE_SIZE && p.y < SQUARE_SIDE_SIZE ); } __device__ int dev_calcDist( point& p, point& dev_end){ // need a better heuristic int x = dev_end.x - p.x, y = dev_end.y - p.y; return( x * x + y * y ); } //If we don't find a node with a cheaper path to the same point then we erase the old one and we return true else we return false and we forget the new path __device__ bool existPoint( point& p, int cost, list<node> dev_closed, list<node> dev_open) { list<node>::iterator i; i = thrust::find( dev_closed.begin(), dev_closed.end(), p ); if( i != dev_closed.end() ) { if( ( *i ).cost + ( *i ).dist < cost ) return true; else { dev_closed.erase( i ); return false; } } i = thrust::find( dev_open.begin(), dev_open.end(), p ); if( i != dev_open.end() ) { if( ( *i ).cost + ( *i ).dist < cost ) return true; else { dev_open.erase( i ); return false; } } return false; } //-------------------------------------------------------------------------------------------------------- //-------------------------------------------------------------------------------------------------------- class aStar { public: aStar() { neighbours[0] = point( -1, -1 ); neighbours[1] = point( 1, -1 ); neighbours[2] = point( -1, 1 ); neighbours[3] = point( 1, 1 ); neighbours[4] = point( 0, -1 ); neighbours[5] = point( -1, 0 ); neighbours[6] = point( 0, 1 ); neighbours[7] = point( 1, 0 ); } int calcDist( point& p ){ // need a better heuristic int x = end.x - p.x, y = end.y - p.y; return( x * x + y * y ); } __global__ void fillOpen(node* dev_n, point* dev_neighbours, int* dev_map, bool* dev_found, list<node> dev_open, list<node> 
dev_closed) { int stepCost, nc, dist; point neighbour; int i = threadIdx.x + blockIdx.x * blockDim.x; //We investigate all neighbours // one can make diagonals have different cost stepCost = i < 4 ? 1 : 1; //The variable neigbours has the direct neighbours from index 0 to 3 and the diagonal neighbours from index 4 to 7 neighbour = dev_n->pos + dev_neighbours[i]; //The variable neighbours contains the relative moves from the current position to find the neighbours if( neighbour == end ) *dev_found = true; if( isValid( neighbour ) && dev_map[neighbour.x, neighbour.y] != 1 ) { //Here we inspect the new position if the position is in the map and the position isn't a wall nc = stepCost + dev_n->cost; dist = calcDist( neighbour ); if( !existPoint( neighbour, nc + dist , dev_open, dev_closed) ) { //If we don't have any path to the same point in open or closed where the cost is cheaper, we create a new node in open node m; m.cost = nc; m.dist = dist; m.pos = neighbour; m.parent = dev_n->pos; dev_open.push_back( m ); } } *dev_found = false; } /* You specify a beginning point, an end point, and a map where you want to find the cheapest way. It initializes all attributes from the object astar to keep these data in mind. We create the first node with parent 0 and current_pos the first position with a cost of zero. */ bool search( point& s, point& e, map& mp ) { //Allocate memory in the GPU point* dev_neighbours; point* dev_end; node* dev_n; int* dev_map; bool* host_found; bool* dev_found; list<node>* dev_open; list<node>* dev_closed; cudaMalloc( (void**)&dev_neighbours, 8*sizeof(point) ); //Declare the neighbours variable for the GPU cudaMalloc( (void**)&dev_end, sizeof(point) ); //Declare the end point for the GPU cudaMalloc( (void**)&dev_map, SQUARE_SIDE_SIZE*SQUARE_SIDE_SIZE*sizeof(point) ); //Declare the end point for the GPU //Declare the current node that will be processed by the GPU cudaMalloc( (void**)&dev_n, sizeof(node) ); //Declare the bool result in the GPU that is needed for our stop condition cudaMalloc( (void**)&dev_found, sizeof(bool) ) //Copy values in the GPU's memory cudaMemcpy( dev_end, &e, sizeof(point), cudaMemcpyHostToDevice ); cudaMemcpy( dev_neighbours, neighbours, 8*sizeof(point), cudaMemcpyHostToDevice ); cudaMemcpy( dev_map, mp.m, SQUARE_SIDE_SIZE*SQUARE_SIDE_SIZE*sizeof(int), cudaMemcpyHostToDevice ); node n; end = e; start = s; m = mp; n.cost = 0; n.pos = s; n.parent = 0; n.dist = calcDist( s ); open.push_back( n ); while( !open.empty() ) { //Search stops when all nodes are closed, it means all ways have been inverstigated //open.sort(); node n = open.front(); //FIFO research open.pop_front(); //As we investigated the node, we can consider it closed (i.e. 
investigated) closed.push_back( n ); //So we fill the node in closed to keep it in memory //We update the value of the current node in the GPU's memory cudaMemcpy( dev_n, &n, sizeof(node), cudaMemcpyHostToDevice ); //Create device open and close list cudaMalloc( (void**)&dev_open, open.size()*sizeof(node) ); cudaMalloc( (void**)&dev_closed, closed.size()*sizeof(node) ); cudaMemcpy( dev_open, open, open.size()*sizeof(node), cudaMemcpyHostToDevice ); cudaMemcpy( dev_closed, closed, closed.size()*sizeof(node), cudaMemcpyHostToDevice ); fillOpen<<<1,8>>>( dev_n, dev_neighbours, dev_map, dev_found, dev_open, dev_closed); //We update CPU's open and closed lists using the one that were modified by the kernel cudaMemcpy( open, dev_open, dev_open.size()*sizeof(node), cudaMemcpyDeviceToHost ); cudaMemcpy( closed, dev_closed, dev_closed.size()*sizeof(node), cudaMemcpyDeviceToHost ); //We free GPU's open and closed lists cudaFree(dev_open); cudaFree(dev_closed); cudaMemcpy( host_found, dev_found, sizeof(bool), cudaMemcpyDeviceToHost ); if( *host_found ){ //Free GPU's memory cudaFree(dev_found); cudaFree(dev_n); cudaFree(dev_end); cudaFree(dev_neighbours); cudaFree(dev_map); return true; } } //Free GPU's memory cudaFree(dev_n); cudaFree(dev_found); cudaFree(dev_end); cudaFree(dev_neighbours); cudaFree(dev_map); return false; } /* Recreate the path from the closed list containing all the nodes that leads to the solution */ int path( list<point>& path ) { path.push_front( end ); //We last nodes first so at the end, the path list will be in the right order int cost = 1 + closed.back().cost; //We consider the last move to the end to cost 1 ???? path.push_front( closed.back().pos ); point parent = closed.back().parent; for( list<node>::reverse_iterator i = closed.rbegin(); i != closed.rend(); i++ ) { //We go through the entire close node list till we reach the start point if( ( *i ).pos == parent && !( ( *i ).pos == start ) ) { path.push_front( ( *i ).pos ); parent = ( *i ).parent; } } path.push_front( start ); return cost; } map m; point end, start; point neighbours[8]; list<node> open; list<node> closed; }; int main( int argc, char* argv[] ) { map m; point s(x_start,y_start), e(x_end,y_end); //s is the start e is the end aStar as; //Start point to measure executions time auto start = high_resolution_clock::now(); if( as.search( s, e, m ) ) { list<point> path; int c = as.path( path ); for( int y = -1; y < SQUARE_SIDE_SIZE+1; y++ ) { for( int x = -1; x < SQUARE_SIDE_SIZE+1; x++ ) { if( x < 0 || y < 0 || x > SQUARE_SIDE_SIZE-1 || y > SQUARE_SIDE_SIZE-1 || m( x, y ) == 1 ) cout << "w"; else { if( find( path.begin(), path.end(), point( x, y ) )!= path.end() ) cout << "x"; else cout << "."; } } cout << "\n"; } cout << "\nPath cost " << c << ": "; for( list<point>::iterator i = path.begin(); i != path.end(); i++ ) { cout<< "(" << ( *i ).x << ", " << ( *i ).y << ") "; } } cout << "\n\n"; // Stop point to measure executions time auto stop = high_resolution_clock::now(); // Display execution time auto duration = duration_cast<microseconds>(stop - start); cout << "CPU execution time = " << duration.count() << " microseconds" <<endl; return 0; }
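As written, fillOpen cannot be compiled by either toolchain: a __global__ function cannot be a non-static class member, std::list and thrust::find over list iterators are not usable in device code, dev_map[neighbour.x, neighbour.y] indexes with the comma operator instead of a 2D offset, and the cudaMalloc/hipMalloc call for dev_found is missing its semicolon. Below is a minimal CUDA-style sketch of the same neighbour-expansion step using flat arrays in global memory; the names (FlatNode, expandNeighbours, d_open_count, d_dirs) are illustrative assumptions, and the duplicate filtering that existPoint performs would still have to happen on the host or in a separate pass.

#include <cuda_runtime.h>

#define N 8  // grid side, mirrors SQUARE_SIDE_SIZE above

struct FlatNode {
  int x, y;        // position
  int px, py;      // parent position
  int cost, dist;  // path cost so far and heuristic value
};

__device__ int heuristic(int x, int y, int ex, int ey) {
  int dx = ex - x, dy = ey - y;
  return dx * dx + dy * dy;  // same squared-distance heuristic as the original
}

// One thread per direction; valid, non-wall neighbours are appended to d_open.
// Assumes d_open has room for at least *d_open_count + 8 more entries.
__global__ void expandNeighbours(FlatNode current, const int *d_map,
                                 const int2 *d_dirs, FlatNode *d_open,
                                 int *d_open_count, int ex, int ey, int *d_found) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= 8) return;

  int nx = current.x + d_dirs[i].x;
  int ny = current.y + d_dirs[i].y;
  if (nx < 0 || ny < 0 || nx >= N || ny >= N) return;  // outside the map
  if (d_map[ny * N + nx] == 1) return;                 // wall

  if (nx == ex && ny == ey) atomicExch(d_found, 1);    // goal reached

  FlatNode m;
  m.x = nx;  m.y = ny;
  m.px = current.x;  m.py = current.y;
  m.cost = current.cost + 1;
  m.dist = heuristic(nx, ny, ex, ey);

  int slot = atomicAdd(d_open_count, 1);  // reserve a slot in the open list
  d_open[slot] = m;
}

A host caller would launch expandNeighbours<<<1, 8>>>(...) once per expanded node, mirroring the fillOpen<<<1,8>>> launch above, and read back d_open_count and d_found before deciding whether to continue the search.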
7fe4b1e3ed99ac1261bb80f9e775bca901aa2d76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <omp.h> #define threshold 5 //(50% probability) #define block_size 256 __global__ void second_calculation (char* dev_a, char* dev_b, char* dev_c, int matrix_size, int k) { int j = (blockIdx.x * blockDim.x) + threadIdx.x; if (j >= matrix_size) return; if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] + dev_b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] - dev_b[index]; } } } __global__ void calculation( char* dev_a, char* dev_b, char* dev_c, int num_matrices, int matrix_size ) { // Each thread handles a matrix int k = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (k >= num_matrices) return; // If first element is different than 0 do the computation if (dev_a[k*matrix_size*matrix_size] != 0){ // help dim3 grid(((matrix_size -1) / block_size) + 1); dim3 block(block_size); hipLaunchKernelGGL(( second_calculation), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, dev_c, matrix_size, k); } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments (we only receive command + vector size) if (argc != 3) { // Tell the user how to run the program printf ("Usage:\n%s <number of matrices> <matrix_size>\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return -1; } srand ( time(NULL) ); // Set variables with input arguments int num_matrices = atoi(argv[1]); int matrix_size = atoi(argv[2]); // Set device that we will use for our cuda code hipSetDevice(0); // Time Variables hipEvent_t stp_start, stp_stop; hipEvent_t cpu_start, cpu_stop; hipEvent_t gpu_start, gpu_stop; hipEvent_t ker_start, ker_stop; hipEventCreate (&stp_start); hipEventCreate (&stp_stop); hipEventCreate (&cpu_start); hipEventCreate (&cpu_stop); hipEventCreate (&gpu_start); hipEventCreate (&gpu_stop); hipEventCreate (&ker_start); hipEventCreate (&ker_stop); float time, ker_time; // Input Arrays and variables char *a = new char [num_matrices*matrix_size*matrix_size]; char *b = new char [num_matrices*matrix_size*matrix_size]; char *c_cpu = new char [num_matrices*matrix_size*matrix_size]; char *c_gpu = new char [num_matrices*matrix_size*matrix_size]; // Pointers in GPU memory char *dev_a; char *dev_b; char *dev_c; // // Fill arrays ////////////////// hipEventRecord(stp_start,0); #if defined(_OPENMP) printf("Setting up input arrays in parallel.\n"); omp_set_num_threads(8); #else printf("Setting up input arrays.\n"); #endif #pragma omp parallel for for (int k = 0; k < num_matrices; k++) { #if defined(_OPENMP) if (k == 0) printf ("Using %d threads.\n", omp_get_num_threads()); #endif for (int j = 0; j < matrix_size*matrix_size; j++){ a[k*matrix_size*matrix_size + j] = j%9+1; b[k*matrix_size*matrix_size + j] = j%10; c_cpu[k*matrix_size*matrix_size + j] = 0; c_gpu[k*matrix_size*matrix_size + j] = 0; } } hipEventRecord(stp_stop,0); hipEventSynchronize(stp_stop); hipEventElapsedTime(&time, stp_start, stp_stop); printf("\tSetup Time: %.2f ms\n", time); // // CPU Calculation ////////////////// printf("Running sequential job.\n"); hipEventRecord(cpu_start,0); // Calculate C in the CPU for (int k = 0; k < 
num_matrices; k++) { // If first element is different than 0 do the computation if (a[k*matrix_size*matrix_size] != 0){ for (int j = 0; j < matrix_size; j++){ //If first value in the row of the matrix, do addition if (a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] + b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] - b[index]; } } } } } hipEventRecord(cpu_stop,0); hipEventSynchronize(cpu_stop); hipEventElapsedTime(&time, cpu_start, cpu_stop); printf("\tSequential Job Time: %.2f ms\n", time); // // GPU Calculation ////////////////// printf("Running parallel job.\n"); int grid_size = ((num_matrices-1)/block_size) + 1; hipEventRecord(gpu_start,0); // allocate the memory on the GPU hipMalloc( (void**)&dev_a, num_matrices * matrix_size * matrix_size * sizeof(char) ); hipMalloc( (void**)&dev_b, num_matrices * matrix_size * matrix_size * sizeof(char) ); hipMalloc( (void**)&dev_c, num_matrices * matrix_size * matrix_size * sizeof(char) ); // set arrays to 0 hipMemset(dev_a, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); hipMemset(dev_b, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); hipMemset(dev_c, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); // copy the 'data' to the GPU hipMemcpy( dev_a, a, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyHostToDevice ); hipMemcpy( dev_b, b, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyHostToDevice ); // run kernel hipEventRecord(ker_start,0); hipLaunchKernelGGL(( calculation), dim3(grid_size),dim3(block_size), 0, 0, dev_a, dev_b, dev_c, num_matrices, matrix_size ); hipEventRecord(ker_stop,0); // copy the array 'c' back from the GPU to the CPU hipMemcpy( c_gpu, dev_c, num_matrices * matrix_size * matrix_size * sizeof(char), hipMemcpyDeviceToHost ); hipEventRecord(gpu_stop,0); hipEventSynchronize(gpu_stop); hipEventElapsedTime(&time , gpu_start, gpu_stop); hipEventElapsedTime(&ker_time, ker_start, ker_stop); printf("\tParallel Job Time: %.2f ms\n", time); printf("\tKernel Exec. Time: %.2f ms\n", ker_time); // // Compare Results ////////////////// int error = 0; for (int i = 0; i < num_matrices * matrix_size * matrix_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // // Free resources ////////////////// // free the memory allocated on the GPU hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_c ); // free cuda events hipEventDestroy (cpu_start); hipEventDestroy (gpu_start); hipEventDestroy (ker_start); hipEventDestroy (cpu_stop); hipEventDestroy (gpu_stop); hipEventDestroy (ker_stop); // free CPU memory free(a); free(b); free(c_cpu); free(c_gpu); return 0; }
7fe4b1e3ed99ac1261bb80f9e775bca901aa2d76.cu
#include <stdio.h> #include <stdlib.h> #include <omp.h> #define threshold 5 //(50% probability) #define block_size 256 __global__ void second_calculation (char* dev_a, char* dev_b, char* dev_c, int matrix_size, int k) { int j = (blockIdx.x * blockDim.x) + threadIdx.x; if (j >= matrix_size) return; if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] + dev_b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; dev_c[index] = dev_a[index] - dev_b[index]; } } } __global__ void calculation( char* dev_a, char* dev_b, char* dev_c, int num_matrices, int matrix_size ) { // Each thread handles a matrix int k = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (k >= num_matrices) return; // If first element is different than 0 do the computation if (dev_a[k*matrix_size*matrix_size] != 0){ // help dim3 grid(((matrix_size -1) / block_size) + 1); dim3 block(block_size); second_calculation<<<grid, block>>>(dev_a, dev_b, dev_c, matrix_size, k); } } int main( int argc, char* argv[] ) { // Parse Input arguments // Check the number of arguments (we only receive command + vector size) if (argc != 3) { // Tell the user how to run the program printf ("Usage:\n%s <number of matrices> <matrix_size>\n", argv[0]); // "Usage messages" are a conventional way of telling the user // how to run a program if they enter the command incorrectly. return -1; } srand ( time(NULL) ); // Set variables with input arguments int num_matrices = atoi(argv[1]); int matrix_size = atoi(argv[2]); // Set device that we will use for our cuda code cudaSetDevice(0); // Time Variables cudaEvent_t stp_start, stp_stop; cudaEvent_t cpu_start, cpu_stop; cudaEvent_t gpu_start, gpu_stop; cudaEvent_t ker_start, ker_stop; cudaEventCreate (&stp_start); cudaEventCreate (&stp_stop); cudaEventCreate (&cpu_start); cudaEventCreate (&cpu_stop); cudaEventCreate (&gpu_start); cudaEventCreate (&gpu_stop); cudaEventCreate (&ker_start); cudaEventCreate (&ker_stop); float time, ker_time; // Input Arrays and variables char *a = new char [num_matrices*matrix_size*matrix_size]; char *b = new char [num_matrices*matrix_size*matrix_size]; char *c_cpu = new char [num_matrices*matrix_size*matrix_size]; char *c_gpu = new char [num_matrices*matrix_size*matrix_size]; // Pointers in GPU memory char *dev_a; char *dev_b; char *dev_c; // // Fill arrays ////////////////// cudaEventRecord(stp_start,0); #if defined(_OPENMP) printf("Setting up input arrays in parallel.\n"); omp_set_num_threads(8); #else printf("Setting up input arrays.\n"); #endif #pragma omp parallel for for (int k = 0; k < num_matrices; k++) { #if defined(_OPENMP) if (k == 0) printf ("Using %d threads.\n", omp_get_num_threads()); #endif for (int j = 0; j < matrix_size*matrix_size; j++){ a[k*matrix_size*matrix_size + j] = j%9+1; b[k*matrix_size*matrix_size + j] = j%10; c_cpu[k*matrix_size*matrix_size + j] = 0; c_gpu[k*matrix_size*matrix_size + j] = 0; } } cudaEventRecord(stp_stop,0); cudaEventSynchronize(stp_stop); cudaEventElapsedTime(&time, stp_start, stp_stop); printf("\tSetup Time: %.2f ms\n", time); // // CPU Calculation ////////////////// printf("Running sequential job.\n"); cudaEventRecord(cpu_start,0); // Calculate C in the CPU for (int k = 0; k < num_matrices; k++) { // If first element is different than 0 do the computation if 
(a[k*matrix_size*matrix_size] != 0){ for (int j = 0; j < matrix_size; j++){ //If first value in the row of the matrix, do addition if (a[k*matrix_size*matrix_size+j*matrix_size] < threshold){ for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] + b[index]; } //Do subtraction } else { for (int i = 0; i < matrix_size; i++){ int index = k*matrix_size*matrix_size+j*matrix_size+i; c_cpu[index] = a[index] - b[index]; } } } } } cudaEventRecord(cpu_stop,0); cudaEventSynchronize(cpu_stop); cudaEventElapsedTime(&time, cpu_start, cpu_stop); printf("\tSequential Job Time: %.2f ms\n", time); // // GPU Calculation ////////////////// printf("Running parallel job.\n"); int grid_size = ((num_matrices-1)/block_size) + 1; cudaEventRecord(gpu_start,0); // allocate the memory on the GPU cudaMalloc( (void**)&dev_a, num_matrices * matrix_size * matrix_size * sizeof(char) ); cudaMalloc( (void**)&dev_b, num_matrices * matrix_size * matrix_size * sizeof(char) ); cudaMalloc( (void**)&dev_c, num_matrices * matrix_size * matrix_size * sizeof(char) ); // set arrays to 0 cudaMemset(dev_a, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); cudaMemset(dev_b, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); cudaMemset(dev_c, 0, num_matrices * matrix_size * matrix_size * sizeof(char)); // copy the 'data' to the GPU cudaMemcpy( dev_a, a, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyHostToDevice ); cudaMemcpy( dev_b, b, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyHostToDevice ); // run kernel cudaEventRecord(ker_start,0); calculation<<<grid_size,block_size>>>( dev_a, dev_b, dev_c, num_matrices, matrix_size ); cudaEventRecord(ker_stop,0); // copy the array 'c' back from the GPU to the CPU cudaMemcpy( c_gpu, dev_c, num_matrices * matrix_size * matrix_size * sizeof(char), cudaMemcpyDeviceToHost ); cudaEventRecord(gpu_stop,0); cudaEventSynchronize(gpu_stop); cudaEventElapsedTime(&time , gpu_start, gpu_stop); cudaEventElapsedTime(&ker_time, ker_start, ker_stop); printf("\tParallel Job Time: %.2f ms\n", time); printf("\tKernel Exec. Time: %.2f ms\n", ker_time); // // Compare Results ////////////////// int error = 0; for (int i = 0; i < num_matrices * matrix_size * matrix_size; i++) { if (c_cpu[i] != c_gpu[i]){ error = 1; printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] ); } if (error) break; } if (error == 0){ printf ("Correct result. No errors were found.\n"); } // // Free resources ////////////////// // free the memory allocated on the GPU cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_c ); // free cuda events cudaEventDestroy (cpu_start); cudaEventDestroy (gpu_start); cudaEventDestroy (ker_start); cudaEventDestroy (cpu_stop); cudaEventDestroy (gpu_stop); cudaEventDestroy (ker_stop); // free CPU memory free(a); free(b); free(c_cpu); free(c_gpu); return 0; }
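The calculation kernel above launches second_calculation from device code (dynamic parallelism), which only builds with relocatable device code on a GPU of compute capability 3.5 or newer; the program also never checks whether the nested launch succeeded, and the host buffers created with new[] are released with free() where delete[] would match the allocation. The sketch below is one way to add the missing error checks; the checkCuda helper is an assumption rather than part of the original file, and the build line in the comment is only an example (exact flags depend on the toolkit and target architecture).

// Example build line (assumption): nvcc -arch=sm_60 -rdc=true matrices.cu -lcudadevrt -Xcompiler -fopenmp
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

static void checkCuda(cudaError_t err, const char *what) {
  if (err != cudaSuccess) {
    fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
    exit(1);
  }
}

// Around the launch in main():
//   calculation<<<grid_size, block_size>>>(dev_a, dev_b, dev_c, num_matrices, matrix_size);
//   checkCuda(cudaGetLastError(), "calculation launch");
//   checkCuda(cudaDeviceSynchronize(), "calculation execution");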
57eb5fe31ecb0e7fa24e37d24f8848362b36f85f.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>

// allocate memory on gpu
extern "C++" void cu_safe_falloc(float **g_f, size_t n_elem) {
  void *gptr;
  hipError_t crc = hipMalloc(&gptr, n_elem*sizeof(float));
  if (crc) {
    printf("hipMalloc Error=%d:%s\n", crc, hipGetErrorString(crc));
    exit(1);
  }
  *g_f = (float*) gptr;
}

// free memory on gpu
extern "C++" void cu_free(void *g_d) {
  hipError_t crc = hipFree(g_d);
  if (crc) {
    printf("hipFree Error=%d:%s\n", crc, hipGetErrorString(crc));
    exit(1);
  }
}

// copy from cpu space f to gpu space g_f
extern "C++" void memcpy_htod(float *g_f, float *f, size_t n_elem) {
  hipError_t crc = hipMemcpy((void*)g_f, f, sizeof(float)*n_elem, hipMemcpyHostToDevice);
  if (crc) {
    printf("hipMemcpyHostToDevice float Error=%d:%s\n", crc, hipGetErrorString(crc));
    exit(1);
  }
}

// copy from gpu space g_f to cpu space f
extern "C++" void memcpy_dtoh(float *f, float *g_f, size_t n_elem) {
  hipError_t crc = hipMemcpy(f, (void*)g_f, sizeof(float)*n_elem, hipMemcpyDeviceToHost);
  if (crc) {
    printf("hipMemcpyDeviceToHost float Error=%d:%s\n", crc, hipGetErrorString(crc));
    exit(1);
  }
  return;
}
57eb5fe31ecb0e7fa24e37d24f8848362b36f85f.cu
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>

// allocate memory on gpu
extern "C++" void cu_safe_falloc(float **g_f, size_t n_elem) {
  void *gptr;
  cudaError_t crc = cudaMalloc(&gptr, n_elem*sizeof(float));
  if (crc) {
    printf("cudaMalloc Error=%d:%s\n", crc, cudaGetErrorString(crc));
    exit(1);
  }
  *g_f = (float*) gptr;
}

// free memory on gpu
extern "C++" void cu_free(void *g_d) {
  cudaError_t crc = cudaFree(g_d);
  if (crc) {
    printf("cudaFree Error=%d:%s\n", crc, cudaGetErrorString(crc));
    exit(1);
  }
}

// copy from cpu space f to gpu space g_f
extern "C++" void memcpy_htod(float *g_f, float *f, size_t n_elem) {
  cudaError_t crc = cudaMemcpy((void*)g_f, f, sizeof(float)*n_elem, cudaMemcpyHostToDevice);
  if (crc) {
    printf("cudaMemcpyHostToDevice float Error=%d:%s\n", crc, cudaGetErrorString(crc));
    exit(1);
  }
}

// copy from gpu space g_f to cpu space f
extern "C++" void memcpy_dtoh(float *f, float *g_f, size_t n_elem) {
  cudaError_t crc = cudaMemcpy(f, (void*)g_f, sizeof(float)*n_elem, cudaMemcpyDeviceToHost);
  if (crc) {
    printf("cudaMemcpyDeviceToHost float Error=%d:%s\n", crc, cudaGetErrorString(crc));
    exit(1);
  }
  return;
}
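The four wrappers above cover a complete allocation / copy round trip; a small illustrative driver is sketched below. The wrapper declarations mirror the definitions in the file, while main and its test values are assumptions added only for demonstration.

#include <cstddef>
#include <cstdio>
#include <vector>

void cu_safe_falloc(float **g_f, size_t n_elem);
void cu_free(void *g_d);
void memcpy_htod(float *g_f, float *f, size_t n_elem);
void memcpy_dtoh(float *f, float *g_f, size_t n_elem);

int main() {
  const size_t n = 16;
  std::vector<float> host(n, 1.5f), back(n, 0.0f);

  float *dev = nullptr;
  cu_safe_falloc(&dev, n);           // device allocation with error checking
  memcpy_htod(dev, host.data(), n);  // host -> device
  memcpy_dtoh(back.data(), dev, n);  // device -> host
  cu_free(dev);

  printf("back[0] = %f\n", back[0]); // expect 1.5
  return 0;
}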
15398cf771403cb248290761cd0908b0dfec0c0e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> using namespace std; #include <Task.hpp> #include <GridData.hpp> #include <CudaDirectSolver.hpp> #include <ReverseSolver.hpp> #include <SLESolver.hpp> #include <rocblas.h> #include <Matrix.hpp> #include <FinalSolver.hpp> void TaskOn1000() { GridData layer1(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer2.dat")); /*GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\sum.dat"));*/ GridParameters gp = layer1.GetGridParameters(); CudaDirectSolver dslvr(gp, false); Task t1; t1.asimptHeight = 5.0f; t1.geltaSigm = 0.25f; t1.initialZ = 5.0f; t1.taskType = TASK_TYPE_GRAVIMETRY; t1.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t1.precision = 0.005f; t1.grid = layer1; t1.exactSolution = layer1; //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer1_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1_field", DAT_FILE_FORMAT); Task t2; t2.asimptHeight = 20.0f; t2.geltaSigm = 0.3f; t2.initialZ = 20.0f; t2.taskType = TASK_TYPE_GRAVIMETRY; t2.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t2.precision = 0.005f; t2.grid = layer2; t2.exactSolution = layer2; //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer2_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer2_field", DAT_FILE_FORMAT); MultilayerTask mTask(layer1); mTask.AddTask(t1); mTask.AddTask(t2); dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag500\\model1\\sum", DAT_FILE_FORMAT); } int main(int argc, char* argv[]) { try { setlocale(LC_ALL, "Russian"); cout << " !!" 
<< endl; //**************************************************** 2********************************************* GridData layer1(string("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer2.dat")); /*GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag250\\model2\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag250\\model2\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag250\\model2\\sum.dat"));*/ GridParameters gp = layer1.GetGridParameters(); CudaDirectSolver dslvr(gp, false); Task t1; t1.asimptHeight = 5.0f; t1.geltaSigm = 0.25f; t1.initialZ = 5.0f; t1.taskType = TASK_TYPE_GRAVIMETRY; t1.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t1.precision = 0.005f; t1.grid = layer1; t1.exactSolution = layer1; dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer1_field", DAT_FILE_FORMAT); Task t2; t2.asimptHeight = 20.0f; t2.geltaSigm = 0.3f; t2.initialZ = 20.0f; t2.taskType = TASK_TYPE_GRAVIMETRY; t2.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t2.precision = 0.005f; t2.grid = layer2; t2.exactSolution = layer2; dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer2_field", DAT_FILE_FORMAT); MultilayerTask mTask(layer1); mTask.AddTask(t1); mTask.AddTask(t2); dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model2\\sum", DAT_FILE_FORMAT); /*vector<GridData> result = LightMultilayerLinearisedMinimalError(mTask, 0.4f); string outName("D:\\Workspace\\Science\\grids\\mag100\\model2\\ans_layer"); string ss; char num; int L = result.size(); for (int i = 1; i <= L; i++) { ss = outName; num = '0' + i; ss.append(string(&num).substr(0, 1)); result[i - 1].SaveToFile(ss, DAT_FILE_FORMAT); }*/ return 0; //**************************************************** 1********************************************* //TaskOn1000(); //return 0; /*GridData layer1(string("D:\\Workspace\\Science\\grids\\mag\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag\\layer2.dat")); GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag\\sum.dat"));*/ /*GridData layer1(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer2.dat")); GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\sum.dat"));*/ //GridData layer1(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1.dat")); //GridData layer2(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2.dat")); //GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1_field.dat")); //GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2_field.dat")); //GridData sum(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\sum.dat")); //GridParameters gp = layer1.GetGridParameters(); //GridParameters gp = sum.GetGridParameters(); //CudaDirectSolver dslvr(gp, false); /*Task t1; t1.asimptHeight = 5.0f; t1.geltaSigm = 0.25f; t1.initialZ = 5.0f; t1.taskType = 
TASK_TYPE_GRAVIMETRY; t1.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t1.precision = 0.005f; t1.grid = layer1_field; t1.exactSolution = layer1;*/ //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer1_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer1_field", DAT_FILE_FORMAT); /*Task t2; t2.asimptHeight = 20.0f; t2.geltaSigm = 0.3f; t2.initialZ = 20.0f; t2.taskType = TASK_TYPE_GRAVIMETRY; t2.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t2.precision = 0.005f; t2.grid = layer2_field; t2.exactSolution = layer2;*/ //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer2_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2_field", DAT_FILE_FORMAT); //return 0; /*MultilayerTask mTask(sum); mTask.AddTask(t1); mTask.AddTask(t2);*/ //dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\sum", DAT_FILE_FORMAT); //dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag100\\model1\\sum", DAT_FILE_FORMAT); //return; //LightLinearisedMinimalError(t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\res1", DAT_FILE_FORMAT); //LightLinearisedSpeedDescent(t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\res1", DAT_FILE_FORMAT); //LinearisedSpeedDescent(&dslvr, t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\res1", DAT_FILE_FORMAT); //return; //LightLevenbergMarkvardt(t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\result1", DAT_FILE_FORMAT); //return; //vector<GridData> result = LightMultilayerLevenbergMarkvardt(mTask, 1.0f); /*vector<GridData> result = LightMultilayerLinearisedMinimalError(mTask, 1.0f); string outName("D:\\Workspace\\Science\\grids\\mag\\ans_layer"); string ss; char num; int L = result.size(); for (int i = 1; i <= L; i++) { ss = outName; num = '0' + i; ss.append(string(&num).substr(0, 1)); result[i - 1].SaveToFile(ss, DAT_FILE_FORMAT); } return 0;*/ } catch(string s) { cout << s << endl; } catch (char *str) { cout << str << endl; } system("PAUSE"); return 0; }
15398cf771403cb248290761cd0908b0dfec0c0e.cu
#include <iostream> using namespace std; #include <Task.hpp> #include <GridData.hpp> #include <CudaDirectSolver.hpp> #include <ReverseSolver.hpp> #include <SLESolver.hpp> #include <cublas.h> #include <Matrix.hpp> #include <FinalSolver.hpp> void TaskOn1000() { GridData layer1(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer2.dat")); /*GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\sum.dat"));*/ GridParameters gp = layer1.GetGridParameters(); CudaDirectSolver dslvr(gp, false); Task t1; t1.asimptHeight = 5.0f; t1.geltaSigm = 0.25f; t1.initialZ = 5.0f; t1.taskType = TASK_TYPE_GRAVIMETRY; t1.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t1.precision = 0.005f; t1.grid = layer1; t1.exactSolution = layer1; //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer1_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1_field", DAT_FILE_FORMAT); Task t2; t2.asimptHeight = 20.0f; t2.geltaSigm = 0.3f; t2.initialZ = 20.0f; t2.taskType = TASK_TYPE_GRAVIMETRY; t2.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t2.precision = 0.005f; t2.grid = layer2; t2.exactSolution = layer2; //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer2_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer2_field", DAT_FILE_FORMAT); MultilayerTask mTask(layer1); mTask.AddTask(t1); mTask.AddTask(t2); dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag500\\model1\\sum", DAT_FILE_FORMAT); } int main(int argc, char* argv[]) { try { setlocale(LC_ALL, "Russian"); cout << "Программа начала работу!!" 
<< endl; //****************************************************Модель 2********************************************* GridData layer1(string("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer2.dat")); /*GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag250\\model2\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag250\\model2\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag250\\model2\\sum.dat"));*/ GridParameters gp = layer1.GetGridParameters(); CudaDirectSolver dslvr(gp, false); Task t1; t1.asimptHeight = 5.0f; t1.geltaSigm = 0.25f; t1.initialZ = 5.0f; t1.taskType = TASK_TYPE_GRAVIMETRY; t1.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t1.precision = 0.005f; t1.grid = layer1; t1.exactSolution = layer1; dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer1_field", DAT_FILE_FORMAT); Task t2; t2.asimptHeight = 20.0f; t2.geltaSigm = 0.3f; t2.initialZ = 20.0f; t2.taskType = TASK_TYPE_GRAVIMETRY; t2.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t2.precision = 0.005f; t2.grid = layer2; t2.exactSolution = layer2; dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model2\\layer2_field", DAT_FILE_FORMAT); MultilayerTask mTask(layer1); mTask.AddTask(t1); mTask.AddTask(t2); dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model2\\sum", DAT_FILE_FORMAT); /*vector<GridData> result = LightMultilayerLinearisedMinimalError(mTask, 0.4f); string outName("D:\\Workspace\\Science\\grids\\mag100\\model2\\ans_layer"); string ss; char num; int L = result.size(); for (int i = 1; i <= L; i++) { ss = outName; num = '0' + i; ss.append(string(&num).substr(0, 1)); result[i - 1].SaveToFile(ss, DAT_FILE_FORMAT); }*/ return 0; //****************************************************Модель 1********************************************* //TaskOn1000(); //return 0; /*GridData layer1(string("D:\\Workspace\\Science\\grids\\mag\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag\\layer2.dat")); GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag\\sum.dat"));*/ /*GridData layer1(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer1.dat")); GridData layer2(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer2.dat")); GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer1_field.dat")); GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\layer2_field.dat")); GridData sum(string("D:\\Workspace\\Science\\grids\\mag200\\model1\\sum.dat"));*/ //GridData layer1(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1.dat")); //GridData layer2(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2.dat")); //GridData layer1_field(string("D:\\Workspace\\Science\\grids\\mag500\\model1\\layer1_field.dat")); //GridData layer2_field(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2_field.dat")); //GridData sum(string("D:\\Workspace\\Science\\grids\\mag1000\\model1\\sum.dat")); //GridParameters gp = layer1.GetGridParameters(); //GridParameters gp = sum.GetGridParameters(); //CudaDirectSolver dslvr(gp, false); /*Task t1; t1.asimptHeight = 5.0f; t1.geltaSigm = 0.25f; t1.initialZ = 5.0f; 
t1.taskType = TASK_TYPE_GRAVIMETRY; t1.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t1.precision = 0.005f; t1.grid = layer1_field; t1.exactSolution = layer1;*/ //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer1_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t1).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer1_field", DAT_FILE_FORMAT); /*Task t2; t2.asimptHeight = 20.0f; t2.geltaSigm = 0.3f; t2.initialZ = 20.0f; t2.taskType = TASK_TYPE_GRAVIMETRY; t2.residualType = RESIDUAL_TYPE_EXACTSOLUTION; t2.precision = 0.005f; t2.grid = layer2_field; t2.exactSolution = layer2;*/ //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\layer2_field", DAT_FILE_FORMAT); //dslvr.SolveDirectTask(t2).SaveToFile("D:\\Workspace\\Science\\grids\\mag1000\\model1\\layer2_field", DAT_FILE_FORMAT); //return 0; /*MultilayerTask mTask(sum); mTask.AddTask(t1); mTask.AddTask(t2);*/ //dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\sum", DAT_FILE_FORMAT); //dslvr.SolveDirectMultilayerTask(mTask).SaveToFile("D:\\Workspace\\Science\\grids\\mag100\\model1\\sum", DAT_FILE_FORMAT); //return; //LightLinearisedMinimalError(t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\res1", DAT_FILE_FORMAT); //LightLinearisedSpeedDescent(t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\res1", DAT_FILE_FORMAT); //LinearisedSpeedDescent(&dslvr, t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\res1", DAT_FILE_FORMAT); //return; //LightLevenbergMarkvardt(t1, 0.5f).SaveToFile("D:\\Workspace\\Science\\grids\\mag\\result1", DAT_FILE_FORMAT); //return; //vector<GridData> result = LightMultilayerLevenbergMarkvardt(mTask, 1.0f); /*vector<GridData> result = LightMultilayerLinearisedMinimalError(mTask, 1.0f); string outName("D:\\Workspace\\Science\\grids\\mag\\ans_layer"); string ss; char num; int L = result.size(); for (int i = 1; i <= L; i++) { ss = outName; num = '0' + i; ss.append(string(&num).substr(0, 1)); result[i - 1].SaveToFile(ss, DAT_FILE_FORMAT); } return 0;*/ } catch(string s) { cout << s << endl; } catch (char *str) { cout << str << endl; } system("PAUSE"); return 0; }
7d41a7177dc9cf6bc1e3972f11e7008addd89505.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <linalg/transpose.h> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <cuda_utils.cuh> #include <cuml/common/cuml_allocator.hpp> #include <cuml/decomposition/pca.hpp> #include <cuml/decomposition/pca_mg.hpp> #include <cuml/decomposition/sign_flip_mg.hpp> #include <matrix/math.cuh> #include <opg/linalg/qr_based_svd.hpp> #include <opg/matrix/matrix_utils.hpp> #include <opg/stats/cov.hpp> #include <opg/stats/mean.hpp> #include <opg/stats/mean_center.hpp> #include <raft/comms/comms.hpp> #include <stats/mean_center.cuh> #include "pca.cuh" using namespace MLCommon; namespace ML { namespace PCA { namespace opg { template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *components, T *explained_var, T *explained_var_ratio, T *singular_vals, T *mu, T *noise_vars, paramsPCAMG prms, hipStream_t *streams, int n_streams, bool verbose) { const auto &comm = handle.get_comms(); hipblasHandle_t cublas_handle = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); Matrix::Data<T> mu_data{mu, size_t(prms.n_cols)}; Stats::opg::mean(mu_data, input_data, input_desc, comm, allocator, streams, n_streams, handle.get_cublas_handle()); device_buffer<T> cov_data(allocator, streams[0], prms.n_cols * prms.n_cols); size_t cov_data_size = cov_data.size(); Matrix::Data<T> cov{cov_data.data(), cov_data_size}; Stats::opg::cov(cov, input_data, input_desc, mu_data, true, comm, allocator, streams, n_streams, cublas_handle); ML::truncCompExpVars<T, mg_solver>(handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]); T scalar = (prms.n_rows - 1); raft::matrix::seqRoot(explained_var, singular_vals, scalar, prms.n_components, streams[0], true); Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams); } /** * @brief performs MNMG fit operation for the pca * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param input: input data * @input param components: principal components of the input data * @output param explained_var: explained var * @output param explained_var_ratio: the explained var ratio * @output param singular_vals: singular values of the data * @output param mu: mean of every column in input * @output param noise_vars: variance of the noise * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *components, T *explained_var, T *explained_var_ratio, T *singular_vals, T *mu, T *noise_vars, paramsPCAMG prms, bool verbose) { int rank = 
handle.get_comms().get_rank(); // TODO: These streams should come from raft::handle_t // Reference issue https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } } else if (prms.algorithm == mg_solver::QR) { const raft::handle_t &h = handle; hipStream_t stream = h.get_stream(); const auto allocator = h.get_device_allocator(); const auto &comm = h.get_comms(); // Center the data Matrix::Data<T> mu_data{mu, size_t(prms.n_cols)}; Stats::opg::mean(mu_data, input_data, input_desc, comm, allocator, streams, n_streams, handle.get_cublas_handle()); Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams, n_streams); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } // Allocate Q, S and V and call QR std::vector<Matrix::Data<T> *> uMatrixParts; Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream); device_buffer<T> sVector(allocator, stream, prms.n_cols); device_buffer<T> vMatrix(allocator, stream, prms.n_cols * prms.n_cols); CUDA_CHECK( hipMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T))); LinAlg::opg::svdQR(h, sVector.data(), uMatrixParts, vMatrix.data(), true, true, prms.tol, prms.n_iterations, input_data, input_desc, rank); // sign flip sign_flip(handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams, n_streams); // Calculate instance variables device_buffer<T> explained_var_all(allocator, stream, prms.n_cols); device_buffer<T> explained_var_ratio_all(allocator, stream, prms.n_cols); T scalar = 1.0 / (prms.n_rows - 1); raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream); raft::matrix::ratio(handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream); Matrix::truncZeroOrigin(sVector.data(), prms.n_cols, singular_vals, prms.n_components, 1, stream); Matrix::truncZeroOrigin(explained_var_all.data(), prms.n_cols, explained_var, prms.n_components, 1, stream); Matrix::truncZeroOrigin(explained_var_ratio_all.data(), prms.n_cols, explained_var_ratio, prms.n_components, 1, stream); MLCommon::LinAlg::transpose(vMatrix.data(), prms.n_cols, stream); Matrix::truncZeroOrigin(vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream); Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream); // Re-add mean to centered data Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } template <typename T> void transform_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input, const Matrix::PartDescriptor input_desc, T *components, std::vector<Matrix::Data<T> *> &trans_input, T *singular_vals, T *mu, const paramsPCAMG prms, hipStream_t *streams, int n_streams, bool verbose) { hipblasHandle_t cublas_h = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); std::vector<Matrix::RankSizePair *> local_blocks = 
input_desc.partsToRanks; if (prms.whiten) { T scalar = T(sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_cols * prms.n_components, streams[0]); raft::matrix::matrixVectorBinaryDivSkipZero(components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]); } for (int i = 0; i < input.size(); i++) { int si = i % n_streams; MLCommon::Stats::meanCenter(input[i]->ptr, input[i]->ptr, mu, size_t(prms.n_cols), local_blocks[i]->size, false, true, streams[si]); T alpha = T(1); T beta = T(0); LinAlg::gemm(input[i]->ptr, local_blocks[i]->size, size_t(prms.n_cols), components, trans_input[i]->ptr, local_blocks[i]->size, int(prms.n_components), HIPBLAS_OP_N, HIPBLAS_OP_T, alpha, beta, cublas_h, streams[si]); MLCommon::Stats::meanAdd(input[i]->ptr, input[i]->ptr, mu, size_t(prms.n_cols), local_blocks[i]->size, false, true, streams[si]); } if (prms.whiten) { raft::matrix::matrixVectorBinaryMultSkipZero(components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]); T scalar = T(1 / sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_cols * prms.n_components, streams[0]); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } } /** * @brief performs MNMG transform operation for the pca. * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param components: principal components of the input data * @output param trans_input: transformed input data * @input param singular_vals: singular values of the data * @input param mu: mean of every column in input * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void transform_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, T *components, Matrix::Data<T> **trans_input, T *singular_vals, T *mu, paramsPCAMG prms, bool verbose) { // We want to update the API of this function, and other functions with // regards to https://github.com/rapidsai/cuml/issues/2471 int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> trans_data(trans_input, trans_input + n_parts); // TODO: These streams should come from raft::handle_t int n_streams = n_parts; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } transform_impl(handle, input_data, input_desc, components, trans_data, singular_vals, mu, prms, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } template <typename T> void inverse_transform_impl( raft::handle_t &handle, std::vector<Matrix::Data<T> *> &trans_input, Matrix::PartDescriptor trans_input_desc, T *components, std::vector<Matrix::Data<T> *> &input, T *singular_vals, T *mu, paramsPCAMG prms, hipStream_t *streams, int n_streams, bool verbose) { hipblasHandle_t cublas_h = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); 
std::vector<Matrix::RankSizePair *> local_blocks = trans_input_desc.partsToRanks; if (prms.whiten) { T scalar = T(1 / sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_rows * prms.n_components, streams[0]); raft::matrix::matrixVectorBinaryMultSkipZero(components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]); } for (int i = 0; i < local_blocks.size(); i++) { int si = i % n_streams; T alpha = T(1); T beta = T(0); LinAlg::gemm(trans_input[i]->ptr, local_blocks[i]->size, size_t(prms.n_components), components, input[i]->ptr, local_blocks[i]->size, prms.n_cols, HIPBLAS_OP_N, HIPBLAS_OP_N, alpha, beta, cublas_h, streams[si]); MLCommon::Stats::meanAdd(input[i]->ptr, input[i]->ptr, mu, size_t(prms.n_cols), local_blocks[i]->size, false, true, streams[si]); } if (prms.whiten) { raft::matrix::matrixVectorBinaryDivSkipZero(components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]); T scalar = T(sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_rows * prms.n_components, streams[0]); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } } /** * @brief performs MNMG inverse transform operation for the pca. * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param trans_input: transformed input data * @input param components: principal components of the input data * @output param input: input data * @input param singular_vals: singular values of the data * @input param mu: mean of every column in input * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void inverse_transform_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **trans_input, T *components, Matrix::Data<T> **input, T *singular_vals, T *mu, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> trans_data(trans_input, trans_input + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); // TODO: These streams should come from raft::handle_t int n_streams = n_parts; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } inverse_transform_impl(handle, trans_data, trans_desc, components, input_data, singular_vals, mu, prms, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } /** * @brief performs MNMG fit and transform operation for the pca. 
* @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @output param trans_input: transformed input data * @output param components: principal components of the input data * @output param explained_var: explained var * @output param explained_var_ratio: the explained var ratio * @output param singular_vals: singular values of the data * @output param mu: mean of every column in input * @output param noise_vars: variance of the noise * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void fit_transform_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, Matrix::Data<T> **trans_input, T *components, T *explained_var, T *explained_var_ratio, T *singular_vals, T *mu, T *noise_vars, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> trans_data(trans_input, trans_input + n_parts); // TODO: These streams should come from raft::handle_t int n_streams = n_parts; hipStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, streams, n_streams, verbose); transform_impl(handle, input_data, input_desc, components, trans_data, singular_vals, mu, prms, streams, n_streams, verbose); sign_flip(handle, trans_data, input_desc, components, prms.n_components, streams, n_streams); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(hipStreamDestroy(streams[i])); } } void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data, Matrix::PartDescriptor &input_desc, float *components, float *explained_var, float *explained_var_ratio, float *singular_vals, float *mu, float *noise_vars, paramsPCAMG prms, bool verbose) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit(raft::handle_t &handle, std::vector<Matrix::Data<double> *> &input_data, Matrix::PartDescriptor &input_desc, double *components, double *explained_var, double *explained_var_ratio, double *singular_vals, double *mu, double *noise_vars, paramsPCAMG prms, bool verbose) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::floatData_t **input, Matrix::floatData_t **trans_input, float *components, float *explained_var, float *explained_var_ratio, float *singular_vals, float *mu, float *noise_vars, paramsPCAMG prms, bool verbose) { fit_transform_impl(handle, rank_sizes, n_parts, input, trans_input, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::doubleData_t 
**input, Matrix::doubleData_t **trans_input, double *components, double *explained_var, double *explained_var_ratio, double *singular_vals, double *mu, double *noise_vars, paramsPCAMG prms, bool verbose) { fit_transform_impl(handle, rank_sizes, n_parts, input, trans_input, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **input, float *components, Matrix::Data<float> **trans_input, float *singular_vals, float *mu, paramsPCAMG prms, bool verbose) { transform_impl(handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose); } void transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **input, double *components, Matrix::Data<double> **trans_input, double *singular_vals, double *mu, paramsPCAMG prms, bool verbose) { transform_impl(handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose); } void inverse_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **trans_input, float *components, Matrix::Data<float> **input, float *singular_vals, float *mu, paramsPCAMG prms, bool verbose) { inverse_transform_impl(handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose); } void inverse_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **trans_input, double *components, Matrix::Data<double> **input, double *singular_vals, double *mu, paramsPCAMG prms, bool verbose) { inverse_transform_impl(handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose); } } // namespace opg } // namespace PCA } // namespace ML
7d41a7177dc9cf6bc1e3972f11e7008addd89505.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <linalg/transpose.h> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <cuda_utils.cuh> #include <cuml/common/cuml_allocator.hpp> #include <cuml/decomposition/pca.hpp> #include <cuml/decomposition/pca_mg.hpp> #include <cuml/decomposition/sign_flip_mg.hpp> #include <matrix/math.cuh> #include <opg/linalg/qr_based_svd.hpp> #include <opg/matrix/matrix_utils.hpp> #include <opg/stats/cov.hpp> #include <opg/stats/mean.hpp> #include <opg/stats/mean_center.hpp> #include <raft/comms/comms.hpp> #include <stats/mean_center.cuh> #include "pca.cuh" using namespace MLCommon; namespace ML { namespace PCA { namespace opg { template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *components, T *explained_var, T *explained_var_ratio, T *singular_vals, T *mu, T *noise_vars, paramsPCAMG prms, cudaStream_t *streams, int n_streams, bool verbose) { const auto &comm = handle.get_comms(); cublasHandle_t cublas_handle = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); Matrix::Data<T> mu_data{mu, size_t(prms.n_cols)}; Stats::opg::mean(mu_data, input_data, input_desc, comm, allocator, streams, n_streams, handle.get_cublas_handle()); device_buffer<T> cov_data(allocator, streams[0], prms.n_cols * prms.n_cols); size_t cov_data_size = cov_data.size(); Matrix::Data<T> cov{cov_data.data(), cov_data_size}; Stats::opg::cov(cov, input_data, input_desc, mu_data, true, comm, allocator, streams, n_streams, cublas_handle); ML::truncCompExpVars<T, mg_solver>(handle, cov.ptr, components, explained_var, explained_var_ratio, prms, streams[0]); T scalar = (prms.n_rows - 1); raft::matrix::seqRoot(explained_var, singular_vals, scalar, prms.n_components, streams[0], true); Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams); } /** * @brief performs MNMG fit operation for the pca * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param input: input data * @input param components: principal components of the input data * @output param explained_var: explained var * @output param explained_var_ratio: the explained var ratio * @output param singular_vals: singular values of the data * @output param mu: mean of every column in input * @output param noise_vars: variance of the noise * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void fit_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input_data, Matrix::PartDescriptor &input_desc, T *components, T *explained_var, T *explained_var_ratio, T *singular_vals, T *mu, T *noise_vars, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); // TODO: These streams should come from 
raft::handle_t // Reference issue https://github.com/rapidsai/cuml/issues/2470 int n_streams = input_desc.blocksOwnedBy(rank).size(); cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } if (prms.algorithm == mg_solver::COV_EIG_JACOBI || prms.algorithm == mg_solver::COV_EIG_DQ) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } } else if (prms.algorithm == mg_solver::QR) { const raft::handle_t &h = handle; cudaStream_t stream = h.get_stream(); const auto allocator = h.get_device_allocator(); const auto &comm = h.get_comms(); // Center the data Matrix::Data<T> mu_data{mu, size_t(prms.n_cols)}; Stats::opg::mean(mu_data, input_data, input_desc, comm, allocator, streams, n_streams, handle.get_cublas_handle()); Stats::opg::mean_center(input_data, input_desc, mu_data, comm, streams, n_streams); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } // Allocate Q, S and V and call QR std::vector<Matrix::Data<T> *> uMatrixParts; Matrix::opg::allocate(h, uMatrixParts, input_desc, rank, stream); device_buffer<T> sVector(allocator, stream, prms.n_cols); device_buffer<T> vMatrix(allocator, stream, prms.n_cols * prms.n_cols); CUDA_CHECK( cudaMemset(vMatrix.data(), 0, prms.n_cols * prms.n_cols * sizeof(T))); LinAlg::opg::svdQR(h, sVector.data(), uMatrixParts, vMatrix.data(), true, true, prms.tol, prms.n_iterations, input_data, input_desc, rank); // sign flip sign_flip(handle, uMatrixParts, input_desc, vMatrix.data(), prms.n_cols, streams, n_streams); // Calculate instance variables device_buffer<T> explained_var_all(allocator, stream, prms.n_cols); device_buffer<T> explained_var_ratio_all(allocator, stream, prms.n_cols); T scalar = 1.0 / (prms.n_rows - 1); raft::matrix::power(sVector.data(), explained_var_all.data(), scalar, prms.n_cols, stream); raft::matrix::ratio(handle, explained_var_all.data(), explained_var_ratio_all.data(), prms.n_cols, stream); Matrix::truncZeroOrigin(sVector.data(), prms.n_cols, singular_vals, prms.n_components, 1, stream); Matrix::truncZeroOrigin(explained_var_all.data(), prms.n_cols, explained_var, prms.n_components, 1, stream); Matrix::truncZeroOrigin(explained_var_ratio_all.data(), prms.n_cols, explained_var_ratio, prms.n_components, 1, stream); MLCommon::LinAlg::transpose(vMatrix.data(), prms.n_cols, stream); Matrix::truncZeroOrigin(vMatrix.data(), prms.n_cols, components, prms.n_components, prms.n_cols, stream); Matrix::opg::deallocate(h, uMatrixParts, input_desc, rank, stream); // Re-add mean to centered data Stats::opg::mean_add(input_data, input_desc, mu_data, comm, streams, n_streams); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } template <typename T> void transform_impl(raft::handle_t &handle, std::vector<Matrix::Data<T> *> &input, const Matrix::PartDescriptor input_desc, T *components, std::vector<Matrix::Data<T> *> &trans_input, T *singular_vals, T *mu, const paramsPCAMG prms, cudaStream_t *streams, int n_streams, bool verbose) { cublasHandle_t cublas_h = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks; if (prms.whiten) { T scalar = 
T(sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_cols * prms.n_components, streams[0]); raft::matrix::matrixVectorBinaryDivSkipZero(components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]); } for (int i = 0; i < input.size(); i++) { int si = i % n_streams; MLCommon::Stats::meanCenter(input[i]->ptr, input[i]->ptr, mu, size_t(prms.n_cols), local_blocks[i]->size, false, true, streams[si]); T alpha = T(1); T beta = T(0); LinAlg::gemm(input[i]->ptr, local_blocks[i]->size, size_t(prms.n_cols), components, trans_input[i]->ptr, local_blocks[i]->size, int(prms.n_components), CUBLAS_OP_N, CUBLAS_OP_T, alpha, beta, cublas_h, streams[si]); MLCommon::Stats::meanAdd(input[i]->ptr, input[i]->ptr, mu, size_t(prms.n_cols), local_blocks[i]->size, false, true, streams[si]); } if (prms.whiten) { raft::matrix::matrixVectorBinaryMultSkipZero(components, singular_vals, prms.n_cols, prms.n_components, true, true, streams[0]); T scalar = T(1 / sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_cols * prms.n_components, streams[0]); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } } /** * @brief performs MNMG transform operation for the pca. * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @input param components: principal components of the input data * @output param trans_input: transformed input data * @input param singular_vals: singular values of the data * @input param mu: mean of every column in input * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void transform_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, T *components, Matrix::Data<T> **trans_input, T *singular_vals, T *mu, paramsPCAMG prms, bool verbose) { // We want to update the API of this function, and other functions with // regards to https://github.com/rapidsai/cuml/issues/2471 int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> trans_data(trans_input, trans_input + n_parts); // TODO: These streams should come from raft::handle_t int n_streams = n_parts; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } transform_impl(handle, input_data, input_desc, components, trans_data, singular_vals, mu, prms, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } template <typename T> void inverse_transform_impl( raft::handle_t &handle, std::vector<Matrix::Data<T> *> &trans_input, Matrix::PartDescriptor trans_input_desc, T *components, std::vector<Matrix::Data<T> *> &input, T *singular_vals, T *mu, paramsPCAMG prms, cudaStream_t *streams, int n_streams, bool verbose) { cublasHandle_t cublas_h = handle.get_cublas_handle(); const auto allocator = handle.get_device_allocator(); std::vector<Matrix::RankSizePair *> local_blocks = 
trans_input_desc.partsToRanks; if (prms.whiten) { T scalar = T(1 / sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_rows * prms.n_components, streams[0]); raft::matrix::matrixVectorBinaryMultSkipZero(components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]); } for (int i = 0; i < local_blocks.size(); i++) { int si = i % n_streams; T alpha = T(1); T beta = T(0); LinAlg::gemm(trans_input[i]->ptr, local_blocks[i]->size, size_t(prms.n_components), components, input[i]->ptr, local_blocks[i]->size, prms.n_cols, CUBLAS_OP_N, CUBLAS_OP_N, alpha, beta, cublas_h, streams[si]); MLCommon::Stats::meanAdd(input[i]->ptr, input[i]->ptr, mu, size_t(prms.n_cols), local_blocks[i]->size, false, true, streams[si]); } if (prms.whiten) { raft::matrix::matrixVectorBinaryDivSkipZero(components, singular_vals, prms.n_rows, prms.n_components, true, true, streams[0]); T scalar = T(sqrt(prms.n_rows - 1)); raft::linalg::scalarMultiply(components, components, scalar, prms.n_rows * prms.n_components, streams[0]); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } } /** * @brief performs MNMG inverse transform operation for the pca. * @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param trans_input: transformed input data * @input param components: principal components of the input data * @output param input: input data * @input param singular_vals: singular values of the data * @input param mu: mean of every column in input * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void inverse_transform_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **trans_input, T *components, Matrix::Data<T> **input, T *singular_vals, T *mu, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); Matrix::PartDescriptor trans_desc(prms.n_rows, prms.n_components, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> trans_data(trans_input, trans_input + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); // TODO: These streams should come from raft::handle_t int n_streams = n_parts; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } inverse_transform_impl(handle, trans_data, trans_desc, components, input_data, singular_vals, mu, prms, streams, n_streams, verbose); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } /** * @brief performs MNMG fit and transform operation for the pca. 
* @input param handle: the internal cuml handle object * @input param rank_sizes: includes all the partition size information for the rank * @input param n_parts: number of partitions * @input param input: input data * @output param trans_input: transformed input data * @output param components: principal components of the input data * @output param explained_var: explained var * @output param explained_var_ratio: the explained var ratio * @output param singular_vals: singular values of the data * @output param mu: mean of every column in input * @output param noise_vars: variance of the noise * @input param prms: data structure that includes all the parameters from input size to algorithm * @input param verbose */ template <typename T> void fit_transform_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<T> **input, Matrix::Data<T> **trans_input, T *components, T *explained_var, T *explained_var_ratio, T *singular_vals, T *mu, T *noise_vars, paramsPCAMG prms, bool verbose) { int rank = handle.get_comms().get_rank(); std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes, rank_sizes + n_parts); std::vector<Matrix::Data<T> *> input_data(input, input + n_parts); Matrix::PartDescriptor input_desc(prms.n_rows, prms.n_cols, ranksAndSizes, rank); std::vector<Matrix::Data<T> *> trans_data(trans_input, trans_input + n_parts); // TODO: These streams should come from raft::handle_t int n_streams = n_parts; cudaStream_t streams[n_streams]; for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamCreate(&streams[i])); } fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, streams, n_streams, verbose); transform_impl(handle, input_data, input_desc, components, trans_data, singular_vals, mu, prms, streams, n_streams, verbose); sign_flip(handle, trans_data, input_desc, components, prms.n_components, streams, n_streams); for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamSynchronize(streams[i])); } for (int i = 0; i < n_streams; i++) { CUDA_CHECK(cudaStreamDestroy(streams[i])); } } void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data, Matrix::PartDescriptor &input_desc, float *components, float *explained_var, float *explained_var_ratio, float *singular_vals, float *mu, float *noise_vars, paramsPCAMG prms, bool verbose) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit(raft::handle_t &handle, std::vector<Matrix::Data<double> *> &input_data, Matrix::PartDescriptor &input_desc, double *components, double *explained_var, double *explained_var_ratio, double *singular_vals, double *mu, double *noise_vars, paramsPCAMG prms, bool verbose) { fit_impl(handle, input_data, input_desc, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::floatData_t **input, Matrix::floatData_t **trans_input, float *components, float *explained_var, float *explained_var_ratio, float *singular_vals, float *mu, float *noise_vars, paramsPCAMG prms, bool verbose) { fit_transform_impl(handle, rank_sizes, n_parts, input, trans_input, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void fit_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::doubleData_t 
**input, Matrix::doubleData_t **trans_input, double *components, double *explained_var, double *explained_var_ratio, double *singular_vals, double *mu, double *noise_vars, paramsPCAMG prms, bool verbose) { fit_transform_impl(handle, rank_sizes, n_parts, input, trans_input, components, explained_var, explained_var_ratio, singular_vals, mu, noise_vars, prms, verbose); } void transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **input, float *components, Matrix::Data<float> **trans_input, float *singular_vals, float *mu, paramsPCAMG prms, bool verbose) { transform_impl(handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose); } void transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **input, double *components, Matrix::Data<double> **trans_input, double *singular_vals, double *mu, paramsPCAMG prms, bool verbose) { transform_impl(handle, rank_sizes, n_parts, input, components, trans_input, singular_vals, mu, prms, verbose); } void inverse_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<float> **trans_input, float *components, Matrix::Data<float> **input, float *singular_vals, float *mu, paramsPCAMG prms, bool verbose) { inverse_transform_impl(handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose); } void inverse_transform(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes, size_t n_parts, Matrix::Data<double> **trans_input, double *components, Matrix::Data<double> **input, double *singular_vals, double *mu, paramsPCAMG prms, bool verbose) { inverse_transform_impl(handle, rank_sizes, n_parts, trans_input, components, input, singular_vals, mu, prms, verbose); } } // namespace opg } // namespace PCA } // namespace ML
f9fc312425a32163b66ed965273183b448138706.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <vector> #include <string> #include <thrust/device_vector.h> #include <cupti_profiler.h> template<typename T> __global__ void kernel(T begin, int size) { const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < size) *(begin + thread_id) += 1; } template<typename T> __global__ void kernel2(T begin, int size) { const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < size) *(begin + thread_id) += 2; } template<typename T> void call_kernel(T& arg) { hipLaunchKernelGGL(( kernel), dim3(1), dim3(100), 0, 0, thrust::raw_pointer_cast(&arg[0]), arg.size()); } template<typename T> void call_kernel2(T& arg) { hipLaunchKernelGGL(( kernel2), dim3(1), dim3(50), 0, 0, thrust::raw_pointer_cast(&arg[0]), arg.size()); } int main() { using namespace std; //using namespace thrust; vector<string> event_names { "active_warps", "gst_inst_32bit", "active_cycles", "threads_launched", "branch" }; vector<string> metric_names { "flop_count_dp", "flop_count_sp", "inst_executed", "gst_transactions", "gld_transactions", "shared_efficiency" //"stall_memory_throttle" }; constexpr int N = 100; thrust::device_vector<float> data(N, 0); //cupti_profiler::profiler profiler(vector<string>{}, metric_names); // XXX: Disabling all metrics seems to change the values // of some events. Not sure if this is correct behavior. //cupti_profiler::profiler profiler(event_names, vector<string>{}); cupti_profiler::profiler profiler(event_names, metric_names); // Get #passes required to compute all metrics and events const int passes = profiler.get_passes(); printf("Passes: %d\n", passes); profiler.start(); for(int i=0; i<50; ++i) { call_kernel(data); hipDeviceSynchronize(); call_kernel2(data); hipDeviceSynchronize(); } profiler.stop(); printf("Event Trace\n"); profiler.print_event_values(std::cout); printf("Metric Trace\n"); profiler.print_metric_values(std::cout); auto names = profiler.get_kernel_names(); for(auto name: names) { printf("%s\n", name.c_str()); } thrust::host_vector<float> h_data(data); /*printf("\n"); for(int i = 0; i < 10; ++i) { printf("%.2lf ", h_data[i]); }*/ printf("\n"); return 0; }
f9fc312425a32163b66ed965273183b448138706.cu
#include <cstdio> #include <vector> #include <string> #include <thrust/device_vector.h> #include <cupti_profiler.h> template<typename T> __global__ void kernel(T begin, int size) { const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < size) *(begin + thread_id) += 1; } template<typename T> __global__ void kernel2(T begin, int size) { const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id < size) *(begin + thread_id) += 2; } template<typename T> void call_kernel(T& arg) { kernel<<<1, 100>>>(thrust::raw_pointer_cast(&arg[0]), arg.size()); } template<typename T> void call_kernel2(T& arg) { kernel2<<<1, 50>>>(thrust::raw_pointer_cast(&arg[0]), arg.size()); } int main() { using namespace std; //using namespace thrust; vector<string> event_names { "active_warps", "gst_inst_32bit", "active_cycles", "threads_launched", "branch" }; vector<string> metric_names { "flop_count_dp", "flop_count_sp", "inst_executed", "gst_transactions", "gld_transactions", "shared_efficiency" //"stall_memory_throttle" }; constexpr int N = 100; thrust::device_vector<float> data(N, 0); //cupti_profiler::profiler profiler(vector<string>{}, metric_names); // XXX: Disabling all metrics seems to change the values // of some events. Not sure if this is correct behavior. //cupti_profiler::profiler profiler(event_names, vector<string>{}); cupti_profiler::profiler profiler(event_names, metric_names); // Get #passes required to compute all metrics and events const int passes = profiler.get_passes(); printf("Passes: %d\n", passes); profiler.start(); for(int i=0; i<50; ++i) { call_kernel(data); cudaDeviceSynchronize(); call_kernel2(data); cudaDeviceSynchronize(); } profiler.stop(); printf("Event Trace\n"); profiler.print_event_values(std::cout); printf("Metric Trace\n"); profiler.print_metric_values(std::cout); auto names = profiler.get_kernel_names(); for(auto name: names) { printf("%s\n", name.c_str()); } thrust::host_vector<float> h_data(data); /*printf("\n"); for(int i = 0; i < 10; ++i) { printf("%.2lf ", h_data[i]); }*/ printf("\n"); return 0; }
353ea7d035550a02d192a81f0a72c745b89daa28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2019-2021 ETH Zurich, Automatic Control Lab, * Michel Schubiger, Goran Banjac. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cuda_pcg.h" #include "csr_type.h" #include "cuda_configure.h" #include "cuda_handler.h" #include "cuda_malloc.h" #include "cuda_lin_alg.h" #include "cuda_wrapper.h" #include "helper_cuda.h" /* --> checkCudaErrors */ #ifdef __cplusplus extern "C" {extern CUDA_Handle_t *CUDA_handle;} #endif /******************************************************************************* * GPU Kernels * *******************************************************************************/ __global__ void scalar_division_kernel(c_float *res, const c_float *num, const c_float *den) { *res = (*num) / (*den); } /******************************************************************************* * Private Functions * *******************************************************************************/ /* * d_y = (P + sigma*I + A'*R*A) * d_x */ static void mat_vec_prod(cudapcg_solver *s, c_float *d_y, const c_float *d_x, c_int device) { c_float *sigma; c_float H_ZERO = 0.0; c_float H_ONE = 1.0; c_int n = s->n; c_int m = s->m; csr *P = s->P; csr *A = s->A; csr *At = s->At; sigma = device ? 
s->d_sigma : s->h_sigma; /* d_y = d_x */ checkCudaErrors(hipMemcpy(d_y, d_x, n * sizeof(c_float), hipMemcpyDeviceToDevice)); /* d_y *= sigma */ checkCudaErrors(cublasTscal(CUDA_handle->cublasHandle, n, sigma, d_y, 1)); /* d_y += P * d_x */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, P->alg, HIPSPARSE_OPERATION_NON_TRANSPOSE, P->m, P->n, P->nnz, &H_ONE, CUDA_FLOAT, P->MatDescription, P->val, CUDA_FLOAT, P->row_ptr, P->col_ind, d_x, CUDA_FLOAT, &H_ONE, CUDA_FLOAT, d_y, CUDA_FLOAT, CUDA_FLOAT, P->buffer)); if (m == 0) return; if (!s->d_rho_vec) { /* d_z = rho * A * d_x */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, A->alg, HIPSPARSE_OPERATION_NON_TRANSPOSE, A->m, A->n, A->nnz, s->h_rho, CUDA_FLOAT, A->MatDescription, A->val, CUDA_FLOAT, A->row_ptr, A->col_ind, d_x, CUDA_FLOAT, &H_ZERO, CUDA_FLOAT, s->d_z, CUDA_FLOAT, CUDA_FLOAT, A->buffer)); } else { /* d_z = A * d_x */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, A->alg, HIPSPARSE_OPERATION_NON_TRANSPOSE, A->m, A->n, A->nnz, &H_ONE, CUDA_FLOAT, A->MatDescription, A->val, CUDA_FLOAT, A->row_ptr, A->col_ind, d_x, CUDA_FLOAT, &H_ZERO, CUDA_FLOAT, s->d_z, CUDA_FLOAT, CUDA_FLOAT, A->buffer)); /* d_z = diag(d_rho_vec) * dz */ cuda_vec_ew_prod(s->d_z, s->d_z, s->d_rho_vec, m); } /* d_y += A' * d_z */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, At->alg, HIPSPARSE_OPERATION_NON_TRANSPOSE, At->m, At->n, At->nnz, &H_ONE, CUDA_FLOAT, At->MatDescription, At->val, CUDA_FLOAT, At->row_ptr, At->col_ind, s->d_z, CUDA_FLOAT, &H_ONE, CUDA_FLOAT, d_y, CUDA_FLOAT, CUDA_FLOAT, A->buffer)); } /******************************************************************************* * API Functions * *******************************************************************************/ c_int cuda_pcg_alg(cudapcg_solver *s, c_float eps, c_int max_iter) { c_float *tmp; c_int iter = 0; c_int n = s->n; c_float H_MINUS_ONE = -1.0; if (!s->warm_start) { /* d_x = 0 */ checkCudaErrors(hipMemset(s->d_x, 0, n * sizeof(c_float))); } /* d_p = 0 */ checkCudaErrors(hipMemset(s->d_p, 0, n * sizeof(c_float))); /* d_r = K * d_x */ mat_vec_prod(s, s->d_r, s->d_x, 0); /* d_r -= d_rhs */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, &H_MINUS_ONE, s->d_rhs, 1, s->d_r, 1)); /* h_r_norm = |d_r| */ s->vector_norm(s->d_r, n, s->h_r_norm); /* From here on cuBLAS is operating in device pointer mode */ hipblasSetPointerMode(CUDA_handle->cublasHandle, HIPBLAS_POINTER_MODE_DEVICE); if (s->precondition) { /* d_y = M \ d_r */ cuda_vec_ew_prod(s->d_y, s->d_diag_precond_inv, s->d_r, n); } /* d_p = -d_y */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->D_MINUS_ONE, s->d_y, 1, s->d_p, 1)); /* rTy = d_r' * d_y */ checkCudaErrors(cublasTdot(CUDA_handle->cublasHandle, n, s->d_y, 1, s->d_r, 1, s->rTy)); hipDeviceSynchronize(); /* Run the PCG algorithm */ while ( *(s->h_r_norm) > eps && iter < max_iter ) { /* d_Kp = K * d_p */ mat_vec_prod(s, s->d_Kp, s->d_p, 1); /* pKp = d_p' * d_Kp */ checkCudaErrors(cublasTdot(CUDA_handle->cublasHandle, n, s->d_p, 1, s->d_Kp, 1, s->pKp)); /* alpha = rTy / pKp */ hipLaunchKernelGGL(( scalar_division_kernel), dim3(1),dim3(1), 0, 0, s->alpha, s->rTy, s->pKp); /* d_x += alpha * d_p */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->alpha, s->d_p, 1, s->d_x, 1)); /* d_r += alpha * d_Kp */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->alpha, s->d_Kp, 1, s->d_r, 1)); if (s->precondition) { /* d_y = M \ d_r */ cuda_vec_ew_prod(s->d_y, s->d_diag_precond_inv, s->d_r, n); } /* 
Swap pointers to rTy and rTy_prev */ tmp = s->rTy_prev; s->rTy_prev = s->rTy; s->rTy = tmp; /* rTy = d_r' * d_y */ checkCudaErrors(cublasTdot(CUDA_handle->cublasHandle, n, s->d_y, 1, s->d_r, 1, s->rTy)); /* Update residual norm */ s->vector_norm(s->d_r, n, s->d_r_norm); checkCudaErrors(hipMemcpyAsync(s->h_r_norm, s->d_r_norm, sizeof(c_float), hipMemcpyDeviceToHost)); /* beta = rTy / rTy_prev */ hipLaunchKernelGGL(( scalar_division_kernel), dim3(1),dim3(1), 0, 0, s->beta, s->rTy, s->rTy_prev); /* d_p *= beta */ checkCudaErrors(cublasTscal(CUDA_handle->cublasHandle, n, s->beta, s->d_p, 1)); /* d_p -= d_y */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->D_MINUS_ONE, s->d_y, 1, s->d_p, 1)); hipDeviceSynchronize(); iter++; } /* End of the PCG algorithm */ /* From here on cuBLAS is operating in host pointer mode again */ hipblasSetPointerMode(CUDA_handle->cublasHandle, HIPBLAS_POINTER_MODE_HOST); return iter; } void cuda_pcg_update_precond(cudapcg_solver *s, c_int P_updated, c_int A_updated, c_int R_updated) { void *buffer; c_float *tmp; c_int n = s->n; csr *At = s->At; size_t Buffer_size_in_bytes = n * (sizeof(c_float) + sizeof(c_int)); if (!P_updated && !A_updated && !R_updated) return; if (P_updated) { /* Update d_P_diag_val */ cuda_vec_gather(n, s->P->val, s->d_P_diag_val, s->d_P_diag_ind); } if (A_updated || R_updated) { /* Allocate memory */ cuda_malloc((void **) &tmp, At->nnz * sizeof(c_float)); cuda_malloc((void **) &buffer, Buffer_size_in_bytes); /* Update d_AtRA_diag_val */ if (!s->d_rho_vec) { /* R = rho*I --> A'*R*A = rho * A'*A */ if (A_updated) { /* Update d_AtA_diag_val */ cuda_vec_ew_prod(tmp, At->val, At->val, At->nnz); cuda_vec_segmented_sum(tmp, At->row_ind, s->d_AtA_diag_val, buffer, n, At->nnz); } /* d_AtRA_diag_val = rho * d_AtA_diag_val */ cuda_vec_add_scaled(s->d_AtRA_diag_val, s->d_AtA_diag_val, NULL, *s->h_rho, 0.0, n); } else { /* R = diag(d_rho_vec) --> A'*R*A = A' * diag(d_rho_vec) * A */ cuda_mat_rmult_diag_new(At, tmp, s->d_rho_vec); /* tmp = A' * R */ cuda_vec_ew_prod(tmp, tmp, At->val, At->nnz); /* tmp = tmp * A */ cuda_vec_segmented_sum(tmp, At->row_ind, s->d_AtRA_diag_val, buffer, n, At->nnz); } /* Free memory */ cuda_free((void **) &tmp); cuda_free((void **) &buffer); } /* d_diag_precond = sigma */ cuda_vec_set_sc(s->d_diag_precond, *s->h_sigma, n); /* d_diag_precond += d_P_diag_val + d_AtRA_diag_val */ cuda_vec_add_scaled3(s->d_diag_precond, s->d_diag_precond, s->d_P_diag_val, s->d_AtRA_diag_val, 1.0, 1.0, 1.0, n); /* d_diag_precond_inv = 1 / d_diag_precond */ cuda_vec_reciprocal(s->d_diag_precond_inv, s->d_diag_precond, n); }
353ea7d035550a02d192a81f0a72c745b89daa28.cu
/** * Copyright (c) 2019-2021 ETH Zurich, Automatic Control Lab, * Michel Schubiger, Goran Banjac. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "cuda_pcg.h" #include "csr_type.h" #include "cuda_configure.h" #include "cuda_handler.h" #include "cuda_malloc.h" #include "cuda_lin_alg.h" #include "cuda_wrapper.h" #include "helper_cuda.h" /* --> checkCudaErrors */ #ifdef __cplusplus extern "C" {extern CUDA_Handle_t *CUDA_handle;} #endif /******************************************************************************* * GPU Kernels * *******************************************************************************/ __global__ void scalar_division_kernel(c_float *res, const c_float *num, const c_float *den) { *res = (*num) / (*den); } /******************************************************************************* * Private Functions * *******************************************************************************/ /* * d_y = (P + sigma*I + A'*R*A) * d_x */ static void mat_vec_prod(cudapcg_solver *s, c_float *d_y, const c_float *d_x, c_int device) { c_float *sigma; c_float H_ZERO = 0.0; c_float H_ONE = 1.0; c_int n = s->n; c_int m = s->m; csr *P = s->P; csr *A = s->A; csr *At = s->At; sigma = device ? s->d_sigma : s->h_sigma; /* d_y = d_x */ checkCudaErrors(cudaMemcpy(d_y, d_x, n * sizeof(c_float), cudaMemcpyDeviceToDevice)); /* d_y *= sigma */ checkCudaErrors(cublasTscal(CUDA_handle->cublasHandle, n, sigma, d_y, 1)); /* d_y += P * d_x */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, P->alg, CUSPARSE_OPERATION_NON_TRANSPOSE, P->m, P->n, P->nnz, &H_ONE, CUDA_FLOAT, P->MatDescription, P->val, CUDA_FLOAT, P->row_ptr, P->col_ind, d_x, CUDA_FLOAT, &H_ONE, CUDA_FLOAT, d_y, CUDA_FLOAT, CUDA_FLOAT, P->buffer)); if (m == 0) return; if (!s->d_rho_vec) { /* d_z = rho * A * d_x */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, A->alg, CUSPARSE_OPERATION_NON_TRANSPOSE, A->m, A->n, A->nnz, s->h_rho, CUDA_FLOAT, A->MatDescription, A->val, CUDA_FLOAT, A->row_ptr, A->col_ind, d_x, CUDA_FLOAT, &H_ZERO, CUDA_FLOAT, s->d_z, CUDA_FLOAT, CUDA_FLOAT, A->buffer)); } else { /* d_z = A * d_x */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, A->alg, CUSPARSE_OPERATION_NON_TRANSPOSE, A->m, A->n, A->nnz, &H_ONE, CUDA_FLOAT, A->MatDescription, A->val, CUDA_FLOAT, A->row_ptr, A->col_ind, d_x, CUDA_FLOAT, &H_ZERO, CUDA_FLOAT, s->d_z, CUDA_FLOAT, CUDA_FLOAT, A->buffer)); /* d_z = diag(d_rho_vec) * dz */ cuda_vec_ew_prod(s->d_z, s->d_z, s->d_rho_vec, m); } /* d_y += A' * d_z */ checkCudaErrors(cusparseCsrmvEx(CUDA_handle->cusparseHandle, At->alg, CUSPARSE_OPERATION_NON_TRANSPOSE, At->m, At->n, At->nnz, &H_ONE, CUDA_FLOAT, At->MatDescription, At->val, CUDA_FLOAT, At->row_ptr, At->col_ind, s->d_z, CUDA_FLOAT, &H_ONE, CUDA_FLOAT, d_y, CUDA_FLOAT, CUDA_FLOAT, A->buffer)); } /******************************************************************************* * API Functions * *******************************************************************************/ c_int cuda_pcg_alg(cudapcg_solver *s, c_float 
eps, c_int max_iter) { c_float *tmp; c_int iter = 0; c_int n = s->n; c_float H_MINUS_ONE = -1.0; if (!s->warm_start) { /* d_x = 0 */ checkCudaErrors(cudaMemset(s->d_x, 0, n * sizeof(c_float))); } /* d_p = 0 */ checkCudaErrors(cudaMemset(s->d_p, 0, n * sizeof(c_float))); /* d_r = K * d_x */ mat_vec_prod(s, s->d_r, s->d_x, 0); /* d_r -= d_rhs */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, &H_MINUS_ONE, s->d_rhs, 1, s->d_r, 1)); /* h_r_norm = |d_r| */ s->vector_norm(s->d_r, n, s->h_r_norm); /* From here on cuBLAS is operating in device pointer mode */ cublasSetPointerMode(CUDA_handle->cublasHandle, CUBLAS_POINTER_MODE_DEVICE); if (s->precondition) { /* d_y = M \ d_r */ cuda_vec_ew_prod(s->d_y, s->d_diag_precond_inv, s->d_r, n); } /* d_p = -d_y */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->D_MINUS_ONE, s->d_y, 1, s->d_p, 1)); /* rTy = d_r' * d_y */ checkCudaErrors(cublasTdot(CUDA_handle->cublasHandle, n, s->d_y, 1, s->d_r, 1, s->rTy)); cudaDeviceSynchronize(); /* Run the PCG algorithm */ while ( *(s->h_r_norm) > eps && iter < max_iter ) { /* d_Kp = K * d_p */ mat_vec_prod(s, s->d_Kp, s->d_p, 1); /* pKp = d_p' * d_Kp */ checkCudaErrors(cublasTdot(CUDA_handle->cublasHandle, n, s->d_p, 1, s->d_Kp, 1, s->pKp)); /* alpha = rTy / pKp */ scalar_division_kernel<<<1,1>>>(s->alpha, s->rTy, s->pKp); /* d_x += alpha * d_p */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->alpha, s->d_p, 1, s->d_x, 1)); /* d_r += alpha * d_Kp */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->alpha, s->d_Kp, 1, s->d_r, 1)); if (s->precondition) { /* d_y = M \ d_r */ cuda_vec_ew_prod(s->d_y, s->d_diag_precond_inv, s->d_r, n); } /* Swap pointers to rTy and rTy_prev */ tmp = s->rTy_prev; s->rTy_prev = s->rTy; s->rTy = tmp; /* rTy = d_r' * d_y */ checkCudaErrors(cublasTdot(CUDA_handle->cublasHandle, n, s->d_y, 1, s->d_r, 1, s->rTy)); /* Update residual norm */ s->vector_norm(s->d_r, n, s->d_r_norm); checkCudaErrors(cudaMemcpyAsync(s->h_r_norm, s->d_r_norm, sizeof(c_float), cudaMemcpyDeviceToHost)); /* beta = rTy / rTy_prev */ scalar_division_kernel<<<1,1>>>(s->beta, s->rTy, s->rTy_prev); /* d_p *= beta */ checkCudaErrors(cublasTscal(CUDA_handle->cublasHandle, n, s->beta, s->d_p, 1)); /* d_p -= d_y */ checkCudaErrors(cublasTaxpy(CUDA_handle->cublasHandle, n, s->D_MINUS_ONE, s->d_y, 1, s->d_p, 1)); cudaDeviceSynchronize(); iter++; } /* End of the PCG algorithm */ /* From here on cuBLAS is operating in host pointer mode again */ cublasSetPointerMode(CUDA_handle->cublasHandle, CUBLAS_POINTER_MODE_HOST); return iter; } void cuda_pcg_update_precond(cudapcg_solver *s, c_int P_updated, c_int A_updated, c_int R_updated) { void *buffer; c_float *tmp; c_int n = s->n; csr *At = s->At; size_t Buffer_size_in_bytes = n * (sizeof(c_float) + sizeof(c_int)); if (!P_updated && !A_updated && !R_updated) return; if (P_updated) { /* Update d_P_diag_val */ cuda_vec_gather(n, s->P->val, s->d_P_diag_val, s->d_P_diag_ind); } if (A_updated || R_updated) { /* Allocate memory */ cuda_malloc((void **) &tmp, At->nnz * sizeof(c_float)); cuda_malloc((void **) &buffer, Buffer_size_in_bytes); /* Update d_AtRA_diag_val */ if (!s->d_rho_vec) { /* R = rho*I --> A'*R*A = rho * A'*A */ if (A_updated) { /* Update d_AtA_diag_val */ cuda_vec_ew_prod(tmp, At->val, At->val, At->nnz); cuda_vec_segmented_sum(tmp, At->row_ind, s->d_AtA_diag_val, buffer, n, At->nnz); } /* d_AtRA_diag_val = rho * d_AtA_diag_val */ cuda_vec_add_scaled(s->d_AtRA_diag_val, s->d_AtA_diag_val, NULL, *s->h_rho, 0.0, n); } else { /* R = 
diag(d_rho_vec) --> A'*R*A = A' * diag(d_rho_vec) * A */ cuda_mat_rmult_diag_new(At, tmp, s->d_rho_vec); /* tmp = A' * R */ cuda_vec_ew_prod(tmp, tmp, At->val, At->nnz); /* tmp = tmp * A */ cuda_vec_segmented_sum(tmp, At->row_ind, s->d_AtRA_diag_val, buffer, n, At->nnz); } /* Free memory */ cuda_free((void **) &tmp); cuda_free((void **) &buffer); } /* d_diag_precond = sigma */ cuda_vec_set_sc(s->d_diag_precond, *s->h_sigma, n); /* d_diag_precond += d_P_diag_val + d_AtRA_diag_val */ cuda_vec_add_scaled3(s->d_diag_precond, s->d_diag_precond, s->d_P_diag_val, s->d_AtRA_diag_val, 1.0, 1.0, 1.0, n); /* d_diag_precond_inv = 1 / d_diag_precond */ cuda_vec_reciprocal(s->d_diag_precond_inv, s->d_diag_precond, n); }
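The PCG loop above follows the textbook preconditioned conjugate gradient recurrence, just expressed with cuBLAS calls, a one-thread division kernel for the scalars alpha and beta, and a pointer swap for rTy/rTy_prev. As a cross-check of that recurrence (and of the sign convention r = K*x - rhs, p = -y used here), the following host-only sketch runs the same update order on a small, made-up SPD system; the 3x3 matrix, right-hand side, tolerance, and iteration cap are illustrative values only and are not taken from the solver above.

#include <cstdio>
#include <cmath>

// Tiny Jacobi-preconditioned CG following the same update order and sign
// convention as the GPU solver above: r = K*x - rhs, y = M \ r, p starts at -y.
int main()
{
    const int N = 3;
    const double K[N][N] = {{4, 1, 0}, {1, 3, 1}, {0, 1, 2}};  // made-up SPD matrix
    const double rhs[N]  = {1, 2, 3};                          // made-up right-hand side
    double x[N] = {0, 0, 0}, r[N], y[N], p[N], Kp[N], Minv[N];
    for (int i = 0; i < N; ++i) Minv[i] = 1.0 / K[i][i];       // Jacobi preconditioner

    // r = K*x - rhs;  y = M \ r;  p = -y;  rTy = r'y
    double rTy = 0.0;
    for (int i = 0; i < N; ++i) {
        r[i] = -rhs[i];
        for (int j = 0; j < N; ++j) r[i] += K[i][j] * x[j];
        y[i] = Minv[i] * r[i];
        p[i] = -y[i];
        rTy += r[i] * y[i];
    }

    for (int iter = 0; iter < 50; ++iter) {
        double rnorm = 0.0;
        for (int i = 0; i < N; ++i) rnorm += r[i] * r[i];
        if (std::sqrt(rnorm) <= 1e-10) break;

        double pKp = 0.0;                                      // pKp = p' * K * p
        for (int i = 0; i < N; ++i) {
            Kp[i] = 0.0;
            for (int j = 0; j < N; ++j) Kp[i] += K[i][j] * p[j];
            pKp += p[i] * Kp[i];
        }
        double alpha = rTy / pKp;                              // alpha = rTy / pKp
        double rTy_new = 0.0;
        for (int i = 0; i < N; ++i) {
            x[i] += alpha * p[i];                              // x += alpha * p
            r[i] += alpha * Kp[i];                             // r += alpha * K * p
            y[i]  = Minv[i] * r[i];                            // y  = M \ r
            rTy_new += r[i] * y[i];
        }
        double beta = rTy_new / rTy;                           // beta = rTy / rTy_prev
        rTy = rTy_new;
        for (int i = 0; i < N; ++i) p[i] = beta * p[i] - y[i]; // p = -y + beta * p
    }
    printf("x = (%g, %g, %g)\n", x[0], x[1], x[2]);
    return 0;
}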
b44b3344bb63f6ef6831bd3ae44e3990bbe39212.hip
// !!! This is a file automatically generated by hipify!!!
// aux.cu
// Implementation file for the matrix-free product between a Jacobian matrix
// and a vector

#include <lmnslsqr/aux.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <stdio.h>
#include <lmnslsqr/kernel.h>
#include <lmnslsqr/error.h>

static double *dev_xp=NULL, *dev_xm=NULL, *dev_fp=NULL, *dev_fm=NULL;

// Initialization functions
void init_jac_approx(int M, int N)
{
    hipMalloc(&dev_xp, sizeof(double)*N);
    hipMalloc(&dev_xm, sizeof(double)*N);
    hipMalloc(&dev_fp, sizeof(double)*M);
    hipMalloc(&dev_fm, sizeof(double)*M);
}

void free_jac_approx()
{
    // Free data
    hipFree(dev_xp);
    hipFree(dev_xm);
    hipFree(dev_fp);
    hipFree(dev_fm);
}

// Matrix-free matrix-vector product
void jac_approx(
    int M, int N,
    void (*func)(int, int, const double *, double *),
    const double *dev_x,
    const double *dev_v,
    double *dev_out,
    double eps,
    hipblasHandle_t *handle
)
{
    // Build vectors
    double alpha;

    // Make the finite difference calculations
    hipMemcpy(dev_xp, dev_x, sizeof(double)*N, hipMemcpyDeviceToDevice);
    hipMemcpy(dev_xm, dev_x, sizeof(double)*N, hipMemcpyDeviceToDevice);
    alpha = eps;
    cublas_check_error(
        hipblasDaxpy(*handle, N, &alpha, dev_v, 1, dev_xp, 1),
        "aux, Jacobian-vector, sum\n");
    alpha = -eps;
    cublas_check_error(
        hipblasDaxpy(*handle, N, &alpha, dev_v, 1, dev_xm, 1),
        "aux, Jacobian-vector, subtraction\n");
    func(M, N, dev_xp, dev_fp);
    func(M, N, dev_xm, dev_fm);

    // Store data in out vector
    hipMemcpy(dev_out, dev_fp, sizeof(double)*M, hipMemcpyDeviceToDevice);
    alpha = -1;
    cublas_check_error(
        hipblasDaxpy(*handle, M, &alpha, dev_fm, 1, dev_out, 1),
        "aux, Jacobian-vector, finite difference\n");
    alpha = 1/(2*eps);
    cublas_check_error(
        hipblasDscal(*handle, M, &alpha, dev_out, 1),
        "aux, Jacobian-vector, scaling by epsilon reciprocal\n");
}
//------------------------------------------------------------------------------
b44b3344bb63f6ef6831bd3ae44e3990bbe39212.cu
// aux.cu
// Implementation file for the matrix-free product between a Jacobian matrix
// and a vector

#include <lmnslsqr/aux.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <stdio.h>
#include <lmnslsqr/kernel.h>
#include <lmnslsqr/error.h>

static double *dev_xp=NULL, *dev_xm=NULL, *dev_fp=NULL, *dev_fm=NULL;

// Initialization functions
void init_jac_approx(int M, int N)
{
    cudaMalloc(&dev_xp, sizeof(double)*N);
    cudaMalloc(&dev_xm, sizeof(double)*N);
    cudaMalloc(&dev_fp, sizeof(double)*M);
    cudaMalloc(&dev_fm, sizeof(double)*M);
}

void free_jac_approx()
{
    // Free data
    cudaFree(dev_xp);
    cudaFree(dev_xm);
    cudaFree(dev_fp);
    cudaFree(dev_fm);
}

// Matrix-free matrix-vector product
void jac_approx(
    int M, int N,
    void (*func)(int, int, const double *, double *),
    const double *dev_x,
    const double *dev_v,
    double *dev_out,
    double eps,
    cublasHandle_t *handle
)
{
    // Build vectors
    double alpha;

    // Make the finite difference calculations
    cudaMemcpy(dev_xp, dev_x, sizeof(double)*N, cudaMemcpyDeviceToDevice);
    cudaMemcpy(dev_xm, dev_x, sizeof(double)*N, cudaMemcpyDeviceToDevice);
    alpha = eps;
    cublas_check_error(
        cublasDaxpy(*handle, N, &alpha, dev_v, 1, dev_xp, 1),
        "aux, Jacobian-vector, sum\n");
    alpha = -eps;
    cublas_check_error(
        cublasDaxpy(*handle, N, &alpha, dev_v, 1, dev_xm, 1),
        "aux, Jacobian-vector, subtraction\n");
    func(M, N, dev_xp, dev_fp);
    func(M, N, dev_xm, dev_fm);

    // Store data in out vector
    cudaMemcpy(dev_out, dev_fp, sizeof(double)*M, cudaMemcpyDeviceToDevice);
    alpha = -1;
    cublas_check_error(
        cublasDaxpy(*handle, M, &alpha, dev_fm, 1, dev_out, 1),
        "aux, Jacobian-vector, finite difference\n");
    alpha = 1/(2*eps);
    cublas_check_error(
        cublasDscal(*handle, M, &alpha, dev_out, 1),
        "aux, Jacobian-vector, scaling by epsilon reciprocal\n");
}
//------------------------------------------------------------------------------
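jac_approx above never forms the Jacobian: it approximates J(x)*v by the central difference (func(x + eps*v) - func(x - eps*v)) / (2*eps) using two extra residual evaluations. The standalone sketch below illustrates the same idea with plain CUDA kernels, independent of the lmnslsqr headers and of cuBLAS; the residual F_i(x) = x_i^2 is a made-up example whose exact Jacobian-vector product is 2*x_i*v_i, so the printed values can be checked directly.

#include <cstdio>
#include <cuda_runtime.h>

// Toy residual F : R^N -> R^N with F_i(x) = x_i^2 (illustrative choice only).
__global__ void residual(const double *x, double *f, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) f[i] = x[i] * x[i];
}

// out = a*x + b*y, element-wise helper used to form x +/- eps*v and the final difference.
__global__ void axpby(double a, const double *x, double b, const double *y, double *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = a * x[i] + b * y[i];
}

int main()
{
    const int N = 8;
    const double eps = 1e-6;
    double hx[N], hv[N], hjv[N];
    for (int i = 0; i < N; ++i) { hx[i] = i + 1.0; hv[i] = 0.5 * i; }

    double *x, *v, *xp, *xm, *fp, *fm, *jv;
    cudaMalloc(&x,  N * sizeof(double));  cudaMalloc(&v,  N * sizeof(double));
    cudaMalloc(&xp, N * sizeof(double));  cudaMalloc(&xm, N * sizeof(double));
    cudaMalloc(&fp, N * sizeof(double));  cudaMalloc(&fm, N * sizeof(double));
    cudaMalloc(&jv, N * sizeof(double));
    cudaMemcpy(x, hx, N * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(v, hv, N * sizeof(double), cudaMemcpyHostToDevice);

    // xp = x + eps*v,  xm = x - eps*v
    axpby<<<1, N>>>(1.0, x,  eps, v, xp, N);
    axpby<<<1, N>>>(1.0, x, -eps, v, xm, N);
    residual<<<1, N>>>(xp, fp, N);
    residual<<<1, N>>>(xm, fm, N);
    // jv = (F(xp) - F(xm)) / (2*eps)
    axpby<<<1, N>>>(1.0 / (2.0 * eps), fp, -1.0 / (2.0 * eps), fm, jv, N);

    cudaMemcpy(hjv, jv, N * sizeof(double), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i)
        printf("J*v[%d] = %.6f (exact %.6f)\n", i, hjv[i], 2.0 * hx[i] * hv[i]);

    cudaFree(x); cudaFree(v); cudaFree(xp); cudaFree(xm);
    cudaFree(fp); cudaFree(fm); cudaFree(jv);
    return 0;
}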
0f24d9334a83b44fdf2114a1bccf3458f3768999.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

__global__ void helloKernel ()
{
    printf ("Hello from the GPU!\n");
}

int main ()
{
    printf ("Hello from the CPU\n");

    hipLaunchKernelGGL(( helloKernel), dim3(1), dim3(1), 0, 0, );

    hipDeviceSynchronize ();

    return 0;
}
0f24d9334a83b44fdf2114a1bccf3458f3768999.cu
#include <stdio.h>
#include <cuda.h>

__global__ void helloKernel ()
{
    printf ("Hello from the GPU!\n");
}

int main ()
{
    printf ("Hello from the CPU\n");

    helloKernel <<< 1, 1 >>> ();

    cudaDeviceSynchronize ();

    return 0;
}
a81b1417900b40621dffadbb5e42dc08e3c4b461.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "./config.cuh"
#include <iostream>
using namespace std;

void __global__ test(int* outdata)
{
    int idx = threadIdx.x;
    outdata[idx] = __shfl_xor(idx, 2);
}

int main()
{
    MODEL_(one_arg) model{32, 1}; // one warp
    int *out, ans[32];
    model(test, &out, 32);
    hipMemcpy(ans, out, sizeof(int)*32, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();
    for (int i = 0; i < 32; ++i) {
        cout << ans[i] << " ";
    }
    cout << "\n";
    return 0;
}
a81b1417900b40621dffadbb5e42dc08e3c4b461.cu
#include "./config.cuh"
#include <iostream>
using namespace std;

void __global__ test(int* outdata)
{
    int idx = threadIdx.x;
    outdata[idx] = __shfl_xor(idx, 2);
}

int main()
{
    MODEL_(one_arg) model{32, 1}; // one warp
    int *out, ans[32];
    model(test, &out, 32);
    cudaMemcpy(ans, out, sizeof(int)*32, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    for (int i = 0; i < 32; ++i) {
        cout << ans[i] << " ";
    }
    cout << "\n";
    return 0;
}
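In the test above each lane writes its own lane index and then reads it back from the lane whose ID differs by the XOR mask 2, so the expected printout is 2 3 0 1 6 7 4 5 10 11 8 9 ... up to lane 31. Note that the unmasked __shfl_xor intrinsic used here is deprecated on recent CUDA toolkits in favor of __shfl_xor_sync, which takes an explicit participation mask. A minimal standalone variant written directly against the CUDA runtime (i.e. without the config.cuh harness, whose MODEL_ helper is specific to this repository) might look like this:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void test_sync(int *outdata)
{
    int idx = threadIdx.x;
    // Full-warp mask: every lane of the warp participates in the exchange.
    outdata[idx] = __shfl_xor_sync(0xffffffff, idx, 2);
}

int main()
{
    int *out, ans[32];
    cudaMalloc(&out, 32 * sizeof(int));
    test_sync<<<1, 32>>>(out);   // one warp
    cudaMemcpy(ans, out, 32 * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 32; ++i) printf("%d ", ans[i]);  // 2 3 0 1 6 7 4 5 ...
    printf("\n");
    cudaFree(out);
    return 0;
}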
2c9a259157c464043dac328163670495520d0c15.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <iostream> #include <ctype.h> #include <hip/hip_runtime.h> #include <math.h> #define CEIL(a,b) ((a+b-1)/b) #define SWAP(a,b,t) t=b; b=a; a=t; #define DATAMB(bytes) (bytes/1024/1024) #define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0)) #define PI 3.14159265 typedef unsigned char uch; typedef unsigned long ul; typedef unsigned int ui; uch *TheImg, *CopyImg; // Where images are stored in CPU uch *GPUImg, *GPUCopyImg, *GPUResult; // Where images are stored in GPU struct ImgProp{ int Hpixels; int Vpixels; uch HeaderInfo[54]; ul Hbytes; } ip; #define IPHB ip.Hbytes #define IPH ip.Hpixels #define IPV ip.Vpixels #define IMAGESIZE (IPHB*IPV) #define IMAGEPIX (IPH*IPV) // Kernel that flips the given image horizontally // each thread only flips a single pixel (R,G,B) __global__ void imrotate(uch *ImgDst, uch *ImgSrc, ui Vpixels, ui Hpixels, ui BlkPerRow, ui RowBytes, double cosRot, double sinRot) { __shared__ uch PixBuffer[3072*16]; ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYsrcOffset = MYrow * RowBytes; ui MYsrcIndex = MYsrcOffset + 3 * MYcol; ////////////// find destination index int c, h, v, X, Y, NewCol, NewRow; double newX, newY, H, V, Diagonal, ScaleFactor; c=MYcol; h=Hpixels/2; v=Vpixels/2; // integer div X=(double)c-(double)h; Y=(double)v-(double)MYrow; // pixel rotation matrix newX=cosRot*X-sinRot*Y; newY=sinRot*X+cosRot*Y; // Scale to fit everything in the image box H=(double)Hpixels; V=(double)Vpixels; Diagonal=sqrt(H*H+V*V); ScaleFactor=(Hpixels>Vpixels) ? V/Diagonal : H/Diagonal; newX=newX*ScaleFactor; newY=newY*ScaleFactor; // convert back from Cartesian to image coordinates NewCol=((int) newX+h); NewRow=v-(int)newY; ui MYdstOffset = NewRow*RowBytes; ui MYdstIndex = MYdstOffset + 3 * NewCol; /////////////// ui Mytid3 = MYtid*3; PixBuffer[Mytid3] = ImgSrc[MYsrcIndex]; PixBuffer[Mytid3+1] = ImgSrc[MYsrcIndex+1]; PixBuffer[Mytid3+2] = ImgSrc[MYsrcIndex+2]; __syncthreads(); // swap pixels RGB @MYcol , @MYmirrorcol ImgDst[MYdstIndex] = PixBuffer[Mytid3]; ImgDst[MYdstIndex + 1] = PixBuffer[Mytid3+1]; ImgDst[MYdstIndex + 2] = PixBuffer[Mytid3+2]; } // Read a 24-bit/pixel BMP file into a 1D linear array. // Allocate memory to store the 1D image and return its pointer. uch *ReadBMPlin(char* fn) { static uch *Img; FILE* f = fopen(fn, "rb"); if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); } uch HeaderInfo[54]; fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width; int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height; int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; //save header for re-use memcpy(ip.HeaderInfo, HeaderInfo,54); printf("\n Input File name: %17s (%d x %d) File Size=%lu", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); // allocate memory to store the main image (1 Dimensional array) Img = (uch *)malloc(IMAGESIZE); if (Img == NULL) return Img; // Cannot allocate memory // read the image from disk fread(Img, sizeof(uch), IMAGESIZE, f); fclose(f); return Img; } // Write the 1D linear-memory stored image into file. 
void WriteBMPlin(uch *Img, char* fn) { FILE* f = fopen(fn, "wb"); if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); } //write header fwrite(ip.HeaderInfo, sizeof(uch), 54, f); //write data fwrite(Img, sizeof(uch), IMAGESIZE, f); printf("\nOutput File name: %17s (%u x %u) File Size=%lu", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); fclose(f); } int main(int argc, char **argv) { // char Flip = 'H'; float tmpKernelExcutionTime, totalKernelExecutionTime; // GPU code run times hipError_t cudaStatus, cudaStatus2; hipEvent_t time1, time2; char InputFileName[255], OutputFileName[255], ProgName[255]; ui BlkPerRow; // ui BlkPerRowInt, BlkPerRowInt2; ui ThrPerBlk = 128, NumBlocks; // ui NB2, NB4, NB8, RowInts; ui RowBytes; hipDeviceProp_t GPUprop; ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; // ui *GPUCopyImg32, *GPUImg32; char SupportedBlocks[100]; // int KernelNum=1; char KernelName[255]; double RotAngle, deltaAngle; // rotation angle int RotIter; int TotalIters; double cosRot, sinRot; strcpy(ProgName, "imrotateG"); if(argc!=4){ printf("\n\nUsage: ./imrotateG infile outfile N"); return 0; } strcpy(InputFileName, argv[1]); strcpy(OutputFileName, argv[2]); // Create CPU memory to store the input and output images TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated if (TheImg == NULL){ printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } CopyImg = (uch *)malloc(IMAGESIZE); if (CopyImg == NULL){ free(TheImg); printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); exit(EXIT_FAILURE); } hipGetDeviceProperties(&GPUprop, 0); SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024; SupportedMBlocks = SupportedKBlocks / 1024; sprintf(SupportedBlocks, "%lu %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K'); MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock; // Allocate GPU buffer for the input and output images cudaStatus = hipMalloc((void**)&GPUImg, IMAGESIZE); cudaStatus2 = hipMalloc((void**)&GPUCopyImg, IMAGESIZE); if ((cudaStatus != hipSuccess) || (cudaStatus2 != hipSuccess)){ fprintf(stderr, "hipMalloc failed! Can't allocate GPU memory"); exit(EXIT_FAILURE); } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(GPUImg, TheImg, IMAGESIZE, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy CPU to GPU failed!"); exit(EXIT_FAILURE); } RowBytes = (IPH * 3 + 3) & (~3); RowBytes = (IPH * 3 + 3) & (~3); BlkPerRow = CEIL(IPH,ThrPerBlk); NumBlocks = IPV*BlkPerRow; printf("\nNum blocks: %d\n", NumBlocks); printf("\nThread per block: %d\n", ThrPerBlk); TotalIters = atoi(argv[3]); if(TotalIters > 30){ printf("\nN is too large, should be less or equal to 30\n"); } deltaAngle = 2*PI/float(TotalIters); printf("\nTotal iterations: %d\n", TotalIters); // iteration to find all images strcpy(OutputFileName, argv[2]); char* token = strtok(OutputFileName, "."); char* OutputFirstName = token; token = strtok(NULL, "."); char* OutputLastName = token; for(RotIter=1; RotIter<=TotalIters; RotIter++){ char outName[128]=""; char tmp[10]; sprintf(tmp, "%d", RotIter); strcat(outName, OutputFirstName); strcat(outName, tmp); strcat(outName, "."); strcat(outName, OutputLastName); hipEventCreate(&time1); hipEventCreate(&time2); hipEventRecord(time1, 0); // record time1 in the first iteration RotAngle = (double)(RotIter-1)*deltaAngle; cosRot = cos(RotAngle); sinRot = sin(RotAngle); printf("\nRotation angle = %lf\n", RotAngle); hipLaunchKernelGGL(( imrotate) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUCopyImg, GPUImg, IPV, IPH, BlkPerRow, RowBytes, cosRot, sinRot); hipEventRecord(time2, 0); //record time2 in teh last iteration hipEventSynchronize(time1); hipEventSynchronize(time2); hipEventElapsedTime(&tmpKernelExcutionTime, time1, time2); totalKernelExecutionTime += tmpKernelExcutionTime; strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n"); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } GPUResult = GPUCopyImg; cudaStatus = hipMemcpy(CopyImg, GPUResult, IMAGESIZE, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy GPU to CPU failed!"); exit(EXIT_FAILURE); } cudaStatus = hipDeviceSynchronize(); //checkError(hipGetLastError()); // screen for errors in kernel launches if (cudaStatus != hipSuccess) { fprintf(stderr, "\n Program failed after hipDeviceSynchronize()!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } WriteBMPlin(CopyImg, outName); // Write the flipped image back to disk memset(CopyImg, 0, IMAGESIZE); hipMemset(GPUCopyImg, 0, IMAGESIZE); } printf("\nTotal Kernel Execution =%7.2f ms\n", totalKernelExecutionTime); hipFree(GPUImg); hipFree(GPUCopyImg); cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } free(TheImg); free(CopyImg); return(EXIT_SUCCESS); }
2c9a259157c464043dac328163670495520d0c15.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <iostream> #include <ctype.h> #include <cuda.h> #include <math.h> #define CEIL(a,b) ((a+b-1)/b) #define SWAP(a,b,t) t=b; b=a; a=t; #define DATAMB(bytes) (bytes/1024/1024) #define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0)) #define PI 3.14159265 typedef unsigned char uch; typedef unsigned long ul; typedef unsigned int ui; uch *TheImg, *CopyImg; // Where images are stored in CPU uch *GPUImg, *GPUCopyImg, *GPUResult; // Where images are stored in GPU struct ImgProp{ int Hpixels; int Vpixels; uch HeaderInfo[54]; ul Hbytes; } ip; #define IPHB ip.Hbytes #define IPH ip.Hpixels #define IPV ip.Vpixels #define IMAGESIZE (IPHB*IPV) #define IMAGEPIX (IPH*IPV) // Kernel that flips the given image horizontally // each thread only flips a single pixel (R,G,B) __global__ void imrotate(uch *ImgDst, uch *ImgSrc, ui Vpixels, ui Hpixels, ui BlkPerRow, ui RowBytes, double cosRot, double sinRot) { __shared__ uch PixBuffer[3072*16]; ui ThrPerBlk = blockDim.x; ui MYbid = blockIdx.x; ui MYtid = threadIdx.x; ui MYgtid = ThrPerBlk * MYbid + MYtid; ui MYrow = MYbid / BlkPerRow; ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk; if (MYcol >= Hpixels) return; // col out of range ui MYsrcOffset = MYrow * RowBytes; ui MYsrcIndex = MYsrcOffset + 3 * MYcol; ////////////// find destination index int c, h, v, X, Y, NewCol, NewRow; double newX, newY, H, V, Diagonal, ScaleFactor; c=MYcol; h=Hpixels/2; v=Vpixels/2; // integer div X=(double)c-(double)h; Y=(double)v-(double)MYrow; // pixel rotation matrix newX=cosRot*X-sinRot*Y; newY=sinRot*X+cosRot*Y; // Scale to fit everything in the image box H=(double)Hpixels; V=(double)Vpixels; Diagonal=sqrt(H*H+V*V); ScaleFactor=(Hpixels>Vpixels) ? V/Diagonal : H/Diagonal; newX=newX*ScaleFactor; newY=newY*ScaleFactor; // convert back from Cartesian to image coordinates NewCol=((int) newX+h); NewRow=v-(int)newY; ui MYdstOffset = NewRow*RowBytes; ui MYdstIndex = MYdstOffset + 3 * NewCol; /////////////// ui Mytid3 = MYtid*3; PixBuffer[Mytid3] = ImgSrc[MYsrcIndex]; PixBuffer[Mytid3+1] = ImgSrc[MYsrcIndex+1]; PixBuffer[Mytid3+2] = ImgSrc[MYsrcIndex+2]; __syncthreads(); // swap pixels RGB @MYcol , @MYmirrorcol ImgDst[MYdstIndex] = PixBuffer[Mytid3]; ImgDst[MYdstIndex + 1] = PixBuffer[Mytid3+1]; ImgDst[MYdstIndex + 2] = PixBuffer[Mytid3+2]; } // Read a 24-bit/pixel BMP file into a 1D linear array. // Allocate memory to store the 1D image and return its pointer. uch *ReadBMPlin(char* fn) { static uch *Img; FILE* f = fopen(fn, "rb"); if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); } uch HeaderInfo[54]; fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header // extract image height and width from header int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width; int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height; int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; //save header for re-use memcpy(ip.HeaderInfo, HeaderInfo,54); printf("\n Input File name: %17s (%d x %d) File Size=%lu", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); // allocate memory to store the main image (1 Dimensional array) Img = (uch *)malloc(IMAGESIZE); if (Img == NULL) return Img; // Cannot allocate memory // read the image from disk fread(Img, sizeof(uch), IMAGESIZE, f); fclose(f); return Img; } // Write the 1D linear-memory stored image into file. 
void WriteBMPlin(uch *Img, char* fn) { FILE* f = fopen(fn, "wb"); if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); } //write header fwrite(ip.HeaderInfo, sizeof(uch), 54, f); //write data fwrite(Img, sizeof(uch), IMAGESIZE, f); printf("\nOutput File name: %17s (%u x %u) File Size=%lu", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE); fclose(f); } int main(int argc, char **argv) { // char Flip = 'H'; float tmpKernelExcutionTime, totalKernelExecutionTime; // GPU code run times cudaError_t cudaStatus, cudaStatus2; cudaEvent_t time1, time2; char InputFileName[255], OutputFileName[255], ProgName[255]; ui BlkPerRow; // ui BlkPerRowInt, BlkPerRowInt2; ui ThrPerBlk = 128, NumBlocks; // ui NB2, NB4, NB8, RowInts; ui RowBytes; cudaDeviceProp GPUprop; ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; // ui *GPUCopyImg32, *GPUImg32; char SupportedBlocks[100]; // int KernelNum=1; char KernelName[255]; double RotAngle, deltaAngle; // rotation angle int RotIter; int TotalIters; double cosRot, sinRot; strcpy(ProgName, "imrotateG"); if(argc!=4){ printf("\n\nUsage: ./imrotateG infile outfile N"); return 0; } strcpy(InputFileName, argv[1]); strcpy(OutputFileName, argv[2]); // Create CPU memory to store the input and output images TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated if (TheImg == NULL){ printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } CopyImg = (uch *)malloc(IMAGESIZE); if (CopyImg == NULL){ free(TheImg); printf("Cannot allocate memory for the input image...\n"); exit(EXIT_FAILURE); } // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); exit(EXIT_FAILURE); } cudaGetDeviceProperties(&GPUprop, 0); SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024; SupportedMBlocks = SupportedKBlocks / 1024; sprintf(SupportedBlocks, "%lu %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K'); MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock; // Allocate GPU buffer for the input and output images cudaStatus = cudaMalloc((void**)&GPUImg, IMAGESIZE); cudaStatus2 = cudaMalloc((void**)&GPUCopyImg, IMAGESIZE); if ((cudaStatus != cudaSuccess) || (cudaStatus2 != cudaSuccess)){ fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory"); exit(EXIT_FAILURE); } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy CPU to GPU failed!"); exit(EXIT_FAILURE); } RowBytes = (IPH * 3 + 3) & (~3); RowBytes = (IPH * 3 + 3) & (~3); BlkPerRow = CEIL(IPH,ThrPerBlk); NumBlocks = IPV*BlkPerRow; printf("\nNum blocks: %d\n", NumBlocks); printf("\nThread per block: %d\n", ThrPerBlk); TotalIters = atoi(argv[3]); if(TotalIters > 30){ printf("\nN is too large, should be less or equal to 30\n"); } deltaAngle = 2*PI/float(TotalIters); printf("\nTotal iterations: %d\n", TotalIters); // iteration to find all images strcpy(OutputFileName, argv[2]); char* token = strtok(OutputFileName, "."); char* OutputFirstName = token; token = strtok(NULL, "."); char* OutputLastName = token; for(RotIter=1; RotIter<=TotalIters; RotIter++){ char outName[128]=""; char tmp[10]; sprintf(tmp, "%d", RotIter); strcat(outName, OutputFirstName); strcat(outName, tmp); strcat(outName, "."); strcat(outName, OutputLastName); cudaEventCreate(&time1); cudaEventCreate(&time2); cudaEventRecord(time1, 0); // record time1 in the first iteration RotAngle = (double)(RotIter-1)*deltaAngle; cosRot = cos(RotAngle); sinRot = sin(RotAngle); printf("\nRotation angle = %lf\n", RotAngle); imrotate <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPV, IPH, BlkPerRow, RowBytes, cosRot, sinRot); cudaEventRecord(time2, 0); //record time2 in teh last iteration cudaEventSynchronize(time1); cudaEventSynchronize(time2); cudaEventElapsedTime(&tmpKernelExcutionTime, time1, time2); totalKernelExecutionTime += tmpKernelExcutionTime; strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n"); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus); exit(EXIT_FAILURE); } GPUResult = GPUCopyImg; cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy GPU to CPU failed!"); exit(EXIT_FAILURE); } cudaStatus = cudaDeviceSynchronize(); //checkError(cudaGetLastError()); // screen for errors in kernel launches if (cudaStatus != cudaSuccess) { fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } WriteBMPlin(CopyImg, outName); // Write the flipped image back to disk memset(CopyImg, 0, IMAGESIZE); cudaMemset(GPUCopyImg, 0, IMAGESIZE); } printf("\nTotal Kernel Execution =%7.2f ms\n", totalKernelExecutionTime); cudaFree(GPUImg); cudaFree(GPUCopyImg); cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); free(TheImg); free(CopyImg); exit(EXIT_FAILURE); } free(TheImg); free(CopyImg); return(EXIT_SUCCESS); }
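// NOTE (editorial sketch, not part of the original imrotate pair): the kernel above is a
// scatter -- each thread reads its *source* pixel and writes it to the rotated destination --
// so rotated outputs can contain unfilled holes where no source pixel lands. A common
// alternative is a gather: each thread owns one destination pixel and samples the source at
// the inverse-rotated coordinate. The kernel below is a minimal, hypothetical illustration of
// that idea for the same row-padded 24-bit layout; it omits the scale-to-fit step, and the
// name imrotate_gather is not from the original code. Separately, note that
// totalKernelExecutionTime in main() is accumulated without first being initialized to 0, and
// the events created in each iteration are never released with cudaEventDestroy().
#include <cuda_runtime.h>
typedef unsigned char uch;
typedef unsigned int  ui;

// Example launch (assumed, not from the original):
//   imrotate_gather<<<dim3((Hpixels + 255) / 256, Vpixels), 256>>>(dst, src, IPV, IPH, RowBytes, cos(a), sin(a));
__global__ void imrotate_gather(uch *ImgDst, const uch *ImgSrc, ui Vpixels, ui Hpixels,
                                ui RowBytes, double cosRot, double sinRot)
{
    ui col = blockIdx.x * blockDim.x + threadIdx.x;   // destination column
    ui row = blockIdx.y;                              // destination row
    if (col >= Hpixels || row >= Vpixels) return;

    // Destination pixel in centered Cartesian coordinates
    double X = (double)col - (double)(Hpixels / 2);
    double Y = (double)(Vpixels / 2) - (double)row;

    // Inverse rotation (rotate by -angle) to find where this destination pixel comes from
    double srcX =  cosRot * X + sinRot * Y;
    double srcY = -sinRot * X + cosRot * Y;

    int sc = (int)srcX + (int)(Hpixels / 2);          // back to image coordinates
    int sr = (int)(Vpixels / 2) - (int)srcY;

    ui dst = row * RowBytes + 3 * col;
    if (sc < 0 || sr < 0 || sc >= (int)Hpixels || sr >= (int)Vpixels) {
        ImgDst[dst] = ImgDst[dst + 1] = ImgDst[dst + 2] = 0;      // no source pixel: black
    } else {
        ui src = (ui)sr * RowBytes + 3 * (ui)sc;
        ImgDst[dst]     = ImgSrc[src];
        ImgDst[dst + 1] = ImgSrc[src + 1];
        ImgDst[dst + 2] = ImgSrc[src + 2];
    }
}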
43ffc2762bad9883dd1a2143c114e63d2ae9aed8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: ndtrung #include "RigidData.cuh" #ifdef WIN32 #include <cassert> #else #include <assert.h> #endif /*! \file RigidData.cu */ #ifdef NVCC //! Kernel for seting R and V of rigid body particles /*! 
\param pdata_pos Particle position \param pdata_vel Particle velocity \param pdata_image Particle image \param pdata_orientation Particle orientation \param d_pgroup_idx Particle index \param n_pgroup Number of particles in the group \param d_particle_offset Local index of a particle in the body \param d_particle_body Body index of a particle \param d_rigid_orientation Body orientation (quaternion) \param d_rigid_com Body center of mass \param d_rigid_vel Body velocity \param d_rigid_angvel Body angular velocity \param d_rigid_image Body image \param d_rigid_particle_dis Position of a particle in the body frame \param d_rigid_particle_orientation Orientation of a particle in the body frame \param nmax Maximum number of particles per body \param box Box dimensions for periodic boundary condition handling */ template<bool set_x> __global__ void gpu_rigid_setRV_kernel(Scalar4* pdata_pos, Scalar4* pdata_vel, int3* pdata_image, Scalar4* pdata_orientation, unsigned int *d_pgroup_idx, unsigned int n_pgroup, unsigned int *d_particle_offset, unsigned int *d_particle_body, Scalar4* d_rigid_orientation, Scalar4* d_rigid_com, Scalar4* d_rigid_vel, Scalar4* d_rigid_angvel, int3* d_rigid_image, Scalar4* d_rigid_particle_dis, Scalar4* d_rigid_particle_orientation, unsigned int nmax, BoxDim box) { Scalar4 com, vel, angvel, ex_space, ey_space, ez_space; int3 body_image = make_int3(0, 0, 0); int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= n_pgroup) return; unsigned int pidx = d_pgroup_idx[group_idx]; unsigned int idx_body = d_particle_body[pidx]; unsigned int particle_offset = d_particle_offset[pidx]; Scalar4 body_orientation = d_rigid_orientation[idx_body]; com = d_rigid_com[idx_body]; vel = d_rigid_vel[idx_body]; angvel = d_rigid_angvel[idx_body]; if (set_x) { body_image = d_rigid_image[idx_body]; } exyzFromQuaternion(body_orientation, ex_space, ey_space, ez_space); int localidx = idx_body * nmax + particle_offset; Scalar4 particle_pos = d_rigid_particle_dis[localidx]; Scalar4 constituent_orientation = d_rigid_particle_orientation[localidx]; // compute ri with new orientation Scalar3 ri; ri.x = ex_space.x * particle_pos.x + ey_space.x * particle_pos.y + ez_space.x * particle_pos.z; ri.y = ex_space.y * particle_pos.x + ey_space.y * particle_pos.y + ez_space.y * particle_pos.z; ri.z = ex_space.z * particle_pos.x + ey_space.z * particle_pos.y + ez_space.z * particle_pos.z; Scalar3 ppos; int3 image; Scalar4 porientation; if (set_x) { // x_particle = com + ri ppos.x = com.x + ri.x; ppos.y = com.y + ri.y; ppos.z = com.z + ri.z; // time to fix the periodic boundary conditions image = body_image; box.wrap(ppos, image); // update particle orientation quatquat(body_orientation, constituent_orientation, porientation); } // v_particle = vel + angvel x ri Scalar4 pvel = pdata_vel[pidx]; pvel.x = vel.x + angvel.y * ri.z - angvel.z * ri.y; pvel.y = vel.y + angvel.z * ri.x - angvel.x * ri.z; pvel.z = vel.z + angvel.x * ri.y - angvel.y * ri.x; // write out the results if (set_x) { pdata_pos[pidx] = make_scalar4(ppos.x, ppos.y, ppos.z, pdata_pos[pidx].w); pdata_image[pidx] = image; pdata_orientation[pidx] = porientation; } pdata_vel[pidx] = pvel; } #endif // Sets R and v of particles of the rigid body on the GPU /*! 
\param d_pos array of particle positions \param d_vel array of particle velocities \param d_image array of particle images \param d_body array of particle body ids \param rigid_data Rigid body data \param d_pdata_orientation Particle orientations \param d_group_members Device array listing the indicies of the mebers of the group to integrate (all particles in rigid bodies) \param group_size Number of members in the group \param box Box dimensions for periodic boundary condition handling \param set_x boolean indicating whether the positions are changed or not (first or second step of integration) */ hipError_t gpu_rigid_setRV(Scalar4 *d_pos, Scalar4 *d_vel, int3 *d_image, unsigned int *d_body, const gpu_rigid_data_arrays& rigid_data, Scalar4 *d_pdata_orientation, unsigned int *d_group_members, unsigned int group_size, const BoxDim& box, bool set_x) { assert(d_pos); assert(d_vel); assert(d_pdata_orientation); assert(d_image); assert(d_group_members); assert(rigid_data.particle_offset); assert(d_body); assert(rigid_data.orientation); assert(rigid_data.com); assert(rigid_data.vel); assert(rigid_data.angvel); assert(rigid_data.body_image); assert(rigid_data.particle_pos); assert(rigid_data.particle_orientation); unsigned int nmax = rigid_data.nmax; unsigned int block_size = 192; dim3 particle_grid(group_size/block_size+1, 1, 1); dim3 particle_threads(block_size, 1, 1); if (set_x) hipLaunchKernelGGL(( gpu_rigid_setRV_kernel<true>), dim3(particle_grid), dim3(particle_threads) , 0, 0, d_pos, d_vel, d_image, d_pdata_orientation, d_group_members, group_size, rigid_data.particle_offset, d_body, rigid_data.orientation, rigid_data.com, rigid_data.vel, rigid_data.angvel, rigid_data.body_image, rigid_data.particle_pos, rigid_data.particle_orientation, nmax, box); else hipLaunchKernelGGL(( gpu_rigid_setRV_kernel<false>), dim3(particle_grid), dim3(particle_threads) , 0, 0, d_pos, d_vel, d_image, d_pdata_orientation, d_group_members, group_size, rigid_data.particle_offset, d_body, rigid_data.orientation, rigid_data.com, rigid_data.vel, rigid_data.angvel, rigid_data.body_image, rigid_data.particle_pos, rigid_data.particle_orientation, nmax, box); return hipSuccess; } //! Kernel driven by gpu_compute_virial_correction_end() __global__ void gpu_compute_virial_correction_end_kernel(Scalar *d_net_virial, unsigned int virial_pitch, const Scalar4 *d_net_force, const Scalar4 *d_oldpos, const Scalar4 *d_oldvel, const Scalar4 *d_vel, const unsigned int *d_body, Scalar deltaT, unsigned int N) { unsigned int pidx = blockIdx.x * blockDim.x + threadIdx.x; if (pidx >= N) return; if (d_body[pidx] != NO_BODY) { // calculate the virial from the position and velocity from the previous step Scalar4 old_vel = d_oldvel[pidx]; Scalar4 old_pos = d_oldpos[pidx]; Scalar4 vel = d_vel[pidx]; Scalar mass = vel.w; Scalar4 net_force = d_net_force[pidx]; Scalar3 fc; fc.x = mass * (vel.x - old_vel.x) / deltaT - net_force.x; fc.y = mass * (vel.y - old_vel.y) / deltaT - net_force.y; fc.z = mass * (vel.z - old_vel.z) / deltaT - net_force.z; d_net_virial[0*virial_pitch+pidx] += old_pos.x * fc.x; d_net_virial[1*virial_pitch+pidx] += old_pos.x * fc.y; d_net_virial[2*virial_pitch+pidx] += old_pos.x * fc.z; d_net_virial[3*virial_pitch+pidx] += old_pos.y * fc.y; d_net_virial[4*virial_pitch+pidx] += old_pos.y * fc.z; d_net_virial[5*virial_pitch+pidx] += old_pos.z * fc.z; } } /*! 
\param d_net_virial Net virial data to update with correction terms \param virial_pitch Pitch of d_net_virial \param d_net_force Net force on each particle \param d_oldpos Old position of particles saved at the start of the step \param d_oldvel Old velocity of particles saved at the start of the step \param d_vel Current velocity of particles at the end of the step \param d_body Body index of each particle \param deltaT Step size \param N number of particles in the box */ hipError_t gpu_compute_virial_correction_end(Scalar *d_net_virial, const unsigned int virial_pitch, const Scalar4 *d_net_force, const Scalar4 *d_oldpos, const Scalar4 *d_oldvel, const Scalar4 *d_vel, const unsigned int *d_body, Scalar deltaT, unsigned int N) { assert(d_net_virial); assert(d_net_force); assert(d_oldpos); assert(d_oldvel); assert(d_vel); unsigned int block_size = 192; dim3 particle_grid(N/block_size+1, 1, 1); dim3 particle_threads(block_size, 1, 1); hipLaunchKernelGGL(( gpu_compute_virial_correction_end_kernel), dim3(particle_grid), dim3(particle_threads), 0, 0, d_net_virial, virial_pitch, d_net_force, d_oldpos, d_oldvel, d_vel, d_body, deltaT, N); return hipSuccess; }
43ffc2762bad9883dd1a2143c114e63d2ae9aed8.cu
/* Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition (HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of the University of Michigan All rights reserved. HOOMD-blue may contain modifications ("Contributions") provided, and to which copyright is held, by various Contributors who have granted The Regents of the University of Michigan the right to modify and/or distribute such Contributions. You may redistribute, use, and create derivate works of HOOMD-blue, in source and binary forms, provided you abide by the following conditions: * Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer both in the code and prominently in any materials provided with the distribution. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. * All publications and presentations based on HOOMD-blue, including any reports or published results obtained, in whole or in part, with HOOMD-blue, will acknowledge its use according to the terms posted at the time of submission on: http://codeblue.umich.edu/hoomd-blue/citations.html * Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website: http://codeblue.umich.edu/hoomd-blue/ * Apart from the above required attributions, neither the name of the copyright holder nor the names of HOOMD-blue's contributors may be used to endorse or promote products derived from this software without specific prior written permission. Disclaimer THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Maintainer: ndtrung #include "RigidData.cuh" #ifdef WIN32 #include <cassert> #else #include <assert.h> #endif /*! \file RigidData.cu */ #ifdef NVCC //! Kernel for seting R and V of rigid body particles /*! 
\param pdata_pos Particle position \param pdata_vel Particle velocity \param pdata_image Particle image \param pdata_orientation Particle orientation \param d_pgroup_idx Particle index \param n_pgroup Number of particles in the group \param d_particle_offset Local index of a particle in the body \param d_particle_body Body index of a particle \param d_rigid_orientation Body orientation (quaternion) \param d_rigid_com Body center of mass \param d_rigid_vel Body velocity \param d_rigid_angvel Body angular velocity \param d_rigid_image Body image \param d_rigid_particle_dis Position of a particle in the body frame \param d_rigid_particle_orientation Orientation of a particle in the body frame \param nmax Maximum number of particles per body \param box Box dimensions for periodic boundary condition handling */ template<bool set_x> __global__ void gpu_rigid_setRV_kernel(Scalar4* pdata_pos, Scalar4* pdata_vel, int3* pdata_image, Scalar4* pdata_orientation, unsigned int *d_pgroup_idx, unsigned int n_pgroup, unsigned int *d_particle_offset, unsigned int *d_particle_body, Scalar4* d_rigid_orientation, Scalar4* d_rigid_com, Scalar4* d_rigid_vel, Scalar4* d_rigid_angvel, int3* d_rigid_image, Scalar4* d_rigid_particle_dis, Scalar4* d_rigid_particle_orientation, unsigned int nmax, BoxDim box) { Scalar4 com, vel, angvel, ex_space, ey_space, ez_space; int3 body_image = make_int3(0, 0, 0); int group_idx = blockIdx.x * blockDim.x + threadIdx.x; if (group_idx >= n_pgroup) return; unsigned int pidx = d_pgroup_idx[group_idx]; unsigned int idx_body = d_particle_body[pidx]; unsigned int particle_offset = d_particle_offset[pidx]; Scalar4 body_orientation = d_rigid_orientation[idx_body]; com = d_rigid_com[idx_body]; vel = d_rigid_vel[idx_body]; angvel = d_rigid_angvel[idx_body]; if (set_x) { body_image = d_rigid_image[idx_body]; } exyzFromQuaternion(body_orientation, ex_space, ey_space, ez_space); int localidx = idx_body * nmax + particle_offset; Scalar4 particle_pos = d_rigid_particle_dis[localidx]; Scalar4 constituent_orientation = d_rigid_particle_orientation[localidx]; // compute ri with new orientation Scalar3 ri; ri.x = ex_space.x * particle_pos.x + ey_space.x * particle_pos.y + ez_space.x * particle_pos.z; ri.y = ex_space.y * particle_pos.x + ey_space.y * particle_pos.y + ez_space.y * particle_pos.z; ri.z = ex_space.z * particle_pos.x + ey_space.z * particle_pos.y + ez_space.z * particle_pos.z; Scalar3 ppos; int3 image; Scalar4 porientation; if (set_x) { // x_particle = com + ri ppos.x = com.x + ri.x; ppos.y = com.y + ri.y; ppos.z = com.z + ri.z; // time to fix the periodic boundary conditions image = body_image; box.wrap(ppos, image); // update particle orientation quatquat(body_orientation, constituent_orientation, porientation); } // v_particle = vel + angvel x ri Scalar4 pvel = pdata_vel[pidx]; pvel.x = vel.x + angvel.y * ri.z - angvel.z * ri.y; pvel.y = vel.y + angvel.z * ri.x - angvel.x * ri.z; pvel.z = vel.z + angvel.x * ri.y - angvel.y * ri.x; // write out the results if (set_x) { pdata_pos[pidx] = make_scalar4(ppos.x, ppos.y, ppos.z, pdata_pos[pidx].w); pdata_image[pidx] = image; pdata_orientation[pidx] = porientation; } pdata_vel[pidx] = pvel; } #endif // Sets R and v of particles of the rigid body on the GPU /*! 
\param d_pos array of particle positions \param d_vel array of particle velocities \param d_image array of particle images \param d_body array of particle body ids \param rigid_data Rigid body data \param d_pdata_orientation Particle orientations \param d_group_members Device array listing the indicies of the mebers of the group to integrate (all particles in rigid bodies) \param group_size Number of members in the group \param box Box dimensions for periodic boundary condition handling \param set_x boolean indicating whether the positions are changed or not (first or second step of integration) */ cudaError_t gpu_rigid_setRV(Scalar4 *d_pos, Scalar4 *d_vel, int3 *d_image, unsigned int *d_body, const gpu_rigid_data_arrays& rigid_data, Scalar4 *d_pdata_orientation, unsigned int *d_group_members, unsigned int group_size, const BoxDim& box, bool set_x) { assert(d_pos); assert(d_vel); assert(d_pdata_orientation); assert(d_image); assert(d_group_members); assert(rigid_data.particle_offset); assert(d_body); assert(rigid_data.orientation); assert(rigid_data.com); assert(rigid_data.vel); assert(rigid_data.angvel); assert(rigid_data.body_image); assert(rigid_data.particle_pos); assert(rigid_data.particle_orientation); unsigned int nmax = rigid_data.nmax; unsigned int block_size = 192; dim3 particle_grid(group_size/block_size+1, 1, 1); dim3 particle_threads(block_size, 1, 1); if (set_x) gpu_rigid_setRV_kernel<true><<< particle_grid, particle_threads >>>(d_pos, d_vel, d_image, d_pdata_orientation, d_group_members, group_size, rigid_data.particle_offset, d_body, rigid_data.orientation, rigid_data.com, rigid_data.vel, rigid_data.angvel, rigid_data.body_image, rigid_data.particle_pos, rigid_data.particle_orientation, nmax, box); else gpu_rigid_setRV_kernel<false><<< particle_grid, particle_threads >>>(d_pos, d_vel, d_image, d_pdata_orientation, d_group_members, group_size, rigid_data.particle_offset, d_body, rigid_data.orientation, rigid_data.com, rigid_data.vel, rigid_data.angvel, rigid_data.body_image, rigid_data.particle_pos, rigid_data.particle_orientation, nmax, box); return cudaSuccess; } //! Kernel driven by gpu_compute_virial_correction_end() __global__ void gpu_compute_virial_correction_end_kernel(Scalar *d_net_virial, unsigned int virial_pitch, const Scalar4 *d_net_force, const Scalar4 *d_oldpos, const Scalar4 *d_oldvel, const Scalar4 *d_vel, const unsigned int *d_body, Scalar deltaT, unsigned int N) { unsigned int pidx = blockIdx.x * blockDim.x + threadIdx.x; if (pidx >= N) return; if (d_body[pidx] != NO_BODY) { // calculate the virial from the position and velocity from the previous step Scalar4 old_vel = d_oldvel[pidx]; Scalar4 old_pos = d_oldpos[pidx]; Scalar4 vel = d_vel[pidx]; Scalar mass = vel.w; Scalar4 net_force = d_net_force[pidx]; Scalar3 fc; fc.x = mass * (vel.x - old_vel.x) / deltaT - net_force.x; fc.y = mass * (vel.y - old_vel.y) / deltaT - net_force.y; fc.z = mass * (vel.z - old_vel.z) / deltaT - net_force.z; d_net_virial[0*virial_pitch+pidx] += old_pos.x * fc.x; d_net_virial[1*virial_pitch+pidx] += old_pos.x * fc.y; d_net_virial[2*virial_pitch+pidx] += old_pos.x * fc.z; d_net_virial[3*virial_pitch+pidx] += old_pos.y * fc.y; d_net_virial[4*virial_pitch+pidx] += old_pos.y * fc.z; d_net_virial[5*virial_pitch+pidx] += old_pos.z * fc.z; } } /*! 
\param d_net_virial Net virial data to update with correction terms \param virial_pitch Pitch of d_net_virial \param d_net_force Net force on each particle \param d_oldpos Old position of particles saved at the start of the step \param d_oldvel Old velocity of particles saved at the start of the step \param d_vel Current velocity of particles at the end of the step \param d_body Body index of each particle \param deltaT Step size \param N number of particles in the box */ cudaError_t gpu_compute_virial_correction_end(Scalar *d_net_virial, const unsigned int virial_pitch, const Scalar4 *d_net_force, const Scalar4 *d_oldpos, const Scalar4 *d_oldvel, const Scalar4 *d_vel, const unsigned int *d_body, Scalar deltaT, unsigned int N) { assert(d_net_virial); assert(d_net_force); assert(d_oldpos); assert(d_oldvel); assert(d_vel); unsigned int block_size = 192; dim3 particle_grid(N/block_size+1, 1, 1); dim3 particle_threads(block_size, 1, 1); gpu_compute_virial_correction_end_kernel<<<particle_grid, particle_threads>>>(d_net_virial, virial_pitch, d_net_force, d_oldpos, d_oldvel, d_vel, d_body, deltaT, N); return cudaSuccess; }
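// NOTE (editorial sketch, not part of the original pair): the launches above size the grid as
// group_size/block_size + 1 (and N/block_size + 1). That is safe because both kernels
// bounds-check their index, but it launches one extra, fully idle block whenever the count is
// an exact multiple of the block size. The usual ceiling-division idiom avoids that; a small,
// self-contained illustration (ceil_div is a hypothetical helper, not part of RigidData):
#include <cstdio>

static unsigned int ceil_div(unsigned int n, unsigned int block)
{
    return (n + block - 1) / block;   // smallest grid that covers n threads
}

int main()
{
    const unsigned int block_size = 192;                    // same block size as the launches above
    const unsigned int tests[] = {1, 191, 192, 193, 384};
    for (unsigned int n : tests)
        std::printf("n=%4u   n/block+1 = %u   ceil_div = %u\n",
                    n, n / block_size + 1, ceil_div(n, block_size));
    return 0;
}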
3f5d80f11162771b9bca9276d3203992a84a9ddc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_kernels.hpp"

__global__ void restore_kernel(uchar* ret, const float* Yst, const float* mask, const uchar* Xt, const float* trans,
                               const int H, const int W, const int h, const int w) {
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    int offset = blockDim.x * gridDim.x;
    while (tid < h * w) {
        int x = tid % w;
        int y = tid / w;
        float sx = x*trans[0] + y*trans[1] + trans[2];
        float sy = x*trans[3] + y*trans[4] + trans[5];
        if (sx < 0 || sy < 0 || sx >= W-1 || sy >= H-1){
            ret[tid*3+0] = Xt[tid*3+0];
            ret[tid*3+1] = Xt[tid*3+1];
            ret[tid*3+2] = Xt[tid*3+2];
            tid += offset;
            continue;
        }
        float xp = sx - (int)sx;
        float yp = sy - (int)sy;
        float color[3] = {0};
        for(int i=0;i<3;i++){
            float v = 0;
            float a = Yst[i*H*W + int(sy)*W + (int)(sx)];
            float b = Yst[i*H*W + int(sy)*W + (int)(sx+1)];
            float c = Yst[i*H*W + int(sy+1)*W + (int)(sx)];
            float d = Yst[i*H*W + int(sy+1)*W + (int)(sx+1)];
            float x1 = a + (b-a)*xp;
            float x2 = c + (d-c)*xp;
            v = x1 + (x2-x1)*yp;
            color[i] = v * 0.5 + 0.5;
        }
        float alpha = 0;
        {
            float a = mask[(int)(sy)*W + (int)(sx)];
            float b = mask[(int)(sy)*W + (int)(sx+1)];
            float c = mask[(int)(sy+1)*W + (int)(sx)];
            float d = mask[(int)(sy+1)*W + (int)(sx+1)];
            float x1 = a + (b-a)*xp;
            float x2 = c + (d-c)*xp;
            alpha = x1 + (x2-x1)*yp;
        }
        for(int i=0;i<3;i++){
            float c = color[i]*255*alpha + Xt[tid*3+(i)]*(1-alpha);
            c = c < 0 ? 0 : c;
            c = c > 255 ? 255 : c;
            ret[tid*3+i] = c;
        }
        tid += offset;
    }
}

void restore_image(uchar* ret, const float* Yst, const float* mask, const uchar* Xt, const float* trans,
                   const int H, const int W, const int h, const int w) {
    hipLaunchKernelGGL(( restore_kernel), dim3(1000), dim3(64), 0, 0, ret, Yst, mask, Xt, trans, H, W, h, w);
    hipDeviceSynchronize();
}
3f5d80f11162771b9bca9276d3203992a84a9ddc.cu
#include "cuda_kernels.hpp" __global__ void restore_kernel(uchar* ret, const float* Yst, const float* mask, const uchar* Xt, const float* trans, const int H, const int W, const int h, const int w) { int tid = threadIdx.x + blockIdx.x*blockDim.x; int offset = blockDim.x * gridDim.x; while (tid < h * w) { int x = tid % w; int y = tid / w; float sx = x*trans[0] + y*trans[1] + trans[2]; float sy = x*trans[3] + y*trans[4] + trans[5]; if (sx < 0 || sy < 0 || sx >= W-1 || sy >= H-1){ ret[tid*3+0] = Xt[tid*3+0]; ret[tid*3+1] = Xt[tid*3+1]; ret[tid*3+2] = Xt[tid*3+2]; tid += offset; continue; } float xp = sx - (int)sx; float yp = sy - (int)sy; float color[3] = {0}; for(int i=0;i<3;i++){ float v = 0; float a = Yst[i*H*W + int(sy)*W + (int)(sx)]; float b = Yst[i*H*W + int(sy)*W + (int)(sx+1)]; float c = Yst[i*H*W + int(sy+1)*W + (int)(sx)]; float d = Yst[i*H*W + int(sy+1)*W + (int)(sx+1)]; float x1 = a + (b-a)*xp; float x2 = c + (d-c)*xp; v = x1 + (x2-x1)*yp; color[i] = v * 0.5 + 0.5; } float alpha = 0; { float a = mask[(int)(sy)*W + (int)(sx)]; float b = mask[(int)(sy)*W + (int)(sx+1)]; float c = mask[(int)(sy+1)*W + (int)(sx)]; float d = mask[(int)(sy+1)*W + (int)(sx+1)]; float x1 = a + (b-a)*xp; float x2 = c + (d-c)*xp; alpha = x1 + (x2-x1)*yp; } for(int i=0;i<3;i++){ float c = color[i]*255*alpha + Xt[tid*3+(i)]*(1-alpha); c = c < 0 ? 0 : c; c = c > 255 ? 255 : c; ret[tid*3+i] = c; } tid += offset; } } void restore_image(uchar* ret, const float* Yst, const float* mask, const uchar* Xt, const float* trans, const int H, const int W, const int h, const int w) { restore_kernel<<<1000, 64>>>(ret, Yst, mask, Xt, trans, H, W, h, w); cudaThreadSynchronize(); }
ac2e03d4518ad6c1cc5ce0ae141acafdd8e419a1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ static void update_e(int objs,double* e,double* kval,double b_old,double b_new,int i,int j,int yi,int yj,double ai_old,double ai_new,double aj_old,double aj_new){
    int id=blockDim.x * blockIdx.x + threadIdx.x;
    if (id<objs){
        double val=e[id];
        val+=(b_new-b_old);
        double ti=yi*kval[i*objs+id];
        double tj=yj*kval[j*objs+id];
        val += ti*(ai_new-ai_old);
        val += tj*(aj_new-aj_old);
        e[id]=val;
    }
}
ac2e03d4518ad6c1cc5ce0ae141acafdd8e419a1.cu
#include "includes.h" __global__ static void update_e(int objs,double* e,double* kval,double b_old,double b_new,int i,int j,int yi,int yj,double ai_old,double ai_new,double aj_old,double aj_new){ int id=blockDim.x * blockIdx.x + threadIdx.x; if (id<objs){ double val=e[id]; val+=(b_new-b_old); double ti=yi*kval[i*objs+id]; double tj=yj*kval[j*objs+id]; val += ti*(ai_new-ai_old); val += tj*(aj_new-aj_old); e[id]=val; } }
6f275f6c0da44f99a1a398cd48e418f986ec5adb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <iostream> #include <fstream> #include <time.h> #include <thrust/device_vector.h> #include <math.h> #define BLOCK_SIZE 16 #define TILE_DIM 16 __global__ void MV(int* A, int* B, int* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) { int CValue = 0; int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ int As[TILE_DIM][TILE_DIM]; __shared__ int Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows) As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x]; else As[threadIdx.y][threadIdx.x] = 0; if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols) Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else Bs[threadIdx.y][threadIdx.x] = 0; __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; __syncthreads(); } if (Row < CRows && Col < CCols) C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) + (blockIdx.x * blockDim.x)+ threadIdx.x] = CValue; } int main(int argc, char const *argv[]) { // number of instances of data generated int NUM = 500; std::ofstream ofile; // change here to customize output filename ofile.open("matrix_vector_gpu_500_points_Tesla_2.csv"); for (int iterator = 0; iterator < NUM; iterator++) { if (iterator % 10 == 0) std::cout << "iter: " << iterator << std::endl; // size int m, n, k; m = rand() % 1024 + 1; n = rand() % 1024 + 1; k = 1; // density int power1; double d; power1 = rand()%int((log2(double(m*n))+1)); d = 1/pow(2,power1); // [m*n] * [n*1] // allocate memory in host RAM int *h_a, *h_b, *h_c; hipHostMalloc((void **) &h_a, sizeof(int) * m * n); hipHostMalloc((void **) &h_b, sizeof(int) * n * k); hipHostMalloc((void **) &h_c, sizeof(int) * m * k); // initialize matrix A // if A is a sparse matrix if (d <= 0.5){ int count_a = m * n * d; for (int it = 0; it < count_a; it++){ // approximation int i = rand() % m; int j = rand() % n; h_a[i*n+j] = rand() % 1024 + 1; } } // if A is a dense matrix else{ for (int i = 0; i < m; i++){ for (int j = 0; j < n; j++){ h_a[i*n+j] = rand() % 1024 + 1; } } } // random initialize vector B int count_b = n; for (int it = 0; it < count_b; it++){ h_b[it] = rand() % 1024 + 1; } // Allocate memory space on the device int *d_a, *d_b, *d_c; hipMalloc((void **) &d_a, sizeof(int) * m * n); hipMalloc((void **) &d_b, sizeof(int) * n * k); hipMalloc((void **) &d_c, sizeof(int) * m * k); hipMemcpy(d_a, h_a, sizeof(int) * m * n, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, sizeof(int) * n * k, hipMemcpyHostToDevice); float gpu_elapsed_time_ms; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); hipEventRecord(start, 0); // launch kernel MV << < dimGrid, dimBlock >> > (d_a, d_b, d_c, m, n, n, k, m, k); hipMemcpy(h_c, d_c, sizeof(int) * m * k, hipMemcpyDeviceToHost); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&gpu_elapsed_time_ms, start, stop); int c = m*n; ofile << gpu_elapsed_time_ms/1000; ofile << "," << m << "," << n << ","; ofile << d << "," << c << ",\n"; // free memory hipFree(d_a); hipFree(d_b); hipFree(d_c); hipHostFree(h_a); 
hipHostFree(h_b); hipHostFree(h_c); } ofile.close(); return 0; }
6f275f6c0da44f99a1a398cd48e418f986ec5adb.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <iostream> #include <fstream> #include <time.h> #include <thrust/device_vector.h> #include <math.h> #define BLOCK_SIZE 16 #define TILE_DIM 16 __global__ void MV(int* A, int* B, int* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) { int CValue = 0; int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ int As[TILE_DIM][TILE_DIM]; __shared__ int Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows) As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x]; else As[threadIdx.y][threadIdx.x] = 0; if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols) Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col]; else Bs[threadIdx.y][threadIdx.x] = 0; __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; __syncthreads(); } if (Row < CRows && Col < CCols) C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) + (blockIdx.x * blockDim.x)+ threadIdx.x] = CValue; } int main(int argc, char const *argv[]) { // number of instances of data generated int NUM = 500; std::ofstream ofile; // change here to customize output filename ofile.open("matrix_vector_gpu_500_points_Tesla_2.csv"); for (int iterator = 0; iterator < NUM; iterator++) { if (iterator % 10 == 0) std::cout << "iter: " << iterator << std::endl; // size int m, n, k; m = rand() % 1024 + 1; n = rand() % 1024 + 1; k = 1; // density int power1; double d; power1 = rand()%int((log2(double(m*n))+1)); d = 1/pow(2,power1); // [m*n] * [n*1] // allocate memory in host RAM int *h_a, *h_b, *h_c; cudaMallocHost((void **) &h_a, sizeof(int) * m * n); cudaMallocHost((void **) &h_b, sizeof(int) * n * k); cudaMallocHost((void **) &h_c, sizeof(int) * m * k); // initialize matrix A // if A is a sparse matrix if (d <= 0.5){ int count_a = m * n * d; for (int it = 0; it < count_a; it++){ // approximation int i = rand() % m; int j = rand() % n; h_a[i*n+j] = rand() % 1024 + 1; } } // if A is a dense matrix else{ for (int i = 0; i < m; i++){ for (int j = 0; j < n; j++){ h_a[i*n+j] = rand() % 1024 + 1; } } } // random initialize vector B int count_b = n; for (int it = 0; it < count_b; it++){ h_b[it] = rand() % 1024 + 1; } // Allocate memory space on the device int *d_a, *d_b, *d_c; cudaMalloc((void **) &d_a, sizeof(int) * m * n); cudaMalloc((void **) &d_b, sizeof(int) * n * k); cudaMalloc((void **) &d_c, sizeof(int) * m * k); cudaMemcpy(d_a, h_a, sizeof(int) * m * n, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, sizeof(int) * n * k, cudaMemcpyHostToDevice); float gpu_elapsed_time_ms; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE; unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; dim3 dimGrid(grid_cols, grid_rows); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); cudaEventRecord(start, 0); // launch kernel MV << < dimGrid, dimBlock >> > (d_a, d_b, d_c, m, n, n, k, m, k); cudaMemcpy(h_c, d_c, sizeof(int) * m * k, cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop); int c = m*n; ofile << gpu_elapsed_time_ms/1000; ofile << "," << m << "," << n << ","; ofile << d << "," << c << ",\n"; // free memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c); } ofile.close(); return 0; }
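// NOTE (editorial sketch, not part of the original pair): in the benchmark loop above,
// cudaEventRecord(stop, 0) is issued after the device-to-host cudaMemcpy, so the reported
// time covers the kernel plus the result transfer. If only the kernel should be timed, the
// events can bracket just the launch and the copy can follow; a minimal, generic illustration
// (dummy_kernel and its arguments are placeholders, not from the original benchmark):
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel(int* out)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) *out = 42;
}

int main()
{
    int* d_out = nullptr;
    cudaMalloc(&d_out, sizeof(int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    dummy_kernel<<<1, 32>>>(d_out);                  // time only the kernel ...
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);

    int h_out = 0;
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);   // ... copy afterwards
    std::printf("kernel time: %.3f ms, result = %d\n", ms, h_out);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_out);
    return 0;
}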
f3112a404286f7f494cafb48838897c252ee888f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define __CUDA #include <cstdio> // #include <iostream> //#include <cutil.h> #include <helper_cuda.h> /* #include <helper_cuda_drvapi.h> #include <helper_cuda_gl.h> #include <helper_functions.h> #include <helper_image.h> #include <helper_math.h> #include <helper_string.h> #include <helper_timer.h> */ #include "hermite6-gpu.h" #define NTHREADS 128 //#define NJBLOCKS 16 // GF 8800 GTS 512 //#define NJBLOCKS_ 16 //#define NJBLOCKS 30 // GF 280 GTX //#define NJBLOCKS_ 32 #define NJBLOCKS 30 // GF 480 GTX #define NJBLOCKS_ 32 #define NREDUCE (NTHREADS/NJBLOCKS_) #define NIBLOCKS 32 #define NIMAX (NTHREADS * NIBLOCKS) // 2048 #define GPU_REDUCE struct Force_dev{ float2 acc[3]; // 6 // float2 pot; float jrk[3]; // 9 // float pad0; float snp[3]; // 12 // float pad1; __device__ Force_dev(){ // acc[0] = acc[1] = acc[2] = pot = make_float2(0.f, 0.f); acc[0] = acc[1] = acc[2] = make_float2(0.f, 0.f); jrk[0] = jrk[1] = jrk[2] = snp[0] = snp[1] = snp[2] = 0.f; } }; __device__ void force_reduce(Force_dev &fl, Force_dev &fr){ #pragma unroll for(int k=0; k<3; k++){ fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].x); fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].y); fl.jrk[k] += fr.jrk[k]; fl.snp[k] += fr.snp[k]; } // fl.pot = float2_accum(fl.pot, fr.pot.x); // fl.pot = float2_accum(fl.pot, fr.pot.y); } __device__ void h6_kernel( const Predictor &ip, const Predictor &jp, Force_dev &fo, float eps2){ #if 0 float dx = float2_sub(jp.pos[0], ip.pos[0]); float dy = float2_sub(jp.pos[1], ip.pos[1]); float dz = float2_sub(jp.pos[2], ip.pos[2]); float dvx = jp.vel[0] - ip.vel[0]; float dvy = jp.vel[1] - ip.vel[1]; float dvz = jp.vel[2] - ip.vel[2]; float dax = jp.acc[0] - ip.acc[0]; float day = jp.acc[1] - ip.acc[1]; float daz = jp.acc[2] - ip.acc[2]; #else float dx = (jp.posH.x - ip.posH.x) + (jp.posL.x - ip.posL.x); float dy = (jp.posH.y - ip.posH.y) + (jp.posL.y - ip.posL.y); float dz = (jp.posH.z - ip.posH.z) + (jp.posL.z - ip.posL.z); float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float dax = jp.acc.x - ip.acc.x; float day = jp.acc.y - ip.acc.y; float daz = jp.acc.z - ip.acc.z; #endif float r2 = eps2 + dx*dx + dy*dy + dz*dz; float drdv = dx*dvx + dy*dvy + dz*dvz; float dvdv = dvx*dvx + dvy*dvy + dvz*dvz; float drda = dx*dax + dy*day + dz*daz; float rinv1 = rsqrtf(r2); float rinv2 = rinv1 * rinv1; float alpha = (drdv)*rinv2; float beta = (dvdv + drda)*rinv2 + alpha*alpha; // rinv1 *= jp.mass; rinv1 *= jp.posH.w; float rinv3 = rinv1 * rinv2; // float pot = rinv1; float ax = rinv3*dx; float ay = rinv3*dy; float az = rinv3*dz; float jx = rinv3*dvx + (-3.f*alpha)*ax; float jy = rinv3*dvy + (-3.f*alpha)*ay; float jz = rinv3*dvz + (-3.f*alpha)*az; float sx = rinv3*dax + (-6.f*alpha)*jx + (-3.f*beta)*ax; float sy = rinv3*day + (-6.f*alpha)*jy + (-3.f*beta)*ay; float sz = rinv3*daz + (-6.f*alpha)*jz + (-3.f*beta)*az; #if 0 if(r2 != eps2){ fo.pot = float2_accum(fo.pot, pot); } #endif fo.acc[0] = float2_accum(fo.acc[0], ax); fo.acc[1] = float2_accum(fo.acc[1], ay); fo.acc[2] = float2_accum(fo.acc[2], az); fo.jrk[0] += jx; fo.jrk[1] += jy; fo.jrk[2] += jz; fo.snp[0] += sx; fo.snp[1] += sy; fo.snp[2] += sz; } __global__ void h6_gravity( int ni, int nj, Predictor ipred[], Predictor jpred[], Force_dev force[][NJBLOCKS_], float eps2){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + NTHREADS * ibid; int jstart = (nj * (jbid )) / NJBLOCKS; int jend = (nj 
* (jbid+1)) / NJBLOCKS; // small kernel opt int nskip = 1; int niloc = ni - NTHREADS * ibid; if(niloc <= NTHREADS/2) nskip = 2; if(niloc <= NTHREADS/4) nskip = 4; if(niloc <= NTHREADS/8) nskip = 8; if(niloc <= NTHREADS/16) nskip = 16; if(niloc <= NTHREADS/32) nskip = 32; int joff = tid / (NTHREADS/nskip); __shared__ Predictor jpshare[NTHREADS]; Force_dev fo; Predictor ip = ipred[tid % (NTHREADS/nskip) + NTHREADS * ibid]; for(int j=jstart; j<jend; j+=NTHREADS){ __syncthreads(); #if 0 jpshare[tid] = jpred[j+tid]; #else float4 *src = (float4 *)&jpred[j]; float4 *dst = (float4 *)jpshare; for(int it=0; it<sizeof(Predictor)/sizeof(float4); it++){ dst[tid] = src[tid]; dst += NTHREADS; src += NTHREADS; } #endif __syncthreads(); if(jend-j < NTHREADS){ for(int jj=0; jj<jend-j; jj+=nskip){ Predictor &jp = jpshare[jj+joff]; if(jj+joff < jend-j) h6_kernel(ip, jp, fo, eps2); } }else{ #if 0 #pragma unroll for(int jj=0; jj<NTHREADS; jj+=nskip){ Predictor &jp = jpshare[jj+joff]; h6_kernel(ip, jp, fo, eps2); } #else for(int jj=0; jj<NTHREADS; jj+=4*nskip){ Predictor &jp0 = jpshare[0*nskip+jj+joff]; Predictor &jp1 = jpshare[1*nskip+jj+joff]; Predictor &jp2 = jpshare[2*nskip+jj+joff]; Predictor &jp3 = jpshare[3*nskip+jj+joff]; h6_kernel(ip, jp0, fo, eps2); h6_kernel(ip, jp1, fo, eps2); h6_kernel(ip, jp2, fo, eps2); h6_kernel(ip, jp3, fo, eps2); } #endif } } // horizontal reduce // __shared__ Force_dev foshare[NTHREADS]; Force_dev *foshare = (Force_dev *)jpshare; __syncthreads(); foshare[tid] = fo; __syncthreads(); if(nskip > 1){ if(tid < NTHREADS/2){ force_reduce(foshare[tid], foshare[tid + NTHREADS/2]); } __syncthreads(); } if(nskip > 2){ if(tid < NTHREADS/4){ force_reduce(foshare[tid], foshare[tid + NTHREADS/4]); } __syncthreads(); } if(nskip > 4){ if(tid < NTHREADS/8){ force_reduce(foshare[tid], foshare[tid + NTHREADS/8]); } __syncthreads(); } if(nskip > 8){ if(tid < NTHREADS/16){ force_reduce(foshare[tid], foshare[tid + NTHREADS/16]); } __syncthreads(); } if(nskip > 16){ if(tid < NTHREADS/32){ force_reduce(foshare[tid], foshare[tid + NTHREADS/32]); } __syncthreads(); } // store if(tid < niloc){ fo = foshare[tid]; force[iaddr][jbid] = fo; } } #ifdef GPU_REDUCE __global__ void reduce_kernel( Force_dev fo_dev[][NJBLOCKS_], Force_dev fo_reduce[]) { int bid = blockIdx.x; int tid = threadIdx.x; int ioff = bid * NREDUCE; #if 0 __shared__ Force_dev fo_share[NTHREADS]; #else __shared__ Predictor jpshare[NTHREADS]; Force_dev *fo_share = (Force_dev *)jpshare; #endif #if 0 fo_share[tid] = fo_dev[ioff][tid]; #else float4 *src = (float4 *)fo_dev[ioff]; float4 *dst = (float4 *)fo_share; for(int it=0; it<sizeof(Force_dev)/sizeof(float4); it++){ dst[tid] = src[tid]; dst += NTHREADS; src += NTHREADS; } #endif __syncthreads(); int n = NJBLOCKS_; while(n > 1){ n /= 2; if(tid % NJBLOCKS_ < n){ force_reduce(fo_share[tid], fo_share[tid + n]); } // __syncthreads(); } // Force_dev fotmp = fo_share[tid]; __syncthreads(); if(tid % NJBLOCKS_ == 0){ // fo_reduce[ioff + tid / NJBLOCKS_] = fo_share[tid]; fo_share[tid / NJBLOCKS_] = fo_share[tid]; // fo_share[tid / NJBLOCKS_] = fotmp; } __syncthreads(); #if 0 if(tid < NREDUCE){ fo_reduce[ioff + tid] = fo_share[tid]; } #else if(tid < NREDUCE * sizeof(Force_dev) / sizeof(float)){ // (tid < 96) float *dst = (float *)&fo_reduce[ioff]; float *src = (float *)fo_share; dst[tid] = src[tid]; } #endif } #endif extern double wtime(); void calc_force( int nitot, int nj, float eps2, Predictor ipred[], Predictor jpred[], Force force[], double &t1, double &t_send, double &t_recv){ static Predictor 
*jp_dev = NULL; static Predictor *ip_dev = NULL; static Force_dev (*fo_dev)[NJBLOCKS_] = NULL; #ifdef GPU_REDUCE static Force_dev (*fo_reduce) = NULL; static Force_dev (*fo_host) = NULL; #else static Force_dev (*fo_host)[NJBLOCKS_] = NULL; #endif if(jp_dev == NULL){ // first call /* const int dev = 0; checkCudaErrors(hipSetDevice(dev)); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); printf("GPU: %s\n", deviceProp.name); */ hipMalloc((void **)&jp_dev, (nj + NTHREADS) * sizeof(Predictor)); hipMalloc((void **)&ip_dev, NIMAX * sizeof(Predictor)); hipMalloc((void **)&fo_dev, NIMAX * sizeof(*fo_dev)); hipMemset(fo_dev, 0, NIMAX * sizeof(*fo_dev)); #ifdef GPU_REDUCE hipMalloc((void **)&fo_reduce, NIMAX * sizeof(*fo_reduce)); #endif hipHostMalloc((void **)&fo_host, NIMAX * sizeof(*fo_host)); } hipMemcpy(jp_dev, jpred, nj * sizeof(Predictor), hipMemcpyHostToDevice); t1 = wtime(); int nimax = NIMAX; for(int ioff=0; ioff<nitot; ioff+=nimax){ int ni = ::min(nimax, nitot-ioff); double t2 = wtime(); hipMemcpy(ip_dev, ipred+ioff, ni * sizeof(Predictor), hipMemcpyHostToDevice); double t3 = wtime(); t_send += t3 - t2; // kernel call int niblocks = 1 + (ni-1) / NTHREADS; dim3 grid(niblocks, NJBLOCKS, 1); dim3 threads(NTHREADS, 1, 1); // std::cerr << "call h6_gravity " << niblocks << std::endl; //int sharedMemSize = NTHREADS * sizeof(Predictor); // h6_gravity <<< grid, threads, sharedMemSize >>> // (ni, nj, ip_dev, jp_dev, fo_dev, eps2); hipLaunchKernelGGL(( h6_gravity) , dim3(grid), dim3(threads) , 0, 0, ni, nj, ip_dev, jp_dev, fo_dev, eps2); // CUDA_SAFE_THREAD_SYNC(); hipDeviceSynchronize(); #ifdef GPU_REDUCE dim3 grid_reduce(1 + (ni-1)/NREDUCE, 1, 1); hipLaunchKernelGGL(( reduce_kernel) , dim3(grid_reduce), dim3(threads) , 0, 0, fo_dev, fo_reduce); // hipDeviceSynchronize(); // CUDA_SAFE_THREAD_SYNC(); hipDeviceSynchronize(); double t4 = wtime(); hipMemcpy(fo_host, fo_reduce, ni * sizeof(*fo_reduce), hipMemcpyDeviceToHost); double t5 = wtime(); t_recv += t5 - t4; for(int i=0; i<ni; i++){ Force f; // 0 flashed by the constructer Force_dev &fo = fo_host[i]; f.acc.x = float2_reduce(fo.acc[0]); f.acc.y = float2_reduce(fo.acc[1]); f.acc.z = float2_reduce(fo.acc[2]); // f.pot = float2_reduce(fo.pot); f.jrk.x = fo.jrk[0]; f.jrk.y = fo.jrk[1]; f.jrk.z = fo.jrk[2]; f.snp.x = fo.snp[0]; f.snp.y = fo.snp[1]; f.snp.z = fo.snp[2]; force[ioff + i] = f; } #else hipMemcpy(fo_host, fo_dev, ni * sizeof(*fo_dev), hipMemcpyDeviceToHost); // std::cerr << "done" << std::endl; for(int i=0; i<ni; i++){ Force f; // 0 flashed by the constructer for(int jb=0; jb<NJBLOCKS; jb++){ Force_dev &fo = fo_host[i][jb]; f.acc.x += float2_reduce(fo.acc[0]); f.acc.y += float2_reduce(fo.acc[1]); f.acc.z += float2_reduce(fo.acc[2]); f.pot -= float2_reduce(fo.pot); f.jrk.x += fo.jrk[0]; f.jrk.y += fo.jrk[1]; f.jrk.z += fo.jrk[2]; f.snp.x += fo.snp[0]; f.snp.y += fo.snp[1]; f.snp.z += fo.snp[2]; } force[ioff + i] = f; } #endif } } __global__ void pot_kernel( int js, int je, float eps2, Posm posm[], float2 pot[]){ int bid = blockIdx.x; int tid = threadIdx.x; int iaddr = tid + NTHREADS * bid; Posm ip = posm[iaddr]; float2 poti = make_float2(0.f, 0.f); for(int j=js; j<je; j+=NTHREADS){ __shared__ Posm posmshare[NTHREADS]; __syncthreads(); posmshare[tid] = posm[j + tid]; __syncthreads(); int njj = NTHREADS < je-j ? 
NTHREADS : je-j;
        for(int jj=0; jj< njj; jj++){
            Posm &jp = posmshare[jj];
            float dx = float2_sub(jp.pos[0], ip.pos[0]);
            float dy = float2_sub(jp.pos[1], ip.pos[1]);
            float dz = float2_sub(jp.pos[2], ip.pos[2]);
            float r2 = eps2 + dx*dx + dy*dy + dz*dz;
            float mrinv = jp.mass * rsqrtf(r2);
            if(r2 > eps2) poti = float2_accum(poti, mrinv);
        }
    }
    pot[iaddr] = poti;
}

void calc_pot(
        int ni,
        int js,
        int je,
        float eps2,
        Posm posm[],
        double dpot[]){
    Posm *posm_dev;
    float2 *pot, *pot_dev;
    hipMalloc((void **)&posm_dev, (ni+NTHREADS) * sizeof(Posm));
    hipMalloc((void **)&pot_dev, (ni+NTHREADS) * sizeof(float2));
    hipHostMalloc((void **)&pot, (ni+NTHREADS) * sizeof(float2));
    hipMemcpy(posm_dev, posm, ni * sizeof(Posm), hipMemcpyHostToDevice);
    int nblocks = 1 + (ni-1) / NTHREADS;
    dim3 grid(nblocks, 1, 1);
    dim3 threads(NTHREADS, 1, 1);
    int sharedMemSize = NTHREADS * sizeof(Posm);
    hipLaunchKernelGGL(( pot_kernel) , dim3(grid), dim3(threads), sharedMemSize , 0, js, je, eps2, posm_dev, pot_dev);
    // CUDA_SAFE_THREAD_SYNC();
    hipDeviceSynchronize();
    hipMemcpy(pot, pot_dev, ni * sizeof(float2), hipMemcpyDeviceToHost);
    for(int i=0; i<ni; i++){
        dpot[i] = -float2_reduce(pot[i]);
    }
    hipFree(posm_dev);
    hipFree(pot_dev);
    hipHostFree(pot);
}

void CUDA_MPI_Init(int myRank){
    int numGPU;
    checkCudaErrors(hipGetDeviceCount(&numGPU));
    const int dev = myRank % numGPU;
    checkCudaErrors(hipSetDevice(dev));
    hipDeviceProp_t deviceProp;
    checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev));
    printf("Rank %02d : GPU %d : %s\n", myRank, dev, deviceProp.name);
    hipFuncSetCacheConfig(h6_gravity, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(reduce_kernel, hipFuncCachePreferShared);
    hipFuncSetCacheConfig(pot_kernel, hipFuncCachePreferShared);
}
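The force and potential sums in the file above go through float2_accum, float2_reduce and float2_sub, which are declared in hermite6-gpu.h and not shown in this dump. The sketch below is an assumption about what such two-float (error-compensated) helpers typically look like, using a Knuth/Neumaier two-sum; it is illustrative only, not the header's actual definitions.

// Hedged sketch: possible definitions of the two-float helpers used above.
// Each float2 carries a (high, low) pair of a compensated running sum.
#include <cuda_runtime.h>
#include <math.h>

__host__ __device__ inline float2 float2_accum(float2 sum, float x)
{
    // Two-sum: add x to sum.x and push the rounding error into sum.y.
    float t = sum.x + x;
    if (fabsf(sum.x) >= fabsf(x))
        sum.y += (sum.x - t) + x;
    else
        sum.y += (x - t) + sum.x;
    sum.x = t;
    return sum;
}

__host__ __device__ inline float float2_reduce(float2 sum)
{
    return sum.x + sum.y;             // collapse (high, low) into one float
}

__host__ __device__ inline float float2_sub(float2 a, float2 b)
{
    return (a.x - b.x) + (a.y - b.y); // difference of two split values
}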
f3112a404286f7f494cafb48838897c252ee888f.cu
#define __CUDA #include <cstdio> // #include <iostream> //#include <cutil.h> #include <helper_cuda.h> /* #include <helper_cuda_drvapi.h> #include <helper_cuda_gl.h> #include <helper_functions.h> #include <helper_image.h> #include <helper_math.h> #include <helper_string.h> #include <helper_timer.h> */ #include "hermite6-gpu.h" #define NTHREADS 128 //#define NJBLOCKS 16 // GF 8800 GTS 512 //#define NJBLOCKS_ 16 //#define NJBLOCKS 30 // GF 280 GTX //#define NJBLOCKS_ 32 #define NJBLOCKS 30 // GF 480 GTX #define NJBLOCKS_ 32 #define NREDUCE (NTHREADS/NJBLOCKS_) #define NIBLOCKS 32 #define NIMAX (NTHREADS * NIBLOCKS) // 2048 #define GPU_REDUCE struct Force_dev{ float2 acc[3]; // 6 // float2 pot; float jrk[3]; // 9 // float pad0; float snp[3]; // 12 // float pad1; __device__ Force_dev(){ // acc[0] = acc[1] = acc[2] = pot = make_float2(0.f, 0.f); acc[0] = acc[1] = acc[2] = make_float2(0.f, 0.f); jrk[0] = jrk[1] = jrk[2] = snp[0] = snp[1] = snp[2] = 0.f; } }; __device__ void force_reduce(Force_dev &fl, Force_dev &fr){ #pragma unroll for(int k=0; k<3; k++){ fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].x); fl.acc[k] = float2_accum(fl.acc[k], fr.acc[k].y); fl.jrk[k] += fr.jrk[k]; fl.snp[k] += fr.snp[k]; } // fl.pot = float2_accum(fl.pot, fr.pot.x); // fl.pot = float2_accum(fl.pot, fr.pot.y); } __device__ void h6_kernel( const Predictor &ip, const Predictor &jp, Force_dev &fo, float eps2){ #if 0 float dx = float2_sub(jp.pos[0], ip.pos[0]); float dy = float2_sub(jp.pos[1], ip.pos[1]); float dz = float2_sub(jp.pos[2], ip.pos[2]); float dvx = jp.vel[0] - ip.vel[0]; float dvy = jp.vel[1] - ip.vel[1]; float dvz = jp.vel[2] - ip.vel[2]; float dax = jp.acc[0] - ip.acc[0]; float day = jp.acc[1] - ip.acc[1]; float daz = jp.acc[2] - ip.acc[2]; #else float dx = (jp.posH.x - ip.posH.x) + (jp.posL.x - ip.posL.x); float dy = (jp.posH.y - ip.posH.y) + (jp.posL.y - ip.posL.y); float dz = (jp.posH.z - ip.posH.z) + (jp.posL.z - ip.posL.z); float dvx = jp.vel.x - ip.vel.x; float dvy = jp.vel.y - ip.vel.y; float dvz = jp.vel.z - ip.vel.z; float dax = jp.acc.x - ip.acc.x; float day = jp.acc.y - ip.acc.y; float daz = jp.acc.z - ip.acc.z; #endif float r2 = eps2 + dx*dx + dy*dy + dz*dz; float drdv = dx*dvx + dy*dvy + dz*dvz; float dvdv = dvx*dvx + dvy*dvy + dvz*dvz; float drda = dx*dax + dy*day + dz*daz; float rinv1 = rsqrtf(r2); float rinv2 = rinv1 * rinv1; float alpha = (drdv)*rinv2; float beta = (dvdv + drda)*rinv2 + alpha*alpha; // rinv1 *= jp.mass; rinv1 *= jp.posH.w; float rinv3 = rinv1 * rinv2; // float pot = rinv1; float ax = rinv3*dx; float ay = rinv3*dy; float az = rinv3*dz; float jx = rinv3*dvx + (-3.f*alpha)*ax; float jy = rinv3*dvy + (-3.f*alpha)*ay; float jz = rinv3*dvz + (-3.f*alpha)*az; float sx = rinv3*dax + (-6.f*alpha)*jx + (-3.f*beta)*ax; float sy = rinv3*day + (-6.f*alpha)*jy + (-3.f*beta)*ay; float sz = rinv3*daz + (-6.f*alpha)*jz + (-3.f*beta)*az; #if 0 if(r2 != eps2){ fo.pot = float2_accum(fo.pot, pot); } #endif fo.acc[0] = float2_accum(fo.acc[0], ax); fo.acc[1] = float2_accum(fo.acc[1], ay); fo.acc[2] = float2_accum(fo.acc[2], az); fo.jrk[0] += jx; fo.jrk[1] += jy; fo.jrk[2] += jz; fo.snp[0] += sx; fo.snp[1] += sy; fo.snp[2] += sz; } __global__ void h6_gravity( int ni, int nj, Predictor ipred[], Predictor jpred[], Force_dev force[][NJBLOCKS_], float eps2){ int ibid = blockIdx.x; int jbid = blockIdx.y; int tid = threadIdx.x; int iaddr = tid + NTHREADS * ibid; int jstart = (nj * (jbid )) / NJBLOCKS; int jend = (nj * (jbid+1)) / NJBLOCKS; // small kernel opt int nskip = 1; int niloc = ni - NTHREADS * 
ibid; if(niloc <= NTHREADS/2) nskip = 2; if(niloc <= NTHREADS/4) nskip = 4; if(niloc <= NTHREADS/8) nskip = 8; if(niloc <= NTHREADS/16) nskip = 16; if(niloc <= NTHREADS/32) nskip = 32; int joff = tid / (NTHREADS/nskip); __shared__ Predictor jpshare[NTHREADS]; Force_dev fo; Predictor ip = ipred[tid % (NTHREADS/nskip) + NTHREADS * ibid]; for(int j=jstart; j<jend; j+=NTHREADS){ __syncthreads(); #if 0 jpshare[tid] = jpred[j+tid]; #else float4 *src = (float4 *)&jpred[j]; float4 *dst = (float4 *)jpshare; for(int it=0; it<sizeof(Predictor)/sizeof(float4); it++){ dst[tid] = src[tid]; dst += NTHREADS; src += NTHREADS; } #endif __syncthreads(); if(jend-j < NTHREADS){ for(int jj=0; jj<jend-j; jj+=nskip){ Predictor &jp = jpshare[jj+joff]; if(jj+joff < jend-j) h6_kernel(ip, jp, fo, eps2); } }else{ #if 0 #pragma unroll for(int jj=0; jj<NTHREADS; jj+=nskip){ Predictor &jp = jpshare[jj+joff]; h6_kernel(ip, jp, fo, eps2); } #else for(int jj=0; jj<NTHREADS; jj+=4*nskip){ Predictor &jp0 = jpshare[0*nskip+jj+joff]; Predictor &jp1 = jpshare[1*nskip+jj+joff]; Predictor &jp2 = jpshare[2*nskip+jj+joff]; Predictor &jp3 = jpshare[3*nskip+jj+joff]; h6_kernel(ip, jp0, fo, eps2); h6_kernel(ip, jp1, fo, eps2); h6_kernel(ip, jp2, fo, eps2); h6_kernel(ip, jp3, fo, eps2); } #endif } } // horizontal reduce // __shared__ Force_dev foshare[NTHREADS]; Force_dev *foshare = (Force_dev *)jpshare; __syncthreads(); foshare[tid] = fo; __syncthreads(); if(nskip > 1){ if(tid < NTHREADS/2){ force_reduce(foshare[tid], foshare[tid + NTHREADS/2]); } __syncthreads(); } if(nskip > 2){ if(tid < NTHREADS/4){ force_reduce(foshare[tid], foshare[tid + NTHREADS/4]); } __syncthreads(); } if(nskip > 4){ if(tid < NTHREADS/8){ force_reduce(foshare[tid], foshare[tid + NTHREADS/8]); } __syncthreads(); } if(nskip > 8){ if(tid < NTHREADS/16){ force_reduce(foshare[tid], foshare[tid + NTHREADS/16]); } __syncthreads(); } if(nskip > 16){ if(tid < NTHREADS/32){ force_reduce(foshare[tid], foshare[tid + NTHREADS/32]); } __syncthreads(); } // store if(tid < niloc){ fo = foshare[tid]; force[iaddr][jbid] = fo; } } #ifdef GPU_REDUCE __global__ void reduce_kernel( Force_dev fo_dev[][NJBLOCKS_], Force_dev fo_reduce[]) { int bid = blockIdx.x; int tid = threadIdx.x; int ioff = bid * NREDUCE; #if 0 __shared__ Force_dev fo_share[NTHREADS]; #else __shared__ Predictor jpshare[NTHREADS]; Force_dev *fo_share = (Force_dev *)jpshare; #endif #if 0 fo_share[tid] = fo_dev[ioff][tid]; #else float4 *src = (float4 *)fo_dev[ioff]; float4 *dst = (float4 *)fo_share; for(int it=0; it<sizeof(Force_dev)/sizeof(float4); it++){ dst[tid] = src[tid]; dst += NTHREADS; src += NTHREADS; } #endif __syncthreads(); int n = NJBLOCKS_; while(n > 1){ n /= 2; if(tid % NJBLOCKS_ < n){ force_reduce(fo_share[tid], fo_share[tid + n]); } // __syncthreads(); } // Force_dev fotmp = fo_share[tid]; __syncthreads(); if(tid % NJBLOCKS_ == 0){ // fo_reduce[ioff + tid / NJBLOCKS_] = fo_share[tid]; fo_share[tid / NJBLOCKS_] = fo_share[tid]; // fo_share[tid / NJBLOCKS_] = fotmp; } __syncthreads(); #if 0 if(tid < NREDUCE){ fo_reduce[ioff + tid] = fo_share[tid]; } #else if(tid < NREDUCE * sizeof(Force_dev) / sizeof(float)){ // (tid < 96) float *dst = (float *)&fo_reduce[ioff]; float *src = (float *)fo_share; dst[tid] = src[tid]; } #endif } #endif extern double wtime(); void calc_force( int nitot, int nj, float eps2, Predictor ipred[], Predictor jpred[], Force force[], double &t1, double &t_send, double &t_recv){ static Predictor *jp_dev = NULL; static Predictor *ip_dev = NULL; static Force_dev (*fo_dev)[NJBLOCKS_] = 
NULL; #ifdef GPU_REDUCE static Force_dev (*fo_reduce) = NULL; static Force_dev (*fo_host) = NULL; #else static Force_dev (*fo_host)[NJBLOCKS_] = NULL; #endif if(jp_dev == NULL){ // first call /* const int dev = 0; checkCudaErrors(cudaSetDevice(dev)); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); printf("GPU: %s\n", deviceProp.name); */ cudaMalloc((void **)&jp_dev, (nj + NTHREADS) * sizeof(Predictor)); cudaMalloc((void **)&ip_dev, NIMAX * sizeof(Predictor)); cudaMalloc((void **)&fo_dev, NIMAX * sizeof(*fo_dev)); cudaMemset(fo_dev, 0, NIMAX * sizeof(*fo_dev)); #ifdef GPU_REDUCE cudaMalloc((void **)&fo_reduce, NIMAX * sizeof(*fo_reduce)); #endif cudaMallocHost((void **)&fo_host, NIMAX * sizeof(*fo_host)); } cudaMemcpy(jp_dev, jpred, nj * sizeof(Predictor), cudaMemcpyHostToDevice); t1 = wtime(); int nimax = NIMAX; for(int ioff=0; ioff<nitot; ioff+=nimax){ int ni = std::min(nimax, nitot-ioff); double t2 = wtime(); cudaMemcpy(ip_dev, ipred+ioff, ni * sizeof(Predictor), cudaMemcpyHostToDevice); double t3 = wtime(); t_send += t3 - t2; // kernel call int niblocks = 1 + (ni-1) / NTHREADS; dim3 grid(niblocks, NJBLOCKS, 1); dim3 threads(NTHREADS, 1, 1); // std::cerr << "call h6_gravity " << niblocks << std::endl; //int sharedMemSize = NTHREADS * sizeof(Predictor); // h6_gravity <<< grid, threads, sharedMemSize >>> // (ni, nj, ip_dev, jp_dev, fo_dev, eps2); h6_gravity <<< grid, threads >>> (ni, nj, ip_dev, jp_dev, fo_dev, eps2); // CUDA_SAFE_THREAD_SYNC(); cudaThreadSynchronize(); #ifdef GPU_REDUCE dim3 grid_reduce(1 + (ni-1)/NREDUCE, 1, 1); reduce_kernel <<< grid_reduce, threads >>> (fo_dev, fo_reduce); // cudaThreadSynchronize(); // CUDA_SAFE_THREAD_SYNC(); cudaThreadSynchronize(); double t4 = wtime(); cudaMemcpy(fo_host, fo_reduce, ni * sizeof(*fo_reduce), cudaMemcpyDeviceToHost); double t5 = wtime(); t_recv += t5 - t4; for(int i=0; i<ni; i++){ Force f; // 0 flashed by the constructer Force_dev &fo = fo_host[i]; f.acc.x = float2_reduce(fo.acc[0]); f.acc.y = float2_reduce(fo.acc[1]); f.acc.z = float2_reduce(fo.acc[2]); // f.pot = float2_reduce(fo.pot); f.jrk.x = fo.jrk[0]; f.jrk.y = fo.jrk[1]; f.jrk.z = fo.jrk[2]; f.snp.x = fo.snp[0]; f.snp.y = fo.snp[1]; f.snp.z = fo.snp[2]; force[ioff + i] = f; } #else cudaMemcpy(fo_host, fo_dev, ni * sizeof(*fo_dev), cudaMemcpyDeviceToHost); // std::cerr << "done" << std::endl; for(int i=0; i<ni; i++){ Force f; // 0 flashed by the constructer for(int jb=0; jb<NJBLOCKS; jb++){ Force_dev &fo = fo_host[i][jb]; f.acc.x += float2_reduce(fo.acc[0]); f.acc.y += float2_reduce(fo.acc[1]); f.acc.z += float2_reduce(fo.acc[2]); f.pot -= float2_reduce(fo.pot); f.jrk.x += fo.jrk[0]; f.jrk.y += fo.jrk[1]; f.jrk.z += fo.jrk[2]; f.snp.x += fo.snp[0]; f.snp.y += fo.snp[1]; f.snp.z += fo.snp[2]; } force[ioff + i] = f; } #endif } } __global__ void pot_kernel( int js, int je, float eps2, Posm posm[], float2 pot[]){ int bid = blockIdx.x; int tid = threadIdx.x; int iaddr = tid + NTHREADS * bid; Posm ip = posm[iaddr]; float2 poti = make_float2(0.f, 0.f); for(int j=js; j<je; j+=NTHREADS){ __shared__ Posm posmshare[NTHREADS]; __syncthreads(); posmshare[tid] = posm[j + tid]; __syncthreads(); int njj = NTHREADS < je-j ? 
NTHREADS : je-j;
        for(int jj=0; jj< njj; jj++){
            Posm &jp = posmshare[jj];
            float dx = float2_sub(jp.pos[0], ip.pos[0]);
            float dy = float2_sub(jp.pos[1], ip.pos[1]);
            float dz = float2_sub(jp.pos[2], ip.pos[2]);
            float r2 = eps2 + dx*dx + dy*dy + dz*dz;
            float mrinv = jp.mass * rsqrtf(r2);
            if(r2 > eps2) poti = float2_accum(poti, mrinv);
        }
    }
    pot[iaddr] = poti;
}

void calc_pot(
        int ni,
        int js,
        int je,
        float eps2,
        Posm posm[],
        double dpot[]){
    Posm *posm_dev;
    float2 *pot, *pot_dev;
    cudaMalloc((void **)&posm_dev, (ni+NTHREADS) * sizeof(Posm));
    cudaMalloc((void **)&pot_dev, (ni+NTHREADS) * sizeof(float2));
    cudaMallocHost((void **)&pot, (ni+NTHREADS) * sizeof(float2));
    cudaMemcpy(posm_dev, posm, ni * sizeof(Posm), cudaMemcpyHostToDevice);
    int nblocks = 1 + (ni-1) / NTHREADS;
    dim3 grid(nblocks, 1, 1);
    dim3 threads(NTHREADS, 1, 1);
    int sharedMemSize = NTHREADS * sizeof(Posm);
    pot_kernel <<< grid, threads, sharedMemSize >>> (js, je, eps2, posm_dev, pot_dev);
    // CUDA_SAFE_THREAD_SYNC();
    cudaThreadSynchronize();
    cudaMemcpy(pot, pot_dev, ni * sizeof(float2), cudaMemcpyDeviceToHost);
    for(int i=0; i<ni; i++){
        dpot[i] = -float2_reduce(pot[i]);
    }
    cudaFree(posm_dev);
    cudaFree(pot_dev);
    cudaFreeHost(pot);
}

void CUDA_MPI_Init(int myRank){
    int numGPU;
    checkCudaErrors(cudaGetDeviceCount(&numGPU));
    const int dev = myRank % numGPU;
    checkCudaErrors(cudaSetDevice(dev));
    cudaDeviceProp deviceProp;
    checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Rank %02d : GPU %d : %s\n", myRank, dev, deviceProp.name);
    cudaFuncSetCacheConfig(h6_gravity, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(reduce_kernel, cudaFuncCachePreferShared);
    cudaFuncSetCacheConfig(pot_kernel, cudaFuncCachePreferShared);
}
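For reference, h6_gravity in the file above splits the j-particles across the NJBLOCKS blocks of the grid's y dimension, so force[iaddr][jbid] holds only a partial force that still has to be summed over jbid (by reduce_kernel when GPU_REDUCE is defined, otherwise on the host in calc_force). The small host-side check below verifies that partition arithmetic; nj is an arbitrary example value.

// Standalone illustration of the j-range split used by h6_gravity.
#include <assert.h>
#include <stdio.h>

int main(void)
{
    const int NJBLOCKS = 30;                  // matches the #define above
    const int nj = 100000;                    // example only

    int covered = 0;
    for (int jbid = 0; jbid < NJBLOCKS; ++jbid) {
        int jstart = (nj * (jbid    )) / NJBLOCKS;
        int jend   = (nj * (jbid + 1)) / NJBLOCKS;
        covered += jend - jstart;             // ranges are contiguous and disjoint
    }
    assert(covered == nj);                    // every j index is visited exactly once
    printf("nj = %d split across %d j-blocks\n", nj, NJBLOCKS);
    return 0;
}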
2245e5ece8b60b28bcab5b344d9cac90e77407df.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated c Wed Nov 14 22:53:47 2012 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce( /*int n,*/ int i, cuFloatComplex* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce __global__ void magma_clarf_kernel( int m, cuFloatComplex *v, cuFloatComplex *tau, cuFloatComplex *c, int ldc, float *xnorm ) { if ( !MAGMA_C_EQUAL(*tau, MAGMA_C_ZERO) ) { const int i = threadIdx.x; cuFloatComplex *dc = c + blockIdx.x * ldc; __shared__ cuFloatComplex sum[ BLOCK_SIZE ]; /* w := v' * C */ sum[i] = MAGMA_C_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ){ if (j==0) sum[i] += MAGMA_C_MUL( MAGMA_C_ONE, dc[j] ); else sum[i] += MAGMA_C_MUL( MAGMA_C_CNJG( v[j] ), dc[j] ); } sum_reduce< BLOCK_SIZE >( i, sum ); /* C := C - v * w */ __syncthreads(); cuFloatComplex z__1 = - MAGMA_C_CNJG(*tau) * sum[0]; for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) { if (j==0) dc[j] += z__1; else dc[j] += z__1 * v[j]; } __syncthreads(); /* Adjust the rest of the column norms */ if (i==0){ float temp = MAGMA_C_ABS( dc[0] ) / xnorm[blockIdx.x]; temp = (temp + 1.) * (1. - temp); xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp); } } } /* Apply a complex elementary reflector H to a complex M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v' where tau is a complex scalar and v is a complex vector. If tau = 0, then H is taken to be the unit matrix. To apply H' (the conjugate transpose of H), supply conjg(tau) instead tau. */ extern "C" void magma_clarf_gpu(int m, int n, cuFloatComplex *v, cuFloatComplex *tau, cuFloatComplex *c, int ldc, float *xnorm) { dim3 blocks( n ); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( magma_clarf_kernel), dim3(blocks), dim3(threads) , 0, 0, m, v, tau, c, ldc, xnorm); }
2245e5ece8b60b28bcab5b344d9cac90e77407df.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated c Wed Nov 14 22:53:47 2012 */ #include "common_magma.h" // 512 is maximum number of threads for CUDA capability 1.x #define BLOCK_SIZE 512 // ---------------------------------------- // Does sum reduction of array x, leaving total in x[0]. // Contents of x are destroyed in the process. // With k threads, can reduce array up to 2*k in size. // Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0) // Having n as template parameter allows compiler to evaluate some conditions at compile time. template< int n > __device__ void sum_reduce( /*int n,*/ int i, cuFloatComplex* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } // end sum_reduce __global__ void magma_clarf_kernel( int m, cuFloatComplex *v, cuFloatComplex *tau, cuFloatComplex *c, int ldc, float *xnorm ) { if ( !MAGMA_C_EQUAL(*tau, MAGMA_C_ZERO) ) { const int i = threadIdx.x; cuFloatComplex *dc = c + blockIdx.x * ldc; __shared__ cuFloatComplex sum[ BLOCK_SIZE ]; /* w := v' * C */ sum[i] = MAGMA_C_ZERO; for( int j = i; j < m; j += BLOCK_SIZE ){ if (j==0) sum[i] += MAGMA_C_MUL( MAGMA_C_ONE, dc[j] ); else sum[i] += MAGMA_C_MUL( MAGMA_C_CNJG( v[j] ), dc[j] ); } sum_reduce< BLOCK_SIZE >( i, sum ); /* C := C - v * w */ __syncthreads(); cuFloatComplex z__1 = - MAGMA_C_CNJG(*tau) * sum[0]; for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZE ) { if (j==0) dc[j] += z__1; else dc[j] += z__1 * v[j]; } __syncthreads(); /* Adjust the rest of the column norms */ if (i==0){ float temp = MAGMA_C_ABS( dc[0] ) / xnorm[blockIdx.x]; temp = (temp + 1.) * (1. - temp); xnorm[blockIdx.x] = xnorm[blockIdx.x] * sqrt(temp); } } } /* Apply a complex elementary reflector H to a complex M-by-N matrix C from the left. H is represented in the form H = I - tau * v * v' where tau is a complex scalar and v is a complex vector. If tau = 0, then H is taken to be the unit matrix. To apply H' (the conjugate transpose of H), supply conjg(tau) instead tau. */ extern "C" void magma_clarf_gpu(int m, int n, cuFloatComplex *v, cuFloatComplex *tau, cuFloatComplex *c, int ldc, float *xnorm) { dim3 blocks( n ); dim3 threads( BLOCK_SIZE ); magma_clarf_kernel<<< blocks, threads >>>( m, v, tau, c, ldc, xnorm); }
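As a cross-check of the arithmetic in magma_clarf_kernel above, the host-side sketch below applies the same reflector update to a single column of C with std::complex&lt;float&gt;, mirroring the kernel's conventions: v(0) is treated as 1 (the j==0 branches) and conj(tau) is used in the rank-1 update (the z__1 term). The column-norm downdate is omitted. This is a reference sketch for validation, not part of MAGMA.

// Reference sketch: c := c - conj(tau) * (v^H * c) * v for one column of C.
#include <complex>

void apply_reflector_column(int m,
                            const std::complex<float> *v,
                            std::complex<float> tau,
                            std::complex<float> *c)   // one column, length m
{
    // w = v^H * c, taking v(0) = 1 as the kernel does
    std::complex<float> w = c[0];
    for (int j = 1; j < m; ++j)
        w += std::conj(v[j]) * c[j];

    // rank-1 update with z = -conj(tau) * w, matching z__1 in the kernel
    std::complex<float> z = -std::conj(tau) * w;
    c[0] += z;
    for (int j = 1; j < m; ++j)
        c[j] += z * v[j];
}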
615dd544bfb39dae530cd504e8697b9e0be89868.hip
// !!! This is a file automatically generated by hipify!!! #include "thrust\host_vector.h" #include "thrust\device_vector.h" #include <thrust/count.h> #include <thrust/reduce.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include <time.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> using namespace std; struct Point{ float x,y; }; struct Circle{ float x,y,r; }; thrust::host_vector<thrust::host_vector<Point>> events; thrust::host_vector<Circle> circles; hipError_t CUDAfunction(int *c, const int *a, const int *b, unsigned int size); void kcc(thrust::host_vector<Point> &ev, int K, int maxIter, float radiusThreshold, float overfitPenalty, thrust::host_vector<Circle> &circles, float &err); void closestCircles(thrust::host_vector<Point> & ev, int K, int maxIter, bool initializeCirclesFirst, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles); void fitCircles(thrust::host_vector<Point> & ev, int K, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles); void findPoints(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, int &numChanges); void fitring(thrust::host_vector<Point> &points, Circle &circ); void pruneCircles(thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float radiusThreshold); float circleFitError(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float overfitPenalty, int K); float minCircleDist(Point p, thrust::host_vector<Circle> &circles, int &pos); __global__ void addKernel(int *c, int *a, int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } void kcc(thrust::host_vector<Point> & ev, int K, int maxIter, float radiusThreshold, float overfitPenalty, thrust::host_vector<Circle> &circles, float &err){ thrust::host_vector<int> points1, points2; thrust::host_vector<Circle> circles1, circles2; float err1, err2; closestCircles(ev, K, maxIter, true, points1, circles1); pruneCircles(points1, circles1, radiusThreshold); err1 = circleFitError(ev, points1, circles1, overfitPenalty, K); closestCircles(ev, K, maxIter, false, points2, circles2); pruneCircles(points2, circles2, radiusThreshold); err2 = circleFitError(ev, points2, circles2, overfitPenalty, K); if(err1 < err2){ circles = circles1; err = err1; }else{ circles = circles2; err = err2; } } //K-means extension for fitting circles to the data void closestCircles(thrust::host_vector<Point> & ev, int K, int maxIter, bool initializeCirclesFirst, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles){ int N = ev.size(); // TODO: Investigate better initialization techniques for better convergence? 
if(initializeCirclesFirst){ circles.resize(K); points.resize(N, 0); for(unsigned int i=0; i<circles.size(); ++i){ //circle centers are assumed to be in the range [-1.0, 1.0] circles[i].x = rand()/float(RAND_MAX) * 2.0f - 1.0f; circles[i].y = rand()/float(RAND_MAX) * 2.0f - 1.0f; circles[i].r = rand()/float(RAND_MAX); } }else{ points.resize(N , 0); thrust::host_vector<int> idx; //Do a random permutation for (int i=0; i<N; ++i) idx.push_back(i); // 1 2 3 4 5 6 7 8 9...N std::random_shuffle ( idx.begin(), idx.end() ); int cIdx = 0; for(int i=0; i<N; ++i){ points[idx[i]] = cIdx; ++cIdx; if(cIdx >= K) cIdx = 0; } fitCircles(ev, K, points, circles); } int numChanges = 1; while((numChanges > 0) && (maxIter > 0)){ findPoints(ev, points, circles, numChanges); fitCircles(ev, K, points, circles); --maxIter; } } //Function to fit circles to the given set of points void fitCircles(thrust::host_vector<Point> & ev, int K, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles){ circles.resize(K); //copy_if for(int i=0; i<K; ++i){ thrust::host_vector<Point> p; for(unsigned int j=0;j<points.size();++j) if(points[j] == i) p.push_back(ev[j]); //we are assuming that need atleast 4 points to fit a circle //(we can fit an exact circle to 3 non-collinear points, BTW!) if(p.size() < 4){ circles[i].x = circles[i].y = 0.0f; circles[i].r = 100.0f; }else{ fitring(p, circles[i]); } } } __global__ void findPointsKernel(Point *p, int *pInd, int sizePoint, Circle *c, int sizeCircle, bool *d_Change) { int id = threadIdx.x; if(id < sizePoint){ float minErr = FLT_MAX; int pos = -1; for(unsigned int i=0; i<sizeCircle; ++i){ float xa = p[id].x - c[i].x; float yb = p[id].y - c[i].y; float r = xa * xa + yb * yb - c[i].r * c[i].r; r = r * r; if(r < minErr){ minErr = r; pos = i; } } if(pInd[id] != pos){ pInd[id] = pos; d_Change[0] = true; } } } // assign points to their closest circles void findPoints(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, int &numChanges){ numChanges = 0; int minPos; int N = ev.size(); bool Change = false, * d_Change; thrust::device_vector<Point> d_ev = ev; thrust::device_vector<int> d_points = points; thrust::device_vector<Circle> d_circles = circles; hipMalloc(&d_Change, sizeof(bool)); hipMemcpy(d_Change, &Change, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( findPointsKernel), dim3(1), dim3(d_ev.size()), 0, 0, thrust::raw_pointer_cast( &d_ev[0]), thrust::raw_pointer_cast( &d_points[0]), d_ev.size(), thrust::raw_pointer_cast( &d_circles[0]), d_circles.size(), d_Change); points = d_points; hipMemcpy(&Change, d_Change, sizeof(bool), hipMemcpyDeviceToHost); if(Change) numChanges = 1; /*for(int i=0;i<N;++i){ minCircleDist(ev[i], circles, minPos); if(points[i] != minPos){ points[i] = minPos; ++numChanges; } }*/ } void fitring(thrust::host_vector<Point> &points, Circle &circ){ /* Fits the input data points to the equation x^2 + y^2 + a(1)*x + a(2)*y + a(3) = 0 But this function returns circle center and radius as output. So that one can use the equation (x - cx).^2 + (y - cy). 
^2 = R.^2 http://www.had2know.com/academics/best-fit-circle-least-squares.html */ //thrust::device_vector<Point> pp = points; //Point p = thrust::reduce(pp.begin(), pp.end()); //Transform float xx = 0, x = 0, yy = 0, y = 0, xy = 0, aux1 = 0, aux2 = 0; for(unsigned int i=0; i<points.size(); ++i){ float px = points[i].x, py = points[i].y; x += px; xx += px * px; y += py; yy += py * py; xy += px * py; aux1 += px * (px * px + py * py); aux2 += py * (px * px + py * py); } float matA[9], b[3], result[3], invertA[9]; /*if(p.x != x){cout<<"Problem!!!"<<endl; exit(0);} if(p.y != y){cout<<"Problem!!!"<<endl; exit(0);}*/ matA[0] = xx; matA[1] = xy; matA[2] = x; matA[3] = xy; matA[4] = yy; matA[5] = y; matA[6] = x; matA[7] = y; matA[8] = float(points.size()); b[0] = aux1; b[1] = aux2; b[2] = xx + yy; /*float determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2)) -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0)) +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0)); float invdet = 1/determinant; invertA(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet; invertA(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet; invertA(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet; invertA(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet; invertA(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet; invertA(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet; invertA(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet; invertA(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet; invertA(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;*/ float determinmatAnt = +matA[0]*(matA[4]*matA[8]-matA[7]*matA[5]) -matA[1]*(matA[3]*matA[8]-matA[5]*matA[6]) +matA[2]*(matA[3]*matA[7]-matA[4]*matA[6]); float invdet = 1.0f/determinmatAnt; invertA[0] = (matA[4]*matA[8]-matA[7]*matA[5])*invdet; invertA[3] = -(matA[1]*matA[8]-matA[2]*matA[7])*invdet; invertA[6] = (matA[1]*matA[5]-matA[2]*matA[4])*invdet; invertA[1] = -(matA[3]*matA[8]-matA[5]*matA[6])*invdet; invertA[4] = (matA[0]*matA[8]-matA[2]*matA[6])*invdet; invertA[7] = -(matA[0]*matA[5]-matA[3]*matA[2])*invdet; invertA[2] = (matA[3]*matA[7]-matA[6]*matA[4])*invdet; invertA[5] = -(matA[0]*matA[7]-matA[6]*matA[1])*invdet; invertA[8] = (matA[0]*matA[4]-matA[3]*matA[1])*invdet; result[0] = invertA[0] * b[0] + invertA[1] * b[1] + invertA[2] * b[2]; result[1] = invertA[3] * b[0] + invertA[4] * b[1] + invertA[5] * b[2]; result[2] = invertA[6] * b[0] + invertA[7] * b[1] + invertA[8] * b[2]; circ.x = 0.5f * result[0]; circ.y = 0.5f * result[1]; circ.r = sqrtf(result[2] + circ.x * circ.x + circ.y * circ.y); //circ.r = sqrtf(4 * result[2] + result[0] * result[0] + result[1] * result[1])/2.0f; //circ.r = sqrtf((result[0] * result[0]+result[1] * result[1])/4.0f-result[2]); /*x = X(:,1); y = X(:,2); a = [X, ones(size(x))] \ [-(x.^2 + y.^2)]; cx = -0.5 * a(1); cy = -0.5 * a(2); R = sqrt((a(1)^2+a(2)^2)/4-a(3));*/ } //Prune circles with very small radius and ones with less than 4 points. 
void pruneCircles(thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float radiusThreshold){ thrust::host_vector<Circle> prunedCircles; thrust::host_vector<int> prunedPoints; prunedPoints.resize(points.size(), 0); //set in 0 for(unsigned int i=0; i<circles.size();++i){ int count = 0; for(unsigned int j=0;j<points.size();++j){ if(points[j] == i) ++count;}; if(circles[i].r < radiusThreshold) continue; if(count < 5) continue; prunedCircles.push_back(circles[i]); for(unsigned int j=0;j<points.size();++j){ if(points[j] == i) prunedPoints[j] = i;}; } points = prunedPoints; circles = prunedCircles; } //Measure the error in circle fitting float circleFitError(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float overfitPenalty, int K){ float err = 0; int errPos = 0; for(unsigned int i=0;i<ev.size();++i) err += minCircleDist(ev[i], circles, errPos); err += overfitPenalty * K * K; return err; } //Function to find distance between a point from all circles float minCircleDist(Point p, thrust::host_vector<Circle> &circles, int &pos){ //Least Absolute Deviations measure //((x - a)^2 + (y - b)^2 - r^2)^2 float minErr = FLT_MAX; pos = -1; for(unsigned int i=0; i<circles.size(); ++i){ float xa = p.x - circles[i].x; float yb = p.y - circles[i].y; float r = xa * xa + yb * yb - circles[i].r * circles[i].r; r = r * r; if(r < minErr){ minErr = r; pos = i; } } return minErr; } hipError_t CUDAfunction() { /* initialize random seed: */ srand ((unsigned int)time(NULL)); hipError_t cudaStatus = hipSuccess; for(unsigned int i=0; i<events.size();++i){ int maxK = 5, minK = 2, maxIter = 100; float radiusThreshold = 0.1f, overfitPenalty = 0.001f; float error = FLT_MAX, newerror = FLT_MAX; for(int j=minK;j<=maxK;++j){ thrust::host_vector<Circle> circ; kcc(events[i], j, maxIter, radiusThreshold, overfitPenalty, circ, newerror); if(newerror < error) { error = newerror; //store circles circles = circ; } } cout<<"Event: "<<i<<endl; for(Circle c:circles) cout<<c.x<<" "<<c.y<<" "<<c.r<<endl; } //End elapsed time /*for(unsigned int i=0;i<events.size();++i){ cout<<"Event: "<<i<<endl; for(unsigned int j=0;j<circles.size();++j){ cout<<circles[j].x<<" "<<circles[j].y<<" "<<circles[j].r<<endl; } }*/ return cudaStatus; } /*// Helper function for using CUDA hipError_t CUDAfunction() { hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); exit(0); } thrust::host_vector<int> h_a; h_a.push_back(1); h_a.push_back(2); h_a.push_back(3); h_a.push_back(4); h_a.push_back(5); thrust::host_vector<int> h_b; h_b.push_back(10); h_b.push_back(20); h_b.push_back(30); h_b.push_back(40); h_b.push_back(50); thrust::host_vector<int> h_c; h_c.push_back(0); h_c.push_back(0); h_c.push_back(0); h_c.push_back(0); h_c.push_back(0); thrust::device_vector<int> d_a = h_a, d_b = h_b, d_c(50); //thrust::copy(d_c.begin(), d_c.end(), h_c.begin()); for(unsigned int i= 0; i < h_c.size(); ++i) { cout<<h_a[i]<< " " <<h_b[i]<< " " <<h_c[i]<<endl; } int * rawc = thrust::raw_pointer_cast(&d_c[0]); int * rawa = thrust::raw_pointer_cast(&d_a[0]); int * rawb = thrust::raw_pointer_cast(&d_b[0]); cout<<"Device"<<endl; for(unsigned int i= 0; i < d_a.size(); ++i) { cout<<d_a[i]<< " " <<d_c[i]<< " " <<d_b[i]<<endl; } // Launch a kernel on the GPU with one thread for each element. 
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(5), 0, 0, rawc, rawa, rawb); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); exit(0); } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); exit(0); } thrust::copy(d_c.begin(), d_c.begin() + 5, h_c.begin()); for(unsigned int i= 0; i < h_c.size(); ++i) { cout<<h_a[i]<< " " <<h_b[i]<< " " <<h_c[i]<<endl; } return cudaStatus; }*/ //Function to read the file void readFile(char *file){ fstream in(file, ios::in); int N, M; Point p; if(!in.is_open()){ cout<<"Could'nt load file "<<file<<endl; exit(0); } //Resize the array of events in>>N; events.resize(N); for(int i=0;i<N;++i){ //Read each set of points in>>M; for(int j=0;j<M;++j){ in>>p.x>>p.y; //Read the points events[i].push_back(p); //Store in the array } } } int main(int argc, char *argv[]){ if(argc < 2){ cout<<"Bad input!!!!! Should enter the name of file by console"<<endl; exit(0); } clock_t begin = clock(); readFile(argv[1]); hipError_t cudaStatus = CUDAfunction(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; cout<<elapsed_secs<<endl; return 0; }
615dd544bfb39dae530cd504e8697b9e0be89868.cu
#include "thrust\host_vector.h" #include "thrust\device_vector.h" #include <thrust/count.h> #include <thrust/reduce.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_runtime.h" #include <time.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> using namespace std; struct Point{ float x,y; }; struct Circle{ float x,y,r; }; thrust::host_vector<thrust::host_vector<Point>> events; thrust::host_vector<Circle> circles; cudaError_t CUDAfunction(int *c, const int *a, const int *b, unsigned int size); void kcc(thrust::host_vector<Point> &ev, int K, int maxIter, float radiusThreshold, float overfitPenalty, thrust::host_vector<Circle> &circles, float &err); void closestCircles(thrust::host_vector<Point> & ev, int K, int maxIter, bool initializeCirclesFirst, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles); void fitCircles(thrust::host_vector<Point> & ev, int K, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles); void findPoints(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, int &numChanges); void fitring(thrust::host_vector<Point> &points, Circle &circ); void pruneCircles(thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float radiusThreshold); float circleFitError(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float overfitPenalty, int K); float minCircleDist(Point p, thrust::host_vector<Circle> &circles, int &pos); __global__ void addKernel(int *c, int *a, int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } void kcc(thrust::host_vector<Point> & ev, int K, int maxIter, float radiusThreshold, float overfitPenalty, thrust::host_vector<Circle> &circles, float &err){ thrust::host_vector<int> points1, points2; thrust::host_vector<Circle> circles1, circles2; float err1, err2; closestCircles(ev, K, maxIter, true, points1, circles1); pruneCircles(points1, circles1, radiusThreshold); err1 = circleFitError(ev, points1, circles1, overfitPenalty, K); closestCircles(ev, K, maxIter, false, points2, circles2); pruneCircles(points2, circles2, radiusThreshold); err2 = circleFitError(ev, points2, circles2, overfitPenalty, K); if(err1 < err2){ circles = circles1; err = err1; }else{ circles = circles2; err = err2; } } //K-means extension for fitting circles to the data void closestCircles(thrust::host_vector<Point> & ev, int K, int maxIter, bool initializeCirclesFirst, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles){ int N = ev.size(); // TODO: Investigate better initialization techniques for better convergence? 
if(initializeCirclesFirst){ circles.resize(K); points.resize(N, 0); for(unsigned int i=0; i<circles.size(); ++i){ //circle centers are assumed to be in the range [-1.0, 1.0] circles[i].x = rand()/float(RAND_MAX) * 2.0f - 1.0f; circles[i].y = rand()/float(RAND_MAX) * 2.0f - 1.0f; circles[i].r = rand()/float(RAND_MAX); } }else{ points.resize(N , 0); thrust::host_vector<int> idx; //Do a random permutation for (int i=0; i<N; ++i) idx.push_back(i); // 1 2 3 4 5 6 7 8 9...N std::random_shuffle ( idx.begin(), idx.end() ); int cIdx = 0; for(int i=0; i<N; ++i){ points[idx[i]] = cIdx; ++cIdx; if(cIdx >= K) cIdx = 0; } fitCircles(ev, K, points, circles); } int numChanges = 1; while((numChanges > 0) && (maxIter > 0)){ findPoints(ev, points, circles, numChanges); fitCircles(ev, K, points, circles); --maxIter; } } //Function to fit circles to the given set of points void fitCircles(thrust::host_vector<Point> & ev, int K, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles){ circles.resize(K); //copy_if for(int i=0; i<K; ++i){ thrust::host_vector<Point> p; for(unsigned int j=0;j<points.size();++j) if(points[j] == i) p.push_back(ev[j]); //we are assuming that need atleast 4 points to fit a circle //(we can fit an exact circle to 3 non-collinear points, BTW!) if(p.size() < 4){ circles[i].x = circles[i].y = 0.0f; circles[i].r = 100.0f; }else{ fitring(p, circles[i]); } } } __global__ void findPointsKernel(Point *p, int *pInd, int sizePoint, Circle *c, int sizeCircle, bool *d_Change) { int id = threadIdx.x; if(id < sizePoint){ float minErr = FLT_MAX; int pos = -1; for(unsigned int i=0; i<sizeCircle; ++i){ float xa = p[id].x - c[i].x; float yb = p[id].y - c[i].y; float r = xa * xa + yb * yb - c[i].r * c[i].r; r = r * r; if(r < minErr){ minErr = r; pos = i; } } if(pInd[id] != pos){ pInd[id] = pos; d_Change[0] = true; } } } // assign points to their closest circles void findPoints(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, int &numChanges){ numChanges = 0; int minPos; int N = ev.size(); bool Change = false, * d_Change; thrust::device_vector<Point> d_ev = ev; thrust::device_vector<int> d_points = points; thrust::device_vector<Circle> d_circles = circles; cudaMalloc(&d_Change, sizeof(bool)); cudaMemcpy(d_Change, &Change, sizeof(bool), cudaMemcpyHostToDevice); findPointsKernel<<<1, d_ev.size()>>>(thrust::raw_pointer_cast( &d_ev[0]), thrust::raw_pointer_cast( &d_points[0]), d_ev.size(), thrust::raw_pointer_cast( &d_circles[0]), d_circles.size(), d_Change); points = d_points; cudaMemcpy(&Change, d_Change, sizeof(bool), cudaMemcpyDeviceToHost); if(Change) numChanges = 1; /*for(int i=0;i<N;++i){ minCircleDist(ev[i], circles, minPos); if(points[i] != minPos){ points[i] = minPos; ++numChanges; } }*/ } void fitring(thrust::host_vector<Point> &points, Circle &circ){ /* Fits the input data points to the equation x^2 + y^2 + a(1)*x + a(2)*y + a(3) = 0 But this function returns circle center and radius as output. So that one can use the equation (x - cx).^2 + (y - cy). 
^2 = R.^2 http://www.had2know.com/academics/best-fit-circle-least-squares.html */ //thrust::device_vector<Point> pp = points; //Point p = thrust::reduce(pp.begin(), pp.end()); //Transform float xx = 0, x = 0, yy = 0, y = 0, xy = 0, aux1 = 0, aux2 = 0; for(unsigned int i=0; i<points.size(); ++i){ float px = points[i].x, py = points[i].y; x += px; xx += px * px; y += py; yy += py * py; xy += px * py; aux1 += px * (px * px + py * py); aux2 += py * (px * px + py * py); } float matA[9], b[3], result[3], invertA[9]; /*if(p.x != x){cout<<"Problem!!!"<<endl; exit(0);} if(p.y != y){cout<<"Problem!!!"<<endl; exit(0);}*/ matA[0] = xx; matA[1] = xy; matA[2] = x; matA[3] = xy; matA[4] = yy; matA[5] = y; matA[6] = x; matA[7] = y; matA[8] = float(points.size()); b[0] = aux1; b[1] = aux2; b[2] = xx + yy; /*float determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2)) -A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0)) +A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0)); float invdet = 1/determinant; invertA(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet; invertA(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet; invertA(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet; invertA(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet; invertA(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet; invertA(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet; invertA(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet; invertA(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet; invertA(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;*/ float determinmatAnt = +matA[0]*(matA[4]*matA[8]-matA[7]*matA[5]) -matA[1]*(matA[3]*matA[8]-matA[5]*matA[6]) +matA[2]*(matA[3]*matA[7]-matA[4]*matA[6]); float invdet = 1.0f/determinmatAnt; invertA[0] = (matA[4]*matA[8]-matA[7]*matA[5])*invdet; invertA[3] = -(matA[1]*matA[8]-matA[2]*matA[7])*invdet; invertA[6] = (matA[1]*matA[5]-matA[2]*matA[4])*invdet; invertA[1] = -(matA[3]*matA[8]-matA[5]*matA[6])*invdet; invertA[4] = (matA[0]*matA[8]-matA[2]*matA[6])*invdet; invertA[7] = -(matA[0]*matA[5]-matA[3]*matA[2])*invdet; invertA[2] = (matA[3]*matA[7]-matA[6]*matA[4])*invdet; invertA[5] = -(matA[0]*matA[7]-matA[6]*matA[1])*invdet; invertA[8] = (matA[0]*matA[4]-matA[3]*matA[1])*invdet; result[0] = invertA[0] * b[0] + invertA[1] * b[1] + invertA[2] * b[2]; result[1] = invertA[3] * b[0] + invertA[4] * b[1] + invertA[5] * b[2]; result[2] = invertA[6] * b[0] + invertA[7] * b[1] + invertA[8] * b[2]; circ.x = 0.5f * result[0]; circ.y = 0.5f * result[1]; circ.r = sqrtf(result[2] + circ.x * circ.x + circ.y * circ.y); //circ.r = sqrtf(4 * result[2] + result[0] * result[0] + result[1] * result[1])/2.0f; //circ.r = sqrtf((result[0] * result[0]+result[1] * result[1])/4.0f-result[2]); /*x = X(:,1); y = X(:,2); a = [X, ones(size(x))] \ [-(x.^2 + y.^2)]; cx = -0.5 * a(1); cy = -0.5 * a(2); R = sqrt((a(1)^2+a(2)^2)/4-a(3));*/ } //Prune circles with very small radius and ones with less than 4 points. 
void pruneCircles(thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float radiusThreshold){ thrust::host_vector<Circle> prunedCircles; thrust::host_vector<int> prunedPoints; prunedPoints.resize(points.size(), 0); //set in 0 for(unsigned int i=0; i<circles.size();++i){ int count = 0; for(unsigned int j=0;j<points.size();++j){ if(points[j] == i) ++count;}; if(circles[i].r < radiusThreshold) continue; if(count < 5) continue; prunedCircles.push_back(circles[i]); for(unsigned int j=0;j<points.size();++j){ if(points[j] == i) prunedPoints[j] = i;}; } points = prunedPoints; circles = prunedCircles; } //Measure the error in circle fitting float circleFitError(thrust::host_vector<Point> & ev, thrust::host_vector<int> &points, thrust::host_vector<Circle> &circles, float overfitPenalty, int K){ float err = 0; int errPos = 0; for(unsigned int i=0;i<ev.size();++i) err += minCircleDist(ev[i], circles, errPos); err += overfitPenalty * K * K; return err; } //Function to find distance between a point from all circles float minCircleDist(Point p, thrust::host_vector<Circle> &circles, int &pos){ //Least Absolute Deviations measure //((x - a)^2 + (y - b)^2 - r^2)^2 float minErr = FLT_MAX; pos = -1; for(unsigned int i=0; i<circles.size(); ++i){ float xa = p.x - circles[i].x; float yb = p.y - circles[i].y; float r = xa * xa + yb * yb - circles[i].r * circles[i].r; r = r * r; if(r < minErr){ minErr = r; pos = i; } } return minErr; } cudaError_t CUDAfunction() { /* initialize random seed: */ srand ((unsigned int)time(NULL)); cudaError_t cudaStatus = cudaSuccess; for(unsigned int i=0; i<events.size();++i){ int maxK = 5, minK = 2, maxIter = 100; float radiusThreshold = 0.1f, overfitPenalty = 0.001f; float error = FLT_MAX, newerror = FLT_MAX; for(int j=minK;j<=maxK;++j){ thrust::host_vector<Circle> circ; kcc(events[i], j, maxIter, radiusThreshold, overfitPenalty, circ, newerror); if(newerror < error) { error = newerror; //store circles circles = circ; } } cout<<"Event: "<<i<<endl; for(Circle c:circles) cout<<c.x<<" "<<c.y<<" "<<c.r<<endl; } //End elapsed time /*for(unsigned int i=0;i<events.size();++i){ cout<<"Event: "<<i<<endl; for(unsigned int j=0;j<circles.size();++j){ cout<<circles[j].x<<" "<<circles[j].y<<" "<<circles[j].r<<endl; } }*/ return cudaStatus; } /*// Helper function for using CUDA cudaError_t CUDAfunction() { cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); exit(0); } thrust::host_vector<int> h_a; h_a.push_back(1); h_a.push_back(2); h_a.push_back(3); h_a.push_back(4); h_a.push_back(5); thrust::host_vector<int> h_b; h_b.push_back(10); h_b.push_back(20); h_b.push_back(30); h_b.push_back(40); h_b.push_back(50); thrust::host_vector<int> h_c; h_c.push_back(0); h_c.push_back(0); h_c.push_back(0); h_c.push_back(0); h_c.push_back(0); thrust::device_vector<int> d_a = h_a, d_b = h_b, d_c(50); //thrust::copy(d_c.begin(), d_c.end(), h_c.begin()); for(unsigned int i= 0; i < h_c.size(); ++i) { cout<<h_a[i]<< " " <<h_b[i]<< " " <<h_c[i]<<endl; } int * rawc = thrust::raw_pointer_cast(&d_c[0]); int * rawa = thrust::raw_pointer_cast(&d_a[0]); int * rawb = thrust::raw_pointer_cast(&d_b[0]); cout<<"Device"<<endl; for(unsigned int i= 0; i < d_a.size(); ++i) { cout<<d_a[i]<< " " <<d_c[i]<< " " <<d_b[i]<<endl; } // Launch a kernel on the GPU with one thread for each element. 
addKernel<<<1, 5>>>(rawc, rawa, rawb); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); exit(0); } thrust::copy(d_c.begin(), d_c.begin() + 5, h_c.begin()); for(unsigned int i= 0; i < h_c.size(); ++i) { cout<<h_a[i]<< " " <<h_b[i]<< " " <<h_c[i]<<endl; } return cudaStatus; }*/ //Function to read the file void readFile(char *file){ fstream in(file, ios::in); int N, M; Point p; if(!in.is_open()){ cout<<"Could'nt load file "<<file<<endl; exit(0); } //Resize the array of events in>>N; events.resize(N); for(int i=0;i<N;++i){ //Read each set of points in>>M; for(int j=0;j<M;++j){ in>>p.x>>p.y; //Read the points events[i].push_back(p); //Store in the array } } } int main(int argc, char *argv[]){ if(argc < 2){ cout<<"Bad input!!!!! Should enter the name of file by console"<<endl; exit(0); } clock_t begin = clock(); readFile(argv[1]); cudaError_t cudaStatus = CUDAfunction(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } clock_t end = clock(); double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC; cout<<elapsed_secs<<endl; return 0; }
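One limitation worth noting in the circle-fitting code above: findPoints launches findPointsKernel with a single block of d_ev.size() threads, so the launch only works while an event has no more points than the per-block thread limit (1024 on current hardware). If larger events are expected, a multi-block launch with a global thread index is one option. The sketch below is a hypothetical variant, not the author's code; it reuses the Point and Circle structs defined above.

// Hypothetical multi-block variant of findPointsKernel (sketch only).
#include <cfloat>   // FLT_MAX

__global__ void findPointsKernelMB(Point *p, int *pInd, int sizePoint,
                                   Circle *c, int sizeCircle, bool *d_Change)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;   // global point index
    if (id >= sizePoint) return;

    float minErr = FLT_MAX;
    int pos = -1;
    for (int i = 0; i < sizeCircle; ++i) {
        float xa = p[id].x - c[i].x;
        float yb = p[id].y - c[i].y;
        float r  = xa * xa + yb * yb - c[i].r * c[i].r;
        r = r * r;
        if (r < minErr) { minErr = r; pos = i; }
    }
    if (pInd[id] != pos) { pInd[id] = pos; d_Change[0] = true; }
}

// Corresponding launch (the block size of 256 is an arbitrary choice):
//   int threads = 256;
//   int blocks  = (sizePoint + threads - 1) / threads;
//   findPointsKernelMB<<<blocks, threads>>>(/* same arguments as before */);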
c7cb14c49186f2506cd4cc3360c897e310eb65b9.hip
// !!! This is a file automatically generated by hipify!!! /* * * pageableMemcpyHtoDSynchronous.cu * * Microdemo that illustrates how necessary CPU/GPU concurrency * is for a good-performance pageable memcpy. Identical to * pageableMemcpyHtoD.cu except the event synchronize is in a * place that breaks concurrency between the CPU and GPU. * * A pair of pinned staging buffers are allocated, and after the first * staging buffer has been filled, the GPU pulls from one while the * CPU fills the other. CUDA events are used for synchronization. * * This implementation uses the SSE-optimized memcpy of memcpy16.cpp, * so for simplicity, it requires host pointers to be 16-byte aligned. * * Build with: nvcc -I ../chLib <options> pageableMemcpyHtoD16Synchronous.cu memcpy16.cpp * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chError.h" #include "chTimer.h" #define STAGING_BUFFER_SIZE 1048576 void *g_hostBuffers[2]; hipEvent_t g_events[2]; // these are already defined on some platforms - make our // own definitions that will work. 
#undef min #undef max #define min(a,b) ((a)<(b)?(a):(b)) #define max(a,b) ((b)<(a)?(a):(b)) extern bool memcpy16( void *_dst, const void *_src, size_t N ); void chMemcpyHtoD( void *device, const void *host, size_t N ) { hipError_t status; char *dst = (char *) device; const char *src = (const char *) host; int stagingIndex = 0; while ( N ) { size_t thisCopySize = min( N, STAGING_BUFFER_SIZE ); // cuda(EventSynchronize( g_events[stagingIndex] ) ); memcpy16( g_hostBuffers[stagingIndex], src, thisCopySize ); cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize, hipMemcpyHostToDevice, NULL ) ); cuda(EventRecord( g_events[1-stagingIndex], NULL ) ); cuda(EventSynchronize( g_events[1-stagingIndex] ) ); dst += thisCopySize; src += thisCopySize; N -= thisCopySize; stagingIndex = 1 - stagingIndex; } Error: return; } bool TestMemcpy( int *dstDevice, int *srcHost, const int *srcOriginal, size_t dstOffset, size_t srcOffset, size_t numInts ) { chMemcpyHtoD( dstDevice+dstOffset, srcOriginal+srcOffset, numInts*sizeof(int) ); hipMemcpy( srcHost, dstDevice+dstOffset, numInts*sizeof(int), hipMemcpyDeviceToHost ); for ( size_t i = 0; i < numInts; i++ ) { if ( srcHost[i] != srcOriginal[srcOffset+i] ) { return false; } } return true; } int main( int argc, char *argv[] ) { hipError_t status; int *deviceInt = 0; int *hostInt = 0; const size_t numInts = 32*1048576; const int cIterations = 10; int *testVector = 0; printf( "Pageable memcpy (16-byte aligned)... " ); fflush( stdout ); chTimerTimestamp start, stop; cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, hipHostMallocDefault ) ); cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, hipHostMallocDefault ) ); cuda(EventCreate( &g_events[0] ) ); cuda(EventRecord( g_events[0], 0 ) ); // so it is signaled on first synchronize cuda(EventCreate( &g_events[1] ) ); cuda(EventRecord( g_events[1], 0 ) ); // so it is signaled on first synchronize cuda(Malloc( &deviceInt, numInts*sizeof(int) ) ); cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) ); testVector = (int *) malloc( numInts*sizeof(int) ); if ( ! testVector ) { printf( "malloc() failed\n" ); return 1; } for ( size_t i = 0; i < numInts; i++ ) { testVector[i] = rand(); } if ( ! TestMemcpy( deviceInt, hostInt, testVector, 0, 0, numInts ) ) { goto Error; } for ( int i = 0; i < cIterations; i++ ) { size_t numInts4 = numInts / 4; size_t dstOffset = rand() % (numInts4-1); size_t srcOffset = rand() % (numInts4-1); size_t intsThisIteration = 1 + rand() % (numInts4-max(dstOffset,srcOffset)-1); dstOffset *= 4; srcOffset *= 4; intsThisIteration *= 4; if ( ! TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) { TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ); goto Error; } } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { chMemcpyHtoD( deviceInt, testVector, numInts*sizeof(int) ) ; } cuda(DeviceSynchronize() ); chTimerGetTime( &stop ); { double MBytes = cIterations*numInts*sizeof(int) / 1048576.0; double MBpers = MBytes / chTimerElapsedTime( &start, &stop ); printf( "%.2f MB/s\n", MBpers ); } hipFree( deviceInt ); hipHostFree( hostInt ); return 0; Error: printf( "Error\n" ); return 1; }
c7cb14c49186f2506cd4cc3360c897e310eb65b9.cu
/* * * pageableMemcpyHtoDSynchronous.cu * * Microdemo that illustrates how necessary CPU/GPU concurrency * is for a good-performance pageable memcpy. Identical to * pageableMemcpyHtoD.cu except the event synchronize is in a * place that breaks concurrency between the CPU and GPU. * * A pair of pinned staging buffers are allocated, and after the first * staging buffer has been filled, the GPU pulls from one while the * CPU fills the other. CUDA events are used for synchronization. * * This implementation uses the SSE-optimized memcpy of memcpy16.cpp, * so for simplicity, it requires host pointers to be 16-byte aligned. * * Build with: nvcc -I ../chLib <options> pageableMemcpyHtoD16Synchronous.cu memcpy16.cpp * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include "chError.h" #include "chTimer.h" #define STAGING_BUFFER_SIZE 1048576 void *g_hostBuffers[2]; cudaEvent_t g_events[2]; // these are already defined on some platforms - make our // own definitions that will work. 
#undef min #undef max #define min(a,b) ((a)<(b)?(a):(b)) #define max(a,b) ((b)<(a)?(a):(b)) extern bool memcpy16( void *_dst, const void *_src, size_t N ); void chMemcpyHtoD( void *device, const void *host, size_t N ) { cudaError_t status; char *dst = (char *) device; const char *src = (const char *) host; int stagingIndex = 0; while ( N ) { size_t thisCopySize = min( N, STAGING_BUFFER_SIZE ); // cuda(EventSynchronize( g_events[stagingIndex] ) ); memcpy16( g_hostBuffers[stagingIndex], src, thisCopySize ); cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize, cudaMemcpyHostToDevice, NULL ) ); cuda(EventRecord( g_events[1-stagingIndex], NULL ) ); cuda(EventSynchronize( g_events[1-stagingIndex] ) ); dst += thisCopySize; src += thisCopySize; N -= thisCopySize; stagingIndex = 1 - stagingIndex; } Error: return; } bool TestMemcpy( int *dstDevice, int *srcHost, const int *srcOriginal, size_t dstOffset, size_t srcOffset, size_t numInts ) { chMemcpyHtoD( dstDevice+dstOffset, srcOriginal+srcOffset, numInts*sizeof(int) ); cudaMemcpy( srcHost, dstDevice+dstOffset, numInts*sizeof(int), cudaMemcpyDeviceToHost ); for ( size_t i = 0; i < numInts; i++ ) { if ( srcHost[i] != srcOriginal[srcOffset+i] ) { return false; } } return true; } int main( int argc, char *argv[] ) { cudaError_t status; int *deviceInt = 0; int *hostInt = 0; const size_t numInts = 32*1048576; const int cIterations = 10; int *testVector = 0; printf( "Pageable memcpy (16-byte aligned)... " ); fflush( stdout ); chTimerTimestamp start, stop; cuda(HostAlloc( &g_hostBuffers[0], STAGING_BUFFER_SIZE, cudaHostAllocDefault ) ); cuda(HostAlloc( &g_hostBuffers[1], STAGING_BUFFER_SIZE, cudaHostAllocDefault ) ); cuda(EventCreate( &g_events[0] ) ); cuda(EventRecord( g_events[0], 0 ) ); // so it is signaled on first synchronize cuda(EventCreate( &g_events[1] ) ); cuda(EventRecord( g_events[1], 0 ) ); // so it is signaled on first synchronize cuda(Malloc( &deviceInt, numInts*sizeof(int) ) ); cuda(HostAlloc( &hostInt, numInts*sizeof(int), 0 ) ); testVector = (int *) malloc( numInts*sizeof(int) ); if ( ! testVector ) { printf( "malloc() failed\n" ); return 1; } for ( size_t i = 0; i < numInts; i++ ) { testVector[i] = rand(); } if ( ! TestMemcpy( deviceInt, hostInt, testVector, 0, 0, numInts ) ) { goto Error; } for ( int i = 0; i < cIterations; i++ ) { size_t numInts4 = numInts / 4; size_t dstOffset = rand() % (numInts4-1); size_t srcOffset = rand() % (numInts4-1); size_t intsThisIteration = 1 + rand() % (numInts4-max(dstOffset,srcOffset)-1); dstOffset *= 4; srcOffset *= 4; intsThisIteration *= 4; if ( ! TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ) ) { TestMemcpy( deviceInt, hostInt, testVector, dstOffset, srcOffset, intsThisIteration ); goto Error; } } chTimerGetTime( &start ); for ( int i = 0; i < cIterations; i++ ) { chMemcpyHtoD( deviceInt, testVector, numInts*sizeof(int) ) ; } cuda(DeviceSynchronize() ); chTimerGetTime( &stop ); { double MBytes = cIterations*numInts*sizeof(int) / 1048576.0; double MBpers = MBytes / chTimerElapsedTime( &start, &stop ); printf( "%.2f MB/s\n", MBpers ); } cudaFree( deviceInt ); cudaFreeHost( hostInt ); return 0; Error: printf( "Error\n" ); return 1; }
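For contrast with the deliberately synchronous event placement in the file above, the sketch below shows the overlapped scheme its header comment describes: wait on a staging buffer's own event before refilling it, and do not block right after recording, so the CPU fills one pinned buffer while the GPU drains the other. This is a reconstruction under those stated assumptions, not the exact pageableMemcpyHtoD.cu source; it reuses the same globals, the cuda() macro from chError.h, and memcpy16.

// Sketch: double-buffered pageable HtoD copy with CPU/GPU overlap restored.
void chMemcpyHtoD_overlapped( void *device, const void *host, size_t N )
{
    cudaError_t status;
    char *dst = (char *) device;
    const char *src = (const char *) host;
    int stagingIndex = 0;
    while ( N ) {
        size_t thisCopySize = min( N, STAGING_BUFFER_SIZE );
        // Block only until this buffer's previous async copy has completed,
        // so filling it does not stall behind the other buffer's transfer.
        cuda(EventSynchronize( g_events[stagingIndex] ) );
        memcpy16( g_hostBuffers[stagingIndex], src, thisCopySize );
        cuda(MemcpyAsync( dst, g_hostBuffers[stagingIndex], thisCopySize,
                          cudaMemcpyHostToDevice, NULL ) );
        // Record, but do not synchronize here; skipping that wait is what
        // lets the next iteration overlap with this copy.
        cuda(EventRecord( g_events[stagingIndex], NULL ) );
        dst += thisCopySize;
        src += thisCopySize;
        N -= thisCopySize;
        stagingIndex = 1 - stagingIndex;
    }
Error:
    return;
}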
54ca0c811c384820860ca232f6ac426b87058b51.hip
// !!! This is a file automatically generated by hipify!!! #include "grid.h" #include "solver_cuda.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> static void fail(char const *message); __global__ void red_black_ordering(float *d_grid, int grid_row, int value, float *d_diff1); void Solver_cuda::simulate_eqn_solver(){ int done = 0; float diff = 0.0; struct timeval start_time, end_time; // Variables to capture timing details int size = ( row * col * sizeof(float) ); hipError_t err = hipSuccess; // The code below is optional. We are going to create a diff array instead of // accumulating the diffs in a variable. If we didn't do that, we would need to // synchronize threads as they summed the diffs, and that's harder for beginners. float *diff1 = NULL; diff1 = (float*) malloc (size); for (int i= 0; i < row; i++) { for (int j = 0; j < col; j++) { diff1[i * col + j] = 0.0; } } int threads_per_block = 256; int blocks_per_grid = (int)( ( row*col + threads_per_block - 1) / threads_per_block); float *d_grid = NULL; err = hipMalloc((void **)&d_grid, size); if(err != hipSuccess) cout<<"Failed to allocate grid memory"<<endl; err = hipMemcpy(d_grid,array,size,hipMemcpyHostToDevice); if(err != hipSuccess) cout<<"Failed to copy grid to device"<<endl; float *d_diff1 = NULL; err = hipMalloc((void **)&d_diff1, size); if(err != hipSuccess) cout<<"Failed to allocate diff memory"<<endl; err = hipMemcpy(d_diff1,diff1,size,hipMemcpyHostToDevice); if(err != hipSuccess) cout<<"Failed to copy diff to device"<<endl; // Calculate start time for equation solver gettimeofday(&start_time, NULL); while (!done){ diff = 0.0; hipLaunchKernelGGL(( red_black_ordering) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_grid, row, RED, d_diff1); hipDeviceSynchronize(); hipLaunchKernelGGL(( red_black_ordering) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_grid, row, BLACK, d_diff1); hipDeviceSynchronize(); err = hipMemcpy(diff1,d_diff1,size,hipMemcpyDeviceToHost); if(err != hipSuccess) cout<<"Failed to copy diff from device"<<endl; for (int i= 0; i < row; i++) { for (int j = 0; j < col; j++) { diff = diff + diff1[i * col + j]; } } diff = diff/float (row*col); if(diff<tolerance) {done = 1;} else {done = 0;} } gettimeofday(&end_time, NULL); // Print the final Timing Statistics print_timing_statistics(start_time,end_time); err = hipMemcpy(array,d_grid,size,hipMemcpyDeviceToHost); if(err != hipSuccess) cout<<"Failed to copy grid from device"<<endl; err = hipFree(d_diff1); if(err != hipSuccess) cout<<"Failed to free diff memory"<<endl; err = hipFree(d_grid); if(err != hipSuccess) cout<<"Failed to free grid memory"<<endl; free(diff1); hipDeviceReset(); } // // __global__ void red_black_ordering(float *d_grid, int grid_row, int value, float *d_diff1) { int index = blockIdx.x * blockDim.x + threadIdx.x; float temp = 0.0; int i; int row = index/grid_row; int col = index - (row * grid_row); int flag = 0; if(index<grid_row*grid_row) { if(!((row==0)|| (row==grid_row-1) || (col==0) || (col==grid_row-1))) { if(!value) { if(((row%2==1) && (col%2==1)) || ((row%2==0) && (col%2==0))) { temp = d_grid[row*grid_row + col]; d_grid[row*grid_row + col] = 0.2 * (d_grid[row*grid_row + col] + d_grid[row*grid_row + col +1] + d_grid[row*grid_row + col - 1] + d_grid[(row-1)*grid_row + col] + d_grid[(row+1)*grid_row + col]); d_diff1[index] = fabs(temp - d_grid[row*grid_row + col]); } } else { if(((row%2==1) && (col%2==0)) || ((row%2==0) && (col%2==1))) { temp = d_grid[row*grid_row + col]; d_grid[row*grid_row + col] = 0.2 * 
(d_grid[row*grid_row + col] + d_grid[row*grid_row + col +1] + d_grid[row*grid_row + col - 1] + d_grid[(row-1)*grid_row + col] + d_grid[(row+1)*grid_row + col]); d_diff1[index] = fabs(temp - d_grid[row*grid_row + col]); } } } } }
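/*
 * Editorial sketch (not part of the original solver): the comment in
 * simulate_eqn_solver() notes that accumulating the differences in a single
 * variable would require the threads to synchronize. One way to do that is an
 * atomicAdd into a single device scalar, shown below with hypothetical names;
 * d_diff must be reset to 0.0f before each sweep, and the grid/block geometry
 * is the same as for red_black_ordering().
 */
__global__ void red_black_ordering_atomic(float *d_grid, int grid_row,
                                          int value, float *d_diff)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= grid_row * grid_row) return;
    int row = index / grid_row;
    int col = index - row * grid_row;
    if (row == 0 || row == grid_row - 1 || col == 0 || col == grid_row - 1) return;
    // Mirror the RED/BLACK split above: the !value branch updates the cells
    // whose row and column have the same parity, the other branch the rest.
    bool same_parity = ((row % 2) == (col % 2));
    if ((value == 0) != same_parity) return;
    float old = d_grid[row * grid_row + col];
    d_grid[row * grid_row + col] = 0.2f *
        (d_grid[row * grid_row + col] + d_grid[row * grid_row + col + 1] +
         d_grid[row * grid_row + col - 1] + d_grid[(row - 1) * grid_row + col] +
         d_grid[(row + 1) * grid_row + col]);
    // One atomic add per updated cell replaces the per-element diff array
    // and the device-to-host copy of it.
    atomicAdd(d_diff, fabsf(old - d_grid[row * grid_row + col]));
}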
54ca0c811c384820860ca232f6ac426b87058b51.cu
#include "grid.h" #include "solver_cuda.h" #include <cuda.h> #include <cuda_runtime.h> static void fail(char const *message); __global__ void red_black_ordering(float *d_grid, int grid_row, int value, float *d_diff1); void Solver_cuda::simulate_eqn_solver(){ int done = 0; float diff = 0.0; struct timeval start_time, end_time; // Variables to capture timing details int size = ( row * col * sizeof(float) ); cudaError_t err = cudaSuccess; // The code below is optional. We are going to create a diff array instead of // accumulating the diffs in a variable. If we didn't do that, we would need to // synchronize threads as they summed the diffs, and that's harder for beginners. float *diff1 = NULL; diff1 = (float*) malloc (size); for (int i= 0; i < row; i++) { for (int j = 0; j < col; j++) { diff1[i * col + j] = 0.0; } } int threads_per_block = 256; int blocks_per_grid = (int)( ( row*col + threads_per_block - 1) / threads_per_block); float *d_grid = NULL; err = cudaMalloc((void **)&d_grid, size); if(err != cudaSuccess) cout<<"Failed to allocate grid memory"<<endl; err = cudaMemcpy(d_grid,array,size,cudaMemcpyHostToDevice); if(err != cudaSuccess) cout<<"Failed to copy grid to device"<<endl; float *d_diff1 = NULL; err = cudaMalloc((void **)&d_diff1, size); if(err != cudaSuccess) cout<<"Failed to allocate diff memory"<<endl; err = cudaMemcpy(d_diff1,diff1,size,cudaMemcpyHostToDevice); if(err != cudaSuccess) cout<<"Failed to copy diff to device"<<endl; // Calculate start time for equation solver gettimeofday(&start_time, NULL); while (!done){ diff = 0.0; red_black_ordering <<< blocks_per_grid, threads_per_block >>> (d_grid, row, RED, d_diff1); cudaDeviceSynchronize(); red_black_ordering <<< blocks_per_grid, threads_per_block >>> (d_grid, row, BLACK, d_diff1); cudaDeviceSynchronize(); err = cudaMemcpy(diff1,d_diff1,size,cudaMemcpyDeviceToHost); if(err != cudaSuccess) cout<<"Failed to copy diff from device"<<endl; for (int i= 0; i < row; i++) { for (int j = 0; j < col; j++) { diff = diff + diff1[i * col + j]; } } diff = diff/float (row*col); if(diff<tolerance) {done = 1;} else {done = 0;} } gettimeofday(&end_time, NULL); // Print the final Timing Statistics print_timing_statistics(start_time,end_time); err = cudaMemcpy(array,d_grid,size,cudaMemcpyDeviceToHost); if(err != cudaSuccess) cout<<"Failed to copy grid from device"<<endl; err = cudaFree(d_diff1); if(err != cudaSuccess) cout<<"Failed to free diff memory"<<endl; err = cudaFree(d_grid); if(err != cudaSuccess) cout<<"Failed to free grid memory"<<endl; free(diff1); cudaDeviceReset(); } // // __global__ void red_black_ordering(float *d_grid, int grid_row, int value, float *d_diff1) { int index = blockIdx.x * blockDim.x + threadIdx.x; float temp = 0.0; int i; int row = index/grid_row; int col = index - (row * grid_row); int flag = 0; if(index<grid_row*grid_row) { if(!((row==0)|| (row==grid_row-1) || (col==0) || (col==grid_row-1))) { if(!value) { if(((row%2==1) && (col%2==1)) || ((row%2==0) && (col%2==0))) { temp = d_grid[row*grid_row + col]; d_grid[row*grid_row + col] = 0.2 * (d_grid[row*grid_row + col] + d_grid[row*grid_row + col +1] + d_grid[row*grid_row + col - 1] + d_grid[(row-1)*grid_row + col] + d_grid[(row+1)*grid_row + col]); d_diff1[index] = fabs(temp - d_grid[row*grid_row + col]); } } else { if(((row%2==1) && (col%2==0)) || ((row%2==0) && (col%2==1))) { temp = d_grid[row*grid_row + col]; d_grid[row*grid_row + col] = 0.2 * (d_grid[row*grid_row + col] + d_grid[row*grid_row + col +1] + d_grid[row*grid_row + col - 1] + d_grid[(row-1)*grid_row + 
col] + d_grid[(row+1)*grid_row + col]); d_diff1[index] = fabs(temp - d_grid[row*grid_row + col]); } } } } }
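/*
 * Editorial sketch (not part of the original solver): instead of copying the
 * whole diff1 array back to the host and summing it in a double loop on every
 * iteration, the sum can be formed on the device. The helper below uses Thrust
 * (hypothetical name, assumes the same d_diff1 allocation); the convergence
 * test then becomes  diff = average_diff(d_diff1, row, col);
 */
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>

static float average_diff(const float *d_diff1, int row, int col)
{
    thrust::device_ptr<const float> p(d_diff1);
    // Reduce on the device; only the scalar result crosses the bus.
    float total = thrust::reduce(p, p + (size_t)row * (size_t)col, 0.0f);
    return total / (float)(row * col);
}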
45ad2e49f9e9160ef7e989030db5cb2c8610cacc.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/ep/include/primitive/permute.h" #include "oneflow/core/ep/common/primitive/permute_impl.h" #include "oneflow/core/ep/cuda/cuda_stream.h" #include <hip/hip_runtime.h> namespace oneflow { namespace ep { namespace primitive { namespace permute { namespace internal { namespace { constexpr int32_t kMov4TileSize = 32; constexpr int32_t kMov2TileSize = 64; constexpr int32_t kBlockRows = 8; template<size_t num_dims, size_t movement_size, typename IndexType> __global__ void PermuteKernel(PermuteKernelParams<num_dims, IndexType> params) { using T = typename std::aligned_storage<movement_size, movement_size>::type; const T* src = reinterpret_cast<const T*>(params.src); T* dst = reinterpret_cast<T*>(params.dst); IndexType src_index[num_dims]; IndexType dst_index[num_dims]; CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) { params.dst_index_helper.OffsetToNdIndex(i, dst_index); #pragma unroll for (size_t dim = 0; dim < num_dims; ++dim) { src_index[params.permutation[dim]] = dst_index[dim]; } IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index); dst[i] = src[src_offset]; } } // (B, X, Y) -> (B, Y, X) // refer from https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/ template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> __global__ void BatchTransposeKernel(const void* src_ptr, void* dst_ptr, IndexType rows, IndexType cols, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { const IndexType src_rows = rows; const IndexType src_cols = cols; const IndexType dst_rows = cols; const IndexType dst_cols = rows; using T = typename std::aligned_storage<movement_size, movement_size>::type; __shared__ T tile[tile_size][tile_size + 1]; // To avoid bank conflict. const T* src = reinterpret_cast<const T*>(src_ptr); T* dst = reinterpret_cast<T*>(dst_ptr); IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType tile_index = i - batch_index * batch_num_tile; // equal to i % (num_tile_rows*num_tile_cols). the // flatten index of tile in a batch. const IndexType tile_row_index = tile_index / num_tile_cols; // the row index of tile in a batch. const IndexType tile_col_index = tile_index - tile_row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. 
const IndexType offset = batch_index * src_rows * src_cols; { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_col_index * tile_size + threadIdx.x; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_row_index * tile_size; if (col_in_matrix < src_cols && row_in_matrix < src_rows) { tile[row_in_tile][col_in_tile] = src[offset + row_in_matrix * src_cols + col_in_matrix]; } } } __syncthreads(); { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_row_index * tile_size + threadIdx.x; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_col_index * tile_size; if (col_in_matrix < dst_cols && row_in_matrix < dst_rows) { dst[offset + row_in_matrix * dst_cols + col_in_matrix] = tile[col_in_tile][row_in_tile]; } } } __syncthreads(); } } /* Here is a Movementsie=2 version of Batch Transpose. When the H W can be divided by 2. we can read data use movementsize=4, and write back as movementsize=4. */ template<size_t num_dims, size_t tile_size, typename IndexType> __global__ void BatchTransposeMovement2Kernel(const void* src_ptr, void* dst_ptr, IndexType rows, IndexType cols, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { const IndexType src_rows = rows; const IndexType src_cols = cols; const IndexType dst_rows = cols; const IndexType dst_cols = rows; static_assert(tile_size % 2 == 0, ""); using T_MOV2 = typename std::aligned_storage<2, 2>::type; using T_MOV4 = typename std::aligned_storage<4, 4>::type; const T_MOV4* src = reinterpret_cast<const T_MOV4*>(src_ptr); T_MOV4* dst = reinterpret_cast<T_MOV4*>(dst_ptr); // Use union structure to process Load and Store. __shared__ union { T_MOV2 tile_m2[tile_size][tile_size + 2]; // half [64][66] T_MOV4 tile_m4[tile_size][tile_size / 2 + 1]; // half2 [64][33] } tile_mem; IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType tile_index = i - batch_index * batch_num_tile; // equal to i % (num_tile_rows*num_tile_cols). the // flatten index of tile in a batch. const IndexType tile_row_index = tile_index / num_tile_cols; // the row index of tile in a batch. const IndexType tile_col_index = tile_index - tile_row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. 
const IndexType offset = batch_index * src_rows * src_cols; { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_col_index * tile_size + threadIdx.x * 2; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_row_index * tile_size; if (col_in_matrix < src_cols && row_in_matrix < src_rows) { tile_mem.tile_m4[row_in_tile][col_in_tile] = src[(offset + row_in_matrix * src_cols + col_in_matrix) / 2]; } } } __syncthreads(); { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_row_index * tile_size + threadIdx.x * 2; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_col_index * tile_size; union { T_MOV4 m4; T_MOV2 m2[2]; } tmp_storage; if (col_in_matrix < dst_cols && row_in_matrix < dst_rows) { tmp_storage.m2[0] = tile_mem.tile_m2[col_in_tile * 2][row_in_tile]; tmp_storage.m2[1] = tile_mem.tile_m2[col_in_tile * 2 + 1][row_in_tile]; dst[(offset + row_in_matrix * dst_cols + col_in_matrix) / 2] = tmp_storage.m4; } } } __syncthreads(); } } template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> void LaunchBatchTransposeKernel(hipStream_t& cuda_stream, const PermuteKernelParams<num_dims, IndexType>& params, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { IndexType num_tile_rows = (rows + tile_size - 1) / tile_size; IndexType num_tile_cols = (cols + tile_size - 1) / tile_size; const int32_t block_nums = num_batches * num_tile_rows * num_tile_cols; int32_t launched_block_nums = ::min(block_nums, kCudaMaxBlocksNum); if (tile_size == kMov2TileSize) { const int32_t half2_thread = tile_size / 2; // cause each thread process two half elements. hipLaunchKernelGGL(( BatchTransposeMovement2Kernel<num_dims, kMov2TileSize, IndexType>) , dim3(launched_block_nums), dim3(dim3(half2_thread, kBlockRows)), 0, cuda_stream, params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); // Set threads num as 32x8 cause each threads // process 4 elements to 64x66 half share memory. 
} else { hipLaunchKernelGGL(( BatchTransposeKernel<num_dims, movement_size, tile_size, IndexType>) , dim3(launched_block_nums), dim3(dim3(tile_size, kBlockRows)), 0, cuda_stream, params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); } } template<size_t tile_size, typename IndexType> bool CheckIfGreaterEqualThanTileSize(const IndexType& rows, const IndexType& cols) { if (rows < tile_size || cols < tile_size) { return false; } return true; } template<size_t num_dims, size_t tile_size, typename IndexType> bool CheckLaunchBatchTranspose(const int* permutation, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { if (CheckIfGreaterEqualThanTileSize<tile_size, IndexType>(rows, cols)) { if (num_batches == 1 && permutation[1] == 0 && permutation[0] == 1) { // 2d tensor case: (0, 1) -> (1, 0) return true; } else if (num_dims == 3 && permutation[2] == 1 && permutation[1] == 2) { // 3d tensor case: (0, 1, 2) -> (0, 2, 1) return true; } else { return false; } } return false; } template<typename IndexType, size_t movement_size> bool CheckUseMov2(const IndexType& rows, const IndexType& cols, const void* src, void* dst) { auto src_ptr = reinterpret_cast<std::uintptr_t>(src); auto dst_ptr = reinterpret_cast<std::uintptr_t>(dst); return (movement_size == 2) && (rows % 2 == 0) && (cols % 2 == 0) && (src_ptr % 4 == 0) && (dst_ptr % 4 == 0); } template<size_t num_dims, typename IndexType> void InferBatchTransposeShape(const int64_t* src_dims, IndexType* num_batches, IndexType* rows, IndexType* cols) { if (num_dims == 2) { *num_batches = 1; *rows = src_dims[0]; *cols = src_dims[1]; } else { *num_batches = src_dims[0]; *rows = src_dims[1]; *cols = src_dims[2]; } } template<size_t num_dims, size_t movement_size, typename IndexType> void LaunchKernel(Stream* stream, const int64_t* src_dims, const void* src, const int* permutation, void* dst, size_t count) { PermuteKernelParams<num_dims, IndexType> params = MakePermuteParams<num_dims, IndexType>(src_dims, src, permutation, dst, count); hipStream_t cuda_stream = stream->As<CudaStream>()->cuda_stream(); if (num_dims == 2 || num_dims == 3) { IndexType num_batches; IndexType rows; IndexType cols; InferBatchTransposeShape<num_dims, IndexType>(src_dims, &num_batches, &rows, &cols); if (CheckLaunchBatchTranspose<num_dims, kMov4TileSize>(params.permutation, num_batches, rows, cols)) { if (CheckUseMov2<IndexType, movement_size>(rows, cols, src, dst)) { LaunchBatchTransposeKernel<num_dims, 2, kMov2TileSize, IndexType>(cuda_stream, params, num_batches, rows, cols); } else { LaunchBatchTransposeKernel<num_dims, movement_size, kMov4TileSize, IndexType>( cuda_stream, params, num_batches, rows, cols); } } else { if (params.count == 0) { return; } hipLaunchKernelGGL(( PermuteKernel<num_dims, movement_size, IndexType>) , dim3(BlocksNum4ThreadsNum(params.count)), dim3(kCudaThreadsNumPerBlock), 0, cuda_stream, params); } } else { if (params.count == 0) { return; } hipLaunchKernelGGL(( PermuteKernel<num_dims, movement_size, IndexType>) , dim3(BlocksNum4ThreadsNum(params.count)), dim3(kCudaThreadsNumPerBlock), 0, cuda_stream, params); } } class PermuteImpl : public Permute { public: OF_DISALLOW_COPY_AND_MOVE(PermuteImpl); PermuteImpl() = default; ~PermuteImpl() override = default; using Permute::Launch; void Launch(Stream* stream, DataType data_type, size_t num_dims, const int64_t* src_dims, const void* src, const int* permutation, void* dst) override { SimplifyThenLaunch(stream, data_type, num_dims, src_dims, src, permutation, dst); } 
}; class PermuteFactoryImpl : public PermuteFactory { public: OF_DISALLOW_COPY_AND_MOVE(PermuteFactoryImpl); PermuteFactoryImpl() = default; ~PermuteFactoryImpl() override = default; std::unique_ptr<Permute> New(size_t max_num_dims) override { if (max_num_dims <= kMaxNumDims) { return std::unique_ptr<Permute>(new PermuteImpl()); } else { return nullptr; } } }; REGISTER_PRIMITIVE_FACTORY(DeviceType::kCUDA, PermuteFactory, PermuteFactoryImpl); } // namespace } // namespace internal } // namespace permute } // namespace primitive } // namespace ep } // namespace oneflow
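/*
 * Editorial sketch (not part of the original file): CheckUseMov2() above only
 * enables the movement_size==2 path when rows and cols are even and both
 * pointers are 4-byte aligned, because the kernel then moves pairs of 2-byte
 * elements as single 4-byte packets. The standalone kernel below (hypothetical
 * name) shows that packing requirement in isolation.
 */
__global__ void copy_half_pairs(const unsigned short *src, unsigned short *dst, int n)
{
    // Reinterpreting is only legal here because n is even and src/dst are
    // 4-byte aligned -- exactly the conditions CheckUseMov2 verifies.
    const unsigned int *src4 = reinterpret_cast<const unsigned int *>(src);
    unsigned int *dst4 = reinterpret_cast<unsigned int *>(dst);
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n / 2) dst4[i] = src4[i];  // one 4-byte load/store moves two elements
}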
45ad2e49f9e9160ef7e989030db5cb2c8610cacc.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/ep/include/primitive/permute.h" #include "oneflow/core/ep/common/primitive/permute_impl.h" #include "oneflow/core/ep/cuda/cuda_stream.h" #include <cuda_runtime.h> namespace oneflow { namespace ep { namespace primitive { namespace permute { namespace internal { namespace { constexpr int32_t kMov4TileSize = 32; constexpr int32_t kMov2TileSize = 64; constexpr int32_t kBlockRows = 8; template<size_t num_dims, size_t movement_size, typename IndexType> __global__ void PermuteKernel(PermuteKernelParams<num_dims, IndexType> params) { using T = typename std::aligned_storage<movement_size, movement_size>::type; const T* src = reinterpret_cast<const T*>(params.src); T* dst = reinterpret_cast<T*>(params.dst); IndexType src_index[num_dims]; IndexType dst_index[num_dims]; CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) { params.dst_index_helper.OffsetToNdIndex(i, dst_index); #pragma unroll for (size_t dim = 0; dim < num_dims; ++dim) { src_index[params.permutation[dim]] = dst_index[dim]; } IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index); dst[i] = src[src_offset]; } } // (B, X, Y) -> (B, Y, X) // refer from https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/ template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> __global__ void BatchTransposeKernel(const void* src_ptr, void* dst_ptr, IndexType rows, IndexType cols, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { const IndexType src_rows = rows; const IndexType src_cols = cols; const IndexType dst_rows = cols; const IndexType dst_cols = rows; using T = typename std::aligned_storage<movement_size, movement_size>::type; __shared__ T tile[tile_size][tile_size + 1]; // To avoid bank conflict. const T* src = reinterpret_cast<const T*>(src_ptr); T* dst = reinterpret_cast<T*>(dst_ptr); IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType tile_index = i - batch_index * batch_num_tile; // equal to i % (num_tile_rows*num_tile_cols). the // flatten index of tile in a batch. const IndexType tile_row_index = tile_index / num_tile_cols; // the row index of tile in a batch. const IndexType tile_col_index = tile_index - tile_row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. 
const IndexType offset = batch_index * src_rows * src_cols; { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_col_index * tile_size + threadIdx.x; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_row_index * tile_size; if (col_in_matrix < src_cols && row_in_matrix < src_rows) { tile[row_in_tile][col_in_tile] = src[offset + row_in_matrix * src_cols + col_in_matrix]; } } } __syncthreads(); { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_row_index * tile_size + threadIdx.x; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_col_index * tile_size; if (col_in_matrix < dst_cols && row_in_matrix < dst_rows) { dst[offset + row_in_matrix * dst_cols + col_in_matrix] = tile[col_in_tile][row_in_tile]; } } } __syncthreads(); } } /* Here is a Movementsie=2 version of Batch Transpose. When the H W can be divided by 2. we can read data use movementsize=4, and write back as movementsize=4. */ template<size_t num_dims, size_t tile_size, typename IndexType> __global__ void BatchTransposeMovement2Kernel(const void* src_ptr, void* dst_ptr, IndexType rows, IndexType cols, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { const IndexType src_rows = rows; const IndexType src_cols = cols; const IndexType dst_rows = cols; const IndexType dst_cols = rows; static_assert(tile_size % 2 == 0, ""); using T_MOV2 = typename std::aligned_storage<2, 2>::type; using T_MOV4 = typename std::aligned_storage<4, 4>::type; const T_MOV4* src = reinterpret_cast<const T_MOV4*>(src_ptr); T_MOV4* dst = reinterpret_cast<T_MOV4*>(dst_ptr); // Use union structure to process Load and Store. __shared__ union { T_MOV2 tile_m2[tile_size][tile_size + 2]; // half [64][66] T_MOV4 tile_m4[tile_size][tile_size / 2 + 1]; // half2 [64][33] } tile_mem; IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType tile_index = i - batch_index * batch_num_tile; // equal to i % (num_tile_rows*num_tile_cols). the // flatten index of tile in a batch. const IndexType tile_row_index = tile_index / num_tile_cols; // the row index of tile in a batch. const IndexType tile_col_index = tile_index - tile_row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. 
const IndexType offset = batch_index * src_rows * src_cols; { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_col_index * tile_size + threadIdx.x * 2; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_row_index * tile_size; if (col_in_matrix < src_cols && row_in_matrix < src_rows) { tile_mem.tile_m4[row_in_tile][col_in_tile] = src[(offset + row_in_matrix * src_cols + col_in_matrix) / 2]; } } } __syncthreads(); { IndexType col_in_tile = threadIdx.x; IndexType col_in_matrix = tile_row_index * tile_size + threadIdx.x * 2; #pragma unroll for (IndexType row_in_tile = threadIdx.y; row_in_tile < tile_size; row_in_tile += kBlockRows) { IndexType row_in_matrix = row_in_tile + tile_col_index * tile_size; union { T_MOV4 m4; T_MOV2 m2[2]; } tmp_storage; if (col_in_matrix < dst_cols && row_in_matrix < dst_rows) { tmp_storage.m2[0] = tile_mem.tile_m2[col_in_tile * 2][row_in_tile]; tmp_storage.m2[1] = tile_mem.tile_m2[col_in_tile * 2 + 1][row_in_tile]; dst[(offset + row_in_matrix * dst_cols + col_in_matrix) / 2] = tmp_storage.m4; } } } __syncthreads(); } } template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> void LaunchBatchTransposeKernel(cudaStream_t& cuda_stream, const PermuteKernelParams<num_dims, IndexType>& params, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { IndexType num_tile_rows = (rows + tile_size - 1) / tile_size; IndexType num_tile_cols = (cols + tile_size - 1) / tile_size; const int32_t block_nums = num_batches * num_tile_rows * num_tile_cols; int32_t launched_block_nums = std::min(block_nums, kCudaMaxBlocksNum); if (tile_size == kMov2TileSize) { const int32_t half2_thread = tile_size / 2; // cause each thread process two half elements. BatchTransposeMovement2Kernel<num_dims, kMov2TileSize, IndexType> <<<launched_block_nums, dim3(half2_thread, kBlockRows), 0, cuda_stream>>>( params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); // Set threads num as 32x8 cause each threads // process 4 elements to 64x66 half share memory. 
} else { BatchTransposeKernel<num_dims, movement_size, tile_size, IndexType> <<<launched_block_nums, dim3(tile_size, kBlockRows), 0, cuda_stream>>>( params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); } } template<size_t tile_size, typename IndexType> bool CheckIfGreaterEqualThanTileSize(const IndexType& rows, const IndexType& cols) { if (rows < tile_size || cols < tile_size) { return false; } return true; } template<size_t num_dims, size_t tile_size, typename IndexType> bool CheckLaunchBatchTranspose(const int* permutation, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { if (CheckIfGreaterEqualThanTileSize<tile_size, IndexType>(rows, cols)) { if (num_batches == 1 && permutation[1] == 0 && permutation[0] == 1) { // 2d tensor case: (0, 1) -> (1, 0) return true; } else if (num_dims == 3 && permutation[2] == 1 && permutation[1] == 2) { // 3d tensor case: (0, 1, 2) -> (0, 2, 1) return true; } else { return false; } } return false; } template<typename IndexType, size_t movement_size> bool CheckUseMov2(const IndexType& rows, const IndexType& cols, const void* src, void* dst) { auto src_ptr = reinterpret_cast<std::uintptr_t>(src); auto dst_ptr = reinterpret_cast<std::uintptr_t>(dst); return (movement_size == 2) && (rows % 2 == 0) && (cols % 2 == 0) && (src_ptr % 4 == 0) && (dst_ptr % 4 == 0); } template<size_t num_dims, typename IndexType> void InferBatchTransposeShape(const int64_t* src_dims, IndexType* num_batches, IndexType* rows, IndexType* cols) { if (num_dims == 2) { *num_batches = 1; *rows = src_dims[0]; *cols = src_dims[1]; } else { *num_batches = src_dims[0]; *rows = src_dims[1]; *cols = src_dims[2]; } } template<size_t num_dims, size_t movement_size, typename IndexType> void LaunchKernel(Stream* stream, const int64_t* src_dims, const void* src, const int* permutation, void* dst, size_t count) { PermuteKernelParams<num_dims, IndexType> params = MakePermuteParams<num_dims, IndexType>(src_dims, src, permutation, dst, count); cudaStream_t cuda_stream = stream->As<CudaStream>()->cuda_stream(); if (num_dims == 2 || num_dims == 3) { IndexType num_batches; IndexType rows; IndexType cols; InferBatchTransposeShape<num_dims, IndexType>(src_dims, &num_batches, &rows, &cols); if (CheckLaunchBatchTranspose<num_dims, kMov4TileSize>(params.permutation, num_batches, rows, cols)) { if (CheckUseMov2<IndexType, movement_size>(rows, cols, src, dst)) { LaunchBatchTransposeKernel<num_dims, 2, kMov2TileSize, IndexType>(cuda_stream, params, num_batches, rows, cols); } else { LaunchBatchTransposeKernel<num_dims, movement_size, kMov4TileSize, IndexType>( cuda_stream, params, num_batches, rows, cols); } } else { if (params.count == 0) { return; } PermuteKernel<num_dims, movement_size, IndexType> <<<BlocksNum4ThreadsNum(params.count), kCudaThreadsNumPerBlock, 0, cuda_stream>>>(params); } } else { if (params.count == 0) { return; } PermuteKernel<num_dims, movement_size, IndexType> <<<BlocksNum4ThreadsNum(params.count), kCudaThreadsNumPerBlock, 0, cuda_stream>>>(params); } } class PermuteImpl : public Permute { public: OF_DISALLOW_COPY_AND_MOVE(PermuteImpl); PermuteImpl() = default; ~PermuteImpl() override = default; using Permute::Launch; void Launch(Stream* stream, DataType data_type, size_t num_dims, const int64_t* src_dims, const void* src, const int* permutation, void* dst) override { SimplifyThenLaunch(stream, data_type, num_dims, src_dims, src, permutation, dst); } }; class PermuteFactoryImpl : public PermuteFactory { public: 
OF_DISALLOW_COPY_AND_MOVE(PermuteFactoryImpl); PermuteFactoryImpl() = default; ~PermuteFactoryImpl() override = default; std::unique_ptr<Permute> New(size_t max_num_dims) override { if (max_num_dims <= kMaxNumDims) { return std::unique_ptr<Permute>(new PermuteImpl()); } else { return nullptr; } } }; REGISTER_PRIMITIVE_FACTORY(DeviceType::kCUDA, PermuteFactory, PermuteFactoryImpl); } // namespace } // namespace internal } // namespace permute } // namespace primitive } // namespace ep } // namespace oneflow
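/*
 * Editorial sketch (not part of the original file): BatchTransposeKernel above
 * pads its shared-memory tile to [tile_size][tile_size + 1] "to avoid bank
 * conflict". The minimal single-matrix transpose below (hypothetical names,
 * after the NVIDIA transpose blog post cited above) shows the same trick: with
 * a 33-float row pitch, a warp reading tile[threadIdx.x][...] touches 32
 * different shared-memory banks. Launch with a dim3(TILE, BLOCK_ROWS) block and
 * a ((cols+31)/32, (rows+31)/32) grid, one block per 32x32 tile of the source.
 */
#define TILE 32
#define BLOCK_ROWS 8

__global__ void transpose_tiled(const float *src, float *dst, int rows, int cols)
{
    __shared__ float tile[TILE][TILE + 1];  // +1 column breaks bank conflicts

    int x = blockIdx.x * TILE + threadIdx.x;
    int y = blockIdx.y * TILE + threadIdx.y;
    for (int j = 0; j < TILE; j += BLOCK_ROWS) {
        if (x < cols && (y + j) < rows) {
            tile[threadIdx.y + j][threadIdx.x] = src[(y + j) * cols + x];
        }
    }
    __syncthreads();

    // Swap the block coordinates so the writes to dst stay coalesced.
    x = blockIdx.y * TILE + threadIdx.x;
    y = blockIdx.x * TILE + threadIdx.y;
    for (int j = 0; j < TILE; j += BLOCK_ROWS) {
        if (x < rows && (y + j) < cols) {
            dst[(y + j) * rows + x] = tile[threadIdx.x][threadIdx.y + j];
        }
    }
}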
1ae3d5abcbe51c76ef8490d48305136c7f12dea1.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
1ae3d5abcbe51c76ef8490d48305136c7f12dea1.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t,
        LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
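/*
 * Editorial sketch (not part of MegDNN): files like the pair above are emitted
 * by gen_cuda_conv_bias_kern_impls.py so that each heavyweight cutlass
 * specialization is compiled in its own translation unit. The pattern, shown
 * below with hypothetical names, is: a shared .cuinl header holds the template
 * definitions, and every generated .cu/.hip file pulls it in and emits exactly
 * one explicit instantiation, keeping per-file compile times and object sizes
 * small.
 */
// scale_kernel.cuinl -- shared template definition
template <typename T, int BlockSize>
__global__ void scale_kernel(T *data, T factor, int n) {
    int i = blockIdx.x * BlockSize + threadIdx.x;
    if (i < n) data[i] *= factor;
}

template <typename T, int BlockSize>
void run_scale(T *data, T factor, int n, cudaStream_t stream) {
    int blocks = (n + BlockSize - 1) / BlockSize;
    scale_kernel<T, BlockSize><<<blocks, BlockSize, 0, stream>>>(data, factor, n);
}

// scale_float_256.cu -- one generated file, one explicit instantiation
// #include "scale_kernel.cuinl"
template void run_scale<float, 256>(float *, float, int, cudaStream_t);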
f450e522e1850d589f4f472d80f5e2fab8a912e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Composite{((i0 + i1) + i2)}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0]; o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0]; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 + i1) + i2)}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_2(unsigned int numEls , const int dim0, const int dim1 , const float * i0_data, int i0_str_0, int i0_str_1 , const float * i1_data, int i1_str_0, int i1_str_1 , const float * i2_data, int i2_str_0, int i2_str_1 , float * o0_data, int o0_str_0, int o0_str_1 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos1 = ii % dim1; ii = ii / dim1; ii_i0_data += pos1 * i0_str_1; ii_i1_data += pos1 * i1_str_1; ii_i2_data += pos1 * i2_str_1; ii_o0_data += pos1 * o0_str_1; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0]; o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0]; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 + i1) + i2)}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = i0_data[i] + i1_data[i]; 
o0_i = V_DUMMY_ID__tmp1 + i2_data[i]; } o0_data[i] = o0_i; } } static void can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, float * o0_data, const int * o0_str) { numEls = dims[0]*dims[1]*1; int local_dims[2]; int local_str[3][2]; int local_ostr[1][2]; int nd_collapse = 2; for(int i=0;i<2;i++){//init new dim local_dims[i]=dims[i]; } for(int i=0;i<2;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[2][i]=i2_str[i]; } for(int i=0;i<2;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<3;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<3;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[2] = {1,1}; int nd_collapse_0[2] = {1,1}; can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(nd_collapse, local_dims, local_str[0], nd_collapse_0); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_0[i]==0) nd_collapse_[i]=0; } int nd_collapse_1[2] = {1,1}; can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_2[2] = {1,1}; can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(nd_collapse, local_dims, local_str[2], nd_collapse_2); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_2[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int 
j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(2,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, i2_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7cffec52bdceaa3cd1779e69bb20e2ba_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data)"); return -1; } return 0; } break; case 1: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7cffec52bdceaa3cd1779e69bb20e2ba_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; case 2: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], i0_data, 
local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1]); CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7cffec52bdceaa3cd1779e69bb20e2ba_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V1; __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_10: double __DUMMY_10; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; { py_V1 = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} if (py_V1 == Py_None) { V1 = NULL; } else { assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V1)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); V1 = (CudaNdarray*)py_V1; //std::cerr << "c_extract " << V1 << '\n'; if (V1->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V1->nd); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract " << V1 << " nd check passed\n"; assert(V1); Py_INCREF(py_V1); } else if (py_V1 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_2;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract done " << V1 << '\n'; } { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} START\n"; //standard elemwise size checks int dims[2] = {1,1}; int broadcasts_V3[2] = {0, 0}; int broadcasts_V5[2] = {0, 0}; int broadcasts_V7[2] = {0, 0}; //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V3\n"; if (2 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V3->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V5\n"; if (2 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V5->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. 
Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V7\n"; if (2 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V7->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } Py_XDECREF(V1); V1 = V3; Py_INCREF(V1); for (int i = 0; (i< 2) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 0, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_9;}; } else // no error { } } //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} END\n"; __label_9: double __DUMMY_9; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { //std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. 
return __failure; } }; } static int __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_executor(__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba* self) { return self->run(); } static void __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_destructor(void* executor, void* self) { delete ((__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (5 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 5, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba* struct_ptr = new __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_executor), struct_ptr, __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init7cffec52bdceaa3cd1779e69bb20e2ba(void){ (void) Py_InitModule("7cffec52bdceaa3cd1779e69bb20e2ba", MyMethods); }
f450e522e1850d589f4f472d80f5e2fab8a912e8.cu
#include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Composite{((i0 + i1) + i2)}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0]; o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0]; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 + i1) + i2)}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_2(unsigned int numEls , const int dim0, const int dim1 , const float * i0_data, int i0_str_0, int i0_str_1 , const float * i1_data, int i1_str_0, int i1_str_1 , const float * i2_data, int i2_str_0, int i2_str_1 , float * o0_data, int o0_str_0, int o0_str_1 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos1 = ii % dim1; ii = ii / dim1; ii_i0_data += pos1 * i0_str_1; ii_i1_data += pos1 * i1_str_1; ii_i2_data += pos1 * i2_str_1; ii_o0_data += pos1 * o0_str_1; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = ii_i0_data[0] + ii_i1_data[0]; o0_i = V_DUMMY_ID__tmp1 + ii_i2_data[0]; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 + i1) + i2)}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, matrix) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; V_DUMMY_ID__tmp1 = i0_data[i] + i1_data[i]; o0_i = V_DUMMY_ID__tmp1 + i2_data[i]; } o0_data[i] = o0_i; } } static void 
can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, float * o0_data, const int * o0_str) { numEls = dims[0]*dims[1]*1; int local_dims[2]; int local_str[3][2]; int local_ostr[1][2]; int nd_collapse = 2; for(int i=0;i<2;i++){//init new dim local_dims[i]=dims[i]; } for(int i=0;i<2;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[2][i]=i2_str[i]; } for(int i=0;i<2;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<3;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<3;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[2] = {1,1}; int nd_collapse_0[2] = {1,1}; can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(nd_collapse, local_dims, local_str[0], nd_collapse_0); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_0[i]==0) nd_collapse_[i]=0; } int nd_collapse_1[2] = {1,1}; can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_2[2] = {1,1}; can_collapse_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(nd_collapse, local_dims, local_str[2], nd_collapse_2); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_2[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int 
i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(2,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7cffec52bdceaa3cd1779e69bb20e2ba_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, o0_data)"); return -1; } return 0; } break; case 1: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7cffec52bdceaa3cd1779e69bb20e2ba_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; case 2: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1]); CNDA_THREAD_SYNC; 
cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_7cffec52bdceaa3cd1779e69bb20e2ba_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_7cffec52bdceaa3cd1779e69bb20e2ba_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], o0_data, local_ostr[0][0], local_ostr[0][1])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V1; __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_10: double __DUMMY_10; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; { py_V1 = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} if (py_V1 == Py_None) { V1 = NULL; } else { assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V1)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); V1 = (CudaNdarray*)py_V1; //std::cerr << "c_extract " << V1 << '\n'; if (V1->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V1->nd); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract " << V1 << " nd check passed\n"; assert(V1); Py_INCREF(py_V1); } else if (py_V1 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_2;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract done " << V1 << '\n'; } { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} START\n"; //standard elemwise size checks int dims[2] = {1,1}; int broadcasts_V3[2] = {0, 0}; int broadcasts_V5[2] = {0, 0}; int broadcasts_V7[2] = {0, 0}; //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V3\n"; if (2 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V3->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V5\n"; if (2 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V5->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. 
Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V7\n"; if (2 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V7->nd); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } Py_XDECREF(V1); V1 = V3; Py_INCREF(V1); for (int i = 0; (i< 2) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 0, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_9;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_7cffec52bdceaa3cd1779e69bb20e2ba_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 9; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_9;}; } else // no error { } } //std::cerr << "C_CODE Composite{((i0 + i1) + i2)} END\n"; __label_9: double __DUMMY_9; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { //std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. 
return __failure; } }; } static int __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_executor(__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba* self) { return self->run(); } static void __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_destructor(void* executor, void* self) { delete ((__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (5 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 5, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba* struct_ptr = new __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_executor), struct_ptr, __struct_compiled_op_7cffec52bdceaa3cd1779e69bb20e2ba_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init7cffec52bdceaa3cd1779e69bb20e2ba(void){ (void) Py_InitModule("7cffec52bdceaa3cd1779e69bb20e2ba", MyMethods); }
41063862d49714755344907c16537f82b6b42f35.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <cuda.h>

struct Index {
    int block, thread;
};

__global__ void prob_idx(Index id[]) {
    int b_idx = blockIdx.x;
    int t_idx = threadIdx.x;
    int b_dim = blockDim.x;
    int position = b_idx * b_dim + t_idx;
    id[position].block = b_idx;
    id[position].thread = t_idx;
};

int main() {
    Index* d;
    Index h[100];

    hipMalloc((void**) &d, 100 * sizeof(Index));

    int threadsPerBlock = 3;
    int blocksPerGrid = 4;
    int N = threadsPerBlock * blocksPerGrid;
    hipLaunchKernelGGL((prob_idx), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d);

    hipMemcpy(h, d, 100 * sizeof(Index), hipMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("h[%d]={block:%d, thread:%d}\n", i, h[i].block, h[i].thread);
    }

    hipFree(d);
    return 0;
}
41063862d49714755344907c16537f82b6b42f35.cu
#include <stdio.h>
#include <cuda.h>

struct Index {
    int block, thread;
};

__global__ void prob_idx(Index id[]) {
    int b_idx = blockIdx.x;
    int t_idx = threadIdx.x;
    int b_dim = blockDim.x;
    int position = b_idx * b_dim + t_idx;
    id[position].block = b_idx;
    id[position].thread = t_idx;
};

int main() {
    Index* d;
    Index h[100];

    cudaMalloc((void**) &d, 100 * sizeof(Index));

    int threadsPerBlock = 3;
    int blocksPerGrid = 4;
    int N = threadsPerBlock * blocksPerGrid;
    prob_idx<<<blocksPerGrid, threadsPerBlock>>>(d);

    cudaMemcpy(h, d, 100 * sizeof(Index), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("h[%d]={block:%d, thread:%d}\n", i, h[i].block, h[i].thread);
    }

    cudaFree(d);
    return 0;
}
bd566cf48f0975ef0a33721de1b9766fec40d4d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void GlobalAvePoolForward(const int spatial_dim, const Dtype* bottom_data, Dtype* top_data) { __shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS]; unsigned int tid = threadIdx.x; buffer[tid] = 0; __syncthreads(); for (int j = tid; j < spatial_dim; j += blockDim.x) { buffer[tid] += bottom_data[blockIdx.x * spatial_dim + j]; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tid < i) { buffer[threadIdx.x] += buffer[threadIdx.x + i]; } __syncthreads(); } if (tid == 0) { top_data[blockIdx.x] = buffer[0] / spatial_dim; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, 
const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = 0.; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: if (this->layer_param_.pooling_param().global_pooling()) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( GlobalAvePoolForward<Dtype>), dim3(bottom[0]->count(0, 2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(2), bottom_data, top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); } break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void GlobalAvePoolBackward(const int nthreads, const int spatial_dim, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; bottom_diff[index] = top_diff[n] / spatial_dim; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: if (this->layer_param_.pooling_param().global_pooling()) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( GlobalAvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->count(2), top_diff, bottom_diff); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); } break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
bd566cf48f0975ef0a33721de1b9766fec40d4d5.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; if (mask) { mask[index] = maxidx; } else { top_mask[index] = maxidx; } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void GlobalAvePoolForward(const int spatial_dim, const Dtype* bottom_data, Dtype* top_data) { __shared__ Dtype buffer[CAFFE_CUDA_NUM_THREADS]; unsigned int tid = threadIdx.x; buffer[tid] = 0; __syncthreads(); for (int j = tid; j < spatial_dim; j += blockDim.x) { buffer[tid] += bottom_data[blockIdx.x * spatial_dim + j]; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tid < i) { buffer[threadIdx.x] += buffer[threadIdx.x + i]; } __syncthreads(); } if (tid == 0) { top_data[blockIdx.x] = buffer[0] / spatial_dim; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* 
const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = 0.; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = (cumsum > 0.) ? cumvalues / cumsum : 0.; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask); break; case PoolingParameter_PoolMethod_AVE: if (this->layer_param_.pooling_param().global_pooling()) { // NOLINT_NEXT_LINE(whitespace/operators) GlobalAvePoolForward<Dtype><<<bottom[0]->count(0, 2), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->count(2), bottom_data, top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); } break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; } } } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename Dtype> __global__ void GlobalAvePoolBackward(const int nthreads, const int spatial_dim, const Dtype* top_diff, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; bottom_diff[index] = top_diff[n] / spatial_dim; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case PoolingParameter_PoolMethod_AVE: if (this->layer_param_.pooling_param().global_pooling()) { // NOLINT_NEXT_LINE(whitespace/operators) GlobalAvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->count(2), top_diff, bottom_diff); } else { // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); } break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
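The pooling kernels in this record all follow the same Caffe idiom: a CUDA_KERNEL_LOOP grid-stride loop over output elements whose flat index is decomposed into (n, c, ph, pw), launched with CAFFE_GET_BLOCKS(count) blocks of CAFFE_CUDA_NUM_THREADS threads. Those macros come from Caffe's own headers and are not part of this record, so the self-contained sketch below only reproduces the indexing pattern under illustrative names (KERNEL_LOOP, index_decomposition); it is not the Caffe definition itself.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// Grid-stride loop, equivalent in spirit to Caffe's CUDA_KERNEL_LOOP(index, n).
#define KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)

// Toy kernel: decompose the flat output index the same way MaxPoolForward does
// (index -> pw, ph, c, n) and store only the channel id of each output element.
__global__ void index_decomposition(const int nthreads, const int channels,
                                    const int pooled_height, const int pooled_width,
                                    int* out_channel) {
  KERNEL_LOOP(index, nthreads) {
    const int pw = index % pooled_width;
    const int ph = (index / pooled_width) % pooled_height;
    const int c  = (index / pooled_width / pooled_height) % channels;
    const int n  = index / pooled_width / pooled_height / channels;
    (void)pw; (void)ph; (void)n;   // a real pooling kernel would use all four coordinates
    out_channel[index] = c;
  }
}

int main() {
  const int num = 2, channels = 3, pooled_h = 4, pooled_w = 4;
  const int count = num * channels * pooled_h * pooled_w;
  int* d_out;
  cudaMalloc(&d_out, count * sizeof(int));
  const int threads = 256;                              // stand-in for CAFFE_CUDA_NUM_THREADS
  const int blocks  = (count + threads - 1) / threads;  // stand-in for CAFFE_GET_BLOCKS(count)
  index_decomposition<<<blocks, threads>>>(count, channels, pooled_h, pooled_w, d_out);
  std::vector<int> h_out(count);
  cudaMemcpy(h_out.data(), d_out, count * sizeof(int), cudaMemcpyDeviceToHost);
  printf("channel of output element 20: %d\n", h_out[20]);
  cudaFree(d_out);
  return 0;
}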
f81a2b47e71fabfab8690d2c40b8b97f4b7d25ca.hip
// !!! This is a file automatically generated by hipify!!! /** * CUDA C/C++ implementation for Accelerating Graph Betweenness Centrality for Sparse Graphs * * @author Ashwin Joisa * @author Praveen Gupta **/ //=============================================================================================// // Include header files #include <iostream> #include <hip/hip_runtime.h> // Include custom header file for implementation of Graphs #include "Graph.h" //=============================================================================================// #define MAX_THREAD_COUNT 1024 #define CEIL(a, b) ((a - 1) / b + 1) //=============================================================================================// using namespace std; //=============================================================================================// #define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); } float device_time_taken; void printTime(float ms) { int h = ms / (1000*3600); int m = (((int)ms) / (1000*60)) % 60; int s = (((int)ms) / 1000) % 60; int intMS = ms; intMS %= 1000; printf("Time Taken (Parallel) = %dh %dm %ds %dms\n", h, m, s, intMS); printf("Time Taken in milliseconds : %d\n", (int)ms); } // Catch Cuda errors inline void gpuAssert(hipError_t error, const char *file, int line, bool abort = false) { if (error != hipSuccess) { printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, hipGetErrorString(error)); printf("\nIn file :%s\nOn line: %d", file, line); if(abort) exit(-1); } } //=============================================================================================// __global__ void betweennessCentralityKernel(Graph *graph, double *bwCentrality, int nodeCount, int *sigma, int *distance, double *dependency) { int idx = threadIdx.x; if(idx >= nodeCount) return; __shared__ int s; __shared__ int current_depth; __shared__ bool done; if(idx == 0) { s = -1; // printf("Progress... %3d%%", 0); } __syncthreads(); while(s < nodeCount -1) { if(idx == 0) { ++s; // printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount); done = false; current_depth = -1; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(v == s) { distance[v] = 0; sigma[v] = 1; } else { distance[v] = INT_MAX; sigma[v] = 0; } dependency[v] = 0.0; } __syncthreads(); // BFS while(!done) { if(idx == 0){ current_depth++; } done = true; __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(distance[v] == current_depth) { for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w] == INT_MAX) { distance[w] = distance[v] + 1; done = false; } if(distance[w] == (distance[v] + 1)) { atomicAdd(&sigma[w], sigma[v]); } } } } __syncthreads(); } // Reverse BFS while(current_depth) { if(idx == 0){ current_depth--; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(distance[v] == current_depth) { for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w] == (distance[v] + 1)) { if (sigma[w] != 0) dependency[v] += (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]); } } if (v != s) { // Each shortest path is counted twice. So, each partial shortest path dependency is halved. 
bwCentrality[v] += dependency[v] / 2; } } } __syncthreads(); } } } double *betweennessCentrality(Graph *graph, int nodeCount) { double *bwCentrality = new double[nodeCount](); double *device_bwCentrality, *dependency; int *sigma, *distance; //TODO: Allocate device memory for bwCentrality catchCudaError(hipMalloc((void **)&device_bwCentrality, sizeof(double) * nodeCount)); catchCudaError(hipMalloc((void **)&sigma, sizeof(int) * nodeCount)); catchCudaError(hipMalloc((void **)&distance, sizeof(int) * nodeCount)); catchCudaError(hipMalloc((void **)&dependency, sizeof(double) * nodeCount)); catchCudaError(hipMemcpy(device_bwCentrality, bwCentrality, sizeof(double) * nodeCount, hipMemcpyHostToDevice)); // Timer hipEvent_t device_start, device_end; catchCudaError(hipEventCreate(&device_start)); catchCudaError(hipEventCreate(&device_end)); catchCudaError(hipEventRecord(device_start)); hipLaunchKernelGGL(( betweennessCentralityKernel), dim3(1), dim3(MAX_THREAD_COUNT), 0, 0, graph, device_bwCentrality, nodeCount, sigma, distance, dependency); hipDeviceSynchronize(); //End of progress bar cout << endl; // Timer catchCudaError(hipEventRecord(device_end)); catchCudaError(hipEventSynchronize(device_end)); hipEventElapsedTime(&device_time_taken, device_start, device_end); // Copy back and free memory catchCudaError(hipMemcpy(bwCentrality, device_bwCentrality, sizeof(double) * nodeCount, hipMemcpyDeviceToHost)); catchCudaError(hipFree(device_bwCentrality)); catchCudaError(hipFree(sigma)); catchCudaError(hipFree(dependency)); catchCudaError(hipFree(distance)); return bwCentrality; } int main(int argc, char *argv[]) { if (argc < 2) { cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n"; return 0; } char choice; cout << "Would you like to print the Graph Betweenness Centrality for all nodes? 
(y/n) "; cin >> choice; freopen(argv[1], "r", stdin); Graph *host_graph = new Graph(); Graph *device_graph; catchCudaError(hipMalloc((void **)&device_graph, sizeof(Graph))); host_graph->readGraph(); int nodeCount = host_graph->getNodeCount(); int edgeCount = host_graph->getEdgeCount(); catchCudaError(hipMemcpy(device_graph, host_graph, sizeof(Graph), hipMemcpyHostToDevice)); // Copy Adjancency List to device int *adjacencyList; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1))); catchCudaError(hipMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), hipMemcpyHostToDevice)); // Copy Adjancency List Pointers to device int *adjacencyListPointers; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1))); catchCudaError(hipMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), hipMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), hipMemcpyHostToDevice)); double *bwCentrality = betweennessCentrality(device_graph, nodeCount); double maxBetweenness = -1; for (int i = 0; i < nodeCount; i++) { maxBetweenness = max(maxBetweenness, bwCentrality[i]); if (choice == 'y' || choice == 'Y') printf("Node %d => Betweeness Centrality %0.2lf\n", i, bwCentrality[i]); } cout << endl; printf("\nMaximum Betweenness Centrality ==> %0.2lf\n", maxBetweenness); printTime(device_time_taken); if (argc == 3) { freopen(argv[2], "w", stdout); for (int i = 0; i < nodeCount; i++) cout << bwCentrality[i] << " "; cout << endl; } // Free all memory delete[] bwCentrality; catchCudaError(hipFree(adjacencyList)); catchCudaError(hipFree(adjacencyListPointers)); catchCudaError(hipFree(device_graph)); }
f81a2b47e71fabfab8690d2c40b8b97f4b7d25ca.cu
/** * CUDA C/C++ implementation for Accelerating Graph Betweenness Centrality for Sparse Graphs * * @author Ashwin Joisa * @author Praveen Gupta **/ //=============================================================================================// // Include header files #include <iostream> #include <cuda.h> // Include custom header file for implementation of Graphs #include "Graph.h" //=============================================================================================// #define MAX_THREAD_COUNT 1024 #define CEIL(a, b) ((a - 1) / b + 1) //=============================================================================================// using namespace std; //=============================================================================================// #define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); } float device_time_taken; void printTime(float ms) { int h = ms / (1000*3600); int m = (((int)ms) / (1000*60)) % 60; int s = (((int)ms) / 1000) % 60; int intMS = ms; intMS %= 1000; printf("Time Taken (Parallel) = %dh %dm %ds %dms\n", h, m, s, intMS); printf("Time Taken in milliseconds : %d\n", (int)ms); } // Catch Cuda errors inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false) { if (error != cudaSuccess) { printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, cudaGetErrorString(error)); printf("\nIn file :%s\nOn line: %d", file, line); if(abort) exit(-1); } } //=============================================================================================// __global__ void betweennessCentralityKernel(Graph *graph, double *bwCentrality, int nodeCount, int *sigma, int *distance, double *dependency) { int idx = threadIdx.x; if(idx >= nodeCount) return; __shared__ int s; __shared__ int current_depth; __shared__ bool done; if(idx == 0) { s = -1; // printf("Progress... %3d%%", 0); } __syncthreads(); while(s < nodeCount -1) { if(idx == 0) { ++s; // printf("\rProgress... %5.2f%%", (s+1)*100.0/nodeCount); done = false; current_depth = -1; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(v == s) { distance[v] = 0; sigma[v] = 1; } else { distance[v] = INT_MAX; sigma[v] = 0; } dependency[v] = 0.0; } __syncthreads(); // BFS while(!done) { if(idx == 0){ current_depth++; } done = true; __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(distance[v] == current_depth) { for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w] == INT_MAX) { distance[w] = distance[v] + 1; done = false; } if(distance[w] == (distance[v] + 1)) { atomicAdd(&sigma[w], sigma[v]); } } } } __syncthreads(); } // Reverse BFS while(current_depth) { if(idx == 0){ current_depth--; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(distance[v] == current_depth) { for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w] == (distance[v] + 1)) { if (sigma[w] != 0) dependency[v] += (sigma[v] * 1.0 / sigma[w]) * (1 + dependency[w]); } } if (v != s) { // Each shortest path is counted twice. So, each partial shortest path dependency is halved. 
bwCentrality[v] += dependency[v] / 2; } } } __syncthreads(); } } } double *betweennessCentrality(Graph *graph, int nodeCount) { double *bwCentrality = new double[nodeCount](); double *device_bwCentrality, *dependency; int *sigma, *distance; //TODO: Allocate device memory for bwCentrality catchCudaError(cudaMalloc((void **)&device_bwCentrality, sizeof(double) * nodeCount)); catchCudaError(cudaMalloc((void **)&sigma, sizeof(int) * nodeCount)); catchCudaError(cudaMalloc((void **)&distance, sizeof(int) * nodeCount)); catchCudaError(cudaMalloc((void **)&dependency, sizeof(double) * nodeCount)); catchCudaError(cudaMemcpy(device_bwCentrality, bwCentrality, sizeof(double) * nodeCount, cudaMemcpyHostToDevice)); // Timer cudaEvent_t device_start, device_end; catchCudaError(cudaEventCreate(&device_start)); catchCudaError(cudaEventCreate(&device_end)); catchCudaError(cudaEventRecord(device_start)); betweennessCentralityKernel<<<1, MAX_THREAD_COUNT>>>(graph, device_bwCentrality, nodeCount, sigma, distance, dependency); cudaDeviceSynchronize(); //End of progress bar cout << endl; // Timer catchCudaError(cudaEventRecord(device_end)); catchCudaError(cudaEventSynchronize(device_end)); cudaEventElapsedTime(&device_time_taken, device_start, device_end); // Copy back and free memory catchCudaError(cudaMemcpy(bwCentrality, device_bwCentrality, sizeof(double) * nodeCount, cudaMemcpyDeviceToHost)); catchCudaError(cudaFree(device_bwCentrality)); catchCudaError(cudaFree(sigma)); catchCudaError(cudaFree(dependency)); catchCudaError(cudaFree(distance)); return bwCentrality; } int main(int argc, char *argv[]) { if (argc < 2) { cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n"; return 0; } char choice; cout << "Would you like to print the Graph Betweenness Centrality for all nodes? 
(y/n) "; cin >> choice; freopen(argv[1], "r", stdin); Graph *host_graph = new Graph(); Graph *device_graph; catchCudaError(cudaMalloc((void **)&device_graph, sizeof(Graph))); host_graph->readGraph(); int nodeCount = host_graph->getNodeCount(); int edgeCount = host_graph->getEdgeCount(); catchCudaError(cudaMemcpy(device_graph, host_graph, sizeof(Graph), cudaMemcpyHostToDevice)); // Copy Adjancency List to device int *adjacencyList; // Alocate device memory and copy catchCudaError(cudaMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1))); catchCudaError(cudaMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(cudaMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), cudaMemcpyHostToDevice)); // Copy Adjancency List Pointers to device int *adjacencyListPointers; // Alocate device memory and copy catchCudaError(cudaMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1))); catchCudaError(cudaMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), cudaMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(cudaMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), cudaMemcpyHostToDevice)); double *bwCentrality = betweennessCentrality(device_graph, nodeCount); double maxBetweenness = -1; for (int i = 0; i < nodeCount; i++) { maxBetweenness = max(maxBetweenness, bwCentrality[i]); if (choice == 'y' || choice == 'Y') printf("Node %d => Betweeness Centrality %0.2lf\n", i, bwCentrality[i]); } cout << endl; printf("\nMaximum Betweenness Centrality ==> %0.2lf\n", maxBetweenness); printTime(device_time_taken); if (argc == 3) { freopen(argv[2], "w", stdout); for (int i = 0; i < nodeCount; i++) cout << bwCentrality[i] << " "; cout << endl; } // Free all memory delete[] bwCentrality; catchCudaError(cudaFree(adjacencyList)); catchCudaError(cudaFree(adjacencyListPointers)); catchCudaError(cudaFree(device_graph)); }
1c9ad39bbdeb6fae3a0206a5a7af2fd42cf62638.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <chrono> #include <type_traits> #include <hip/hip_runtime.h> #include <thrust/tuple.h> #include "utils.h" template <typename T> __global__ void RowwiseMomentsKernel( int64_t N, T eps, const T* X, T* mean, T* rstd) { using T_ACC = T; using WelfordType = WelfordData<T_ACC, int64_t>; using WelfordOp = WelfordOps<T_ACC, T_ACC, int64_t, thrust::pair<T_ACC, T_ACC>>; const int64_t i = blockIdx.x; WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; WelfordType val(0, 0, 0, 0); for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index); } // There will be a warning if we declare a __shared__ WelfordType array. // https://github.com/pytorch/pytorch/pull/13967 __shared__ typename std::aligned_storage< sizeof(WelfordType), alignof(WelfordType)>::type val_shared[WARP_SIZE]; WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared); val = BlockReduce( val, welford_op, /*identity_element=*/WelfordType(0, 0, 0, 0), val_shared_ptr); if (threadIdx.x == 0) { T_ACC m1; T_ACC m2; thrust::tie(m1, m2) = welford_op.project(val); rstd[i] = rsqrt(m1 + static_cast<T_ACC>(eps)); mean[i] = m2; } } int main(int argc, char* argv[]) { if (argc != 7) { printf("Usage: %s <batch> <channel> <width> <height> <group> <repeat>\n", argv[0]); return 1; } const int N = atoi(argv[1]); const int C = atoi(argv[2]); const int W = atoi(argv[3]); const int H = atoi(argv[4]); const int G = atoi(argv[5]); const int repeat = atoi(argv[6]); const int64_t D = C / G; double eps = 1e-6; size_t input_size = (size_t)N * C * W * H; size_t input_size_bytes = input_size * sizeof(float); size_t output_size = N * G; size_t output_size_bytes = output_size * sizeof(float); float* h_X = (float*) malloc (input_size_bytes); float* h_mean = (float*) malloc (output_size_bytes); float* h_rstd = (float*) malloc (output_size_bytes); srand(123); for (size_t i = 0; i < input_size; i++) { h_X[i] = rand() / (float)RAND_MAX; } float *d_X; hipMalloc((void**)&d_X, input_size_bytes); hipMemcpy(d_X, h_X, input_size_bytes, hipMemcpyHostToDevice); float* d_mean, *d_rstd; hipMalloc((void**)&d_mean, output_size_bytes); hipMalloc((void**)&d_rstd, output_size_bytes); dim3 grid (N * G); dim3 block (kNumThreads); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { hipLaunchKernelGGL(( RowwiseMomentsKernel<float>), dim3(grid), dim3(block), 0, 0, D * H * W, eps, d_X, d_mean, d_rstd); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of RowwiseMoments kernel: %f (us)\n", (time * 1e-3f) / repeat); hipMemcpy(h_mean, d_mean, output_size_bytes, hipMemcpyDeviceToHost); hipMemcpy(h_rstd, d_rstd, output_size_bytes, hipMemcpyDeviceToHost); double avg_rstd = 0.0, avg_mean = 0.0; for (size_t i = 0; i < output_size; i++) { avg_mean += h_mean[i]; avg_rstd += h_rstd[i]; } avg_rstd /= output_size; avg_mean /= output_size; printf("Checksum: mean = %lf and rstd = %lf\n", avg_mean, avg_rstd); hipFree(d_X); hipFree(d_mean); hipFree(d_rstd); free(h_X); free(h_mean); free(h_rstd); return 0; }
1c9ad39bbdeb6fae3a0206a5a7af2fd42cf62638.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <chrono> #include <type_traits> #include <cuda.h> #include <thrust/tuple.h> #include "utils.h" template <typename T> __global__ void RowwiseMomentsKernel( int64_t N, T eps, const T* X, T* mean, T* rstd) { using T_ACC = T; using WelfordType = WelfordData<T_ACC, int64_t>; using WelfordOp = WelfordOps<T_ACC, T_ACC, int64_t, thrust::pair<T_ACC, T_ACC>>; const int64_t i = blockIdx.x; WelfordOp welford_op = {/*correction=*/0, /*take_sqrt=*/false}; WelfordType val(0, 0, 0, 0); for (int64_t j = threadIdx.x; j < N; j += blockDim.x) { const int64_t index = i * N + j; val = welford_op.reduce(val, static_cast<T_ACC>(X[index]), index); } // There will be a warning if we declare a __shared__ WelfordType array. // https://github.com/pytorch/pytorch/pull/13967 __shared__ typename std::aligned_storage< sizeof(WelfordType), alignof(WelfordType)>::type val_shared[WARP_SIZE]; WelfordType* val_shared_ptr = reinterpret_cast<WelfordType*>(val_shared); val = BlockReduce( val, welford_op, /*identity_element=*/WelfordType(0, 0, 0, 0), val_shared_ptr); if (threadIdx.x == 0) { T_ACC m1; T_ACC m2; thrust::tie(m1, m2) = welford_op.project(val); rstd[i] = rsqrt(m1 + static_cast<T_ACC>(eps)); mean[i] = m2; } } int main(int argc, char* argv[]) { if (argc != 7) { printf("Usage: %s <batch> <channel> <width> <height> <group> <repeat>\n", argv[0]); return 1; } const int N = atoi(argv[1]); const int C = atoi(argv[2]); const int W = atoi(argv[3]); const int H = atoi(argv[4]); const int G = atoi(argv[5]); const int repeat = atoi(argv[6]); const int64_t D = C / G; double eps = 1e-6; size_t input_size = (size_t)N * C * W * H; size_t input_size_bytes = input_size * sizeof(float); size_t output_size = N * G; size_t output_size_bytes = output_size * sizeof(float); float* h_X = (float*) malloc (input_size_bytes); float* h_mean = (float*) malloc (output_size_bytes); float* h_rstd = (float*) malloc (output_size_bytes); srand(123); for (size_t i = 0; i < input_size; i++) { h_X[i] = rand() / (float)RAND_MAX; } float *d_X; cudaMalloc((void**)&d_X, input_size_bytes); cudaMemcpy(d_X, h_X, input_size_bytes, cudaMemcpyHostToDevice); float* d_mean, *d_rstd; cudaMalloc((void**)&d_mean, output_size_bytes); cudaMalloc((void**)&d_rstd, output_size_bytes); dim3 grid (N * G); dim3 block (kNumThreads); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < repeat; i++) { RowwiseMomentsKernel<float><<<grid, block>>>( D * H * W, eps, d_X, d_mean, d_rstd); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average execution time of RowwiseMoments kernel: %f (us)\n", (time * 1e-3f) / repeat); cudaMemcpy(h_mean, d_mean, output_size_bytes, cudaMemcpyDeviceToHost); cudaMemcpy(h_rstd, d_rstd, output_size_bytes, cudaMemcpyDeviceToHost); double avg_rstd = 0.0, avg_mean = 0.0; for (size_t i = 0; i < output_size; i++) { avg_mean += h_mean[i]; avg_rstd += h_rstd[i]; } avg_rstd /= output_size; avg_mean /= output_size; printf("Checksum: mean = %lf and rstd = %lf\n", avg_mean, avg_rstd); cudaFree(d_X); cudaFree(d_mean); cudaFree(d_rstd); free(h_X); free(h_mean); free(h_rstd); return 0; }
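RowwiseMomentsKernel relies on WelfordData, WelfordOps and BlockReduce from the benchmark's utils.h, which is not included in this record. The per-element update it performs is Welford's online mean/variance algorithm, and the /*correction=*/0 argument presumably selects the population variance (divide by n rather than n - 1). A minimal scalar version of that update, for reference only and using illustrative names:

#include <cstdio>
#include <cmath>

// Welford's online algorithm: one pass, numerically stable mean and variance.
struct Welford {
  long long n = 0;
  double mean = 0.0;
  double m2 = 0.0;   // running sum of squared deviations from the current mean

  void add(double x) {
    ++n;
    double delta = x - mean;
    mean += delta / n;
    m2 += delta * (x - mean);
  }
  double variance() const { return n > 0 ? m2 / n : 0.0; }  // population variance (correction = 0)
};

int main() {
  Welford w;
  const double xs[] = {1.0, 2.0, 3.0, 4.0};
  for (double x : xs) w.add(x);
  const double eps = 1e-6;
  // Expect mean = 2.5, variance = 1.25, rstd = 1/sqrt(variance + eps).
  printf("mean=%f var=%f rstd=%f\n", w.mean, w.variance(), 1.0 / std::sqrt(w.variance() + eps));
  return 0;
}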
ca185f1990f6096d879c95ae9baaefa32035e361.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color is //specified by how much Red, Grean and Blue is in it. The 'A' stands for Alpha //and is used for transparency, it will be ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. Since we //are using one byte for each color there are 256 different possible values for //each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel which //is one byte in size. //To convert an image from color to grayscale one simple method is to set the //intensity to the average of the RGB channels. But we will use a more //sophisticated method that takes into account how the eye perceives color and //weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. The //NTSC (National Television System Committee) recommends the following formula //for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are single //precision floating point constants and not double precision constants. //You should fill in the kernel as well as set the block and grid sizes so that //the entire image is processed. #include "reference_calc.cpp" #include "utils.h" #include <stdio.h> #define THREADS_PER_BLOCK_MAX_X 32 #define THREADS_PER_BLOCK_MAX_Y 32 __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset. // //NOTE: Be careful not to try to access memory that is outside the bounds of //the image. 
You'll want code that performs the following check before accessing //GPU memory: // //if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) //{ // return; //} int gridSizeX = (numCols - 1) / THREADS_PER_BLOCK_MAX_X + 1; int gridSizeY = (numRows - 1) / THREADS_PER_BLOCK_MAX_Y + 1; int threadId = (threadIdx.x + THREADS_PER_BLOCK_MAX_X * threadIdx.y); int blockId = blockIdx.x + gridSizeX * blockIdx.y; int imageOffset = threadId + (THREADS_PER_BLOCK_MAX_X * THREADS_PER_BLOCK_MAX_Y * blockId); if (imageOffset < (numCols * numRows)) { uchar4 rgba = rgbaImage[imageOffset]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[imageOffset] = channelSum; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int gridSizeX = (numCols - 1) / THREADS_PER_BLOCK_MAX_X + 1; int gridSizeY = (numRows - 1) / THREADS_PER_BLOCK_MAX_Y + 1; const dim3 blockSize(THREADS_PER_BLOCK_MAX_X, THREADS_PER_BLOCK_MAX_Y, 1); //TODO const dim3 gridSize(gridSizeX, gridSizeY, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
ca185f1990f6096d879c95ae9baaefa32035e361.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color is //specified by how much Red, Grean and Blue is in it. The 'A' stands for Alpha //and is used for transparency, it will be ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. Since we //are using one byte for each color there are 256 different possible values for //each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel which //is one byte in size. //To convert an image from color to grayscale one simple method is to set the //intensity to the average of the RGB channels. But we will use a more //sophisticated method that takes into account how the eye perceives color and //weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. The //NTSC (National Television System Committee) recommends the following formula //for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are single //precision floating point constants and not double precision constants. //You should fill in the kernel as well as set the block and grid sizes so that //the entire image is processed. #include "reference_calc.cpp" #include "utils.h" #include <stdio.h> #define THREADS_PER_BLOCK_MAX_X 32 #define THREADS_PER_BLOCK_MAX_Y 32 __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset. // //NOTE: Be careful not to try to access memory that is outside the bounds of //the image. 
You'll want code that performs the following check before accessing //GPU memory: // //if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) //{ // return; //} int gridSizeX = (numCols - 1) / THREADS_PER_BLOCK_MAX_X + 1; int gridSizeY = (numRows - 1) / THREADS_PER_BLOCK_MAX_Y + 1; int threadId = (threadIdx.x + THREADS_PER_BLOCK_MAX_X * threadIdx.y); int blockId = blockIdx.x + gridSizeX * blockIdx.y; int imageOffset = threadId + (THREADS_PER_BLOCK_MAX_X * THREADS_PER_BLOCK_MAX_Y * blockId); if (imageOffset < (numCols * numRows)) { uchar4 rgba = rgbaImage[imageOffset]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[imageOffset] = channelSum; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched int gridSizeX = (numCols - 1) / THREADS_PER_BLOCK_MAX_X + 1; int gridSizeY = (numRows - 1) / THREADS_PER_BLOCK_MAX_Y + 1; const dim3 blockSize(THREADS_PER_BLOCK_MAX_X, THREADS_PER_BLOCK_MAX_Y, 1); //TODO const dim3 gridSize(gridSizeX, gridSizeY, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
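A host-side reference is sometimes handy for checking the GPU output of the kernel above. The sketch below applies the same NTSC weights on the CPU; it assumes the uchar4 layout the kernel's comment states (.x is R, .y is G, .z is B, .w is A, with alpha ignored) and truncates the weighted sum to a byte exactly as the kernel's implicit float to unsigned char conversion does.

#include <cstdio>
#include <cstdint>
#include <vector>

struct RGBA { uint8_t x, y, z, w; };   // mirrors uchar4: x->R, y->G, z->B, w->A (alpha ignored)

// CPU reference for the conversion: I = .299f*R + .587f*G + .114f*B, truncated to a byte.
static void rgba_to_grey_cpu(const std::vector<RGBA>& in, std::vector<uint8_t>& out) {
  out.resize(in.size());
  for (size_t i = 0; i < in.size(); ++i)
    out[i] = static_cast<uint8_t>(.299f * in[i].x + .587f * in[i].y + .114f * in[i].z);
}

int main() {
  std::vector<RGBA> img = { {255, 0, 0, 255}, {0, 255, 0, 255}, {0, 0, 255, 255}, {128, 128, 128, 255} };
  std::vector<uint8_t> grey;
  rgba_to_grey_cpu(img, grey);
  // Pure red/green/blue map to roughly 76/149/29; mid-grey stays near 128.
  for (uint8_t g : grey) printf("%d ", g);
  printf("\n");
  return 0;
}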
a14b5fd7da94ba8ba2c2f21b07367a7840a820b3.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2019,2020,2021 Sony Corporation. // Copyright 2021 Sony Group Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // tanh_shrink.cpp #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/tanh_shrink.hpp> #include <nbla/cuda/function/utils/base_transform_unary.cuh> #include <cmath> namespace nbla { NBLA_DEFINE_TRANSFORM_UNARY_CUDA(TanhShrink, x - std::tanh(x), dy *::pow(std::tanh(x), (T)2), false, true); }
a14b5fd7da94ba8ba2c2f21b07367a7840a820b3.cu
// Copyright 2019,2020,2021 Sony Corporation. // Copyright 2021 Sony Group Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // tanh_shrink.cpp #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/tanh_shrink.hpp> #include <nbla/cuda/function/utils/base_transform_unary.cuh> #include <cmath> namespace nbla { NBLA_DEFINE_TRANSFORM_UNARY_CUDA(TanhShrink, x - std::tanh(x), dy *std::pow(std::tanh(x), (T)2), false, true); }
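The two expressions passed to NBLA_DEFINE_TRANSFORM_UNARY_CUDA (a macro from the nnabla headers, not shown in this record) encode tanh-shrink and its gradient: the forward pass is f(x) = x - tanh(x), and since d/dx tanh(x) = 1 - tanh^2(x), the derivative is f'(x) = tanh^2(x), which is why the backward expression multiplies dy by pow(tanh(x), 2). A standalone kernel expressing the same forward/backward pair, with illustrative names rather than the nnabla machinery, could look like this:

#include <cstdio>
#include <cuda_runtime.h>

// Forward:  y  = x - tanh(x)
// Backward: dx = dy * tanh(x)^2   (because d/dx [x - tanh(x)] = 1 - (1 - tanh^2 x) = tanh^2 x)
__global__ void tanh_shrink_fwd_bwd(const float* x, const float* dy,
                                    float* y, float* dx, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    float t = tanhf(x[i]);
    y[i]  = x[i] - t;
    dx[i] = dy[i] * t * t;
  }
}

int main() {
  const int n = 4;
  float hx[n] = {-2.f, -0.5f, 0.5f, 2.f}, hdy[n] = {1.f, 1.f, 1.f, 1.f};
  float *x, *dy, *y, *dx;
  cudaMalloc(&x, n * sizeof(float));   cudaMalloc(&dy, n * sizeof(float));
  cudaMalloc(&y, n * sizeof(float));   cudaMalloc(&dx, n * sizeof(float));
  cudaMemcpy(x, hx, sizeof(hx), cudaMemcpyHostToDevice);
  cudaMemcpy(dy, hdy, sizeof(hdy), cudaMemcpyHostToDevice);
  tanh_shrink_fwd_bwd<<<1, 32>>>(x, dy, y, dx, n);
  float hy[n], hdx[n];
  cudaMemcpy(hy, y, sizeof(hy), cudaMemcpyDeviceToHost);
  cudaMemcpy(hdx, dx, sizeof(hdx), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("x=% .2f  y=% .4f  dx=% .4f\n", hx[i], hy[i], hdx[i]);
  cudaFree(x); cudaFree(dy); cudaFree(y); cudaFree(dx);
  return 0;
}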
8749e8768ed0661da0120506d4a04d803d538e38.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" #include <cstdio> __global__ void reduce_kernel(float* d_out, const float* const d_in, const int arraySize, bool isMax) { extern __shared__ float sdata[]; const int myId = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x; //if(myId >= arraySize) // return; if(myId < arraySize) sdata[tid] = d_in[myId]; __syncthreads(); int maxId = 0; for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s && myId + s < arraySize) { float val1 = sdata[tid]; float val2 = sdata[tid+s]; sdata[tid] = isMax ? (val1 > val2 ? val1 : val2) : (val1 < val2 ? val1 : val2); maxId = isMax ? (val1 > val2 ? myId : myId+s) : (val1 < val2 ? myId : myId+s); } __syncthreads(); } if (tid == 0) d_out[blockIdx.x] = sdata[0]; } __global__ void histo_kernel(unsigned int* d_histogram, const float* const d_lum, const float lumMin, const float lumRange, const size_t numBins, const size_t arraySize) { const int myId = blockIdx.x * blockDim.x + threadIdx.x; if (myId >= arraySize) return; unsigned int bin = min(static_cast<unsigned int>(numBins - 1), static_cast<unsigned int>((d_lum[myId] - lumMin) / lumRange * static_cast<float>(numBins))); // for rounding error atomicAdd(&d_histogram[bin], 1); } //__global__ void blelloch_scan_reduce_kernel (unsigned int* const d_out, // unsigned int* d_in, // const size_t numBins) //{ // extern __shared__ unsigned int reduce_data[]; // const int numThreads = gridDim.x*blockDim.x; // const int chunkSize = ceil((float)numBins / (float)numThreads); // const int myId = (blockIdx.x * blockDim.x + threadIdx.x + 1) * chunkSize -1; // if (myId >= numBins) // return; // // Copy data to shared memory // const int tid = threadIdx.x; // reduce_data[tid] = d_in[myId]; // __syncthreads(); // // // for (int offset = 1; offset < blockDim.x; offset <<= 1) { // if(tid - offset >= 0 && (tid+1)%(2*offset) == 0) { // reduce_data[tid] += reduce_data[tid-offset]; // } // __syncthreads(); // } // d_out[myId] = reduce_data[tid]; //} // //__global__ void blelloch_scan_down_sweep (unsigned int* d_in_out, // const size_t numBins) //{ // extern __shared__ unsigned int sweep_data[]; // const int numThreads = gridDim.x*blockDim.x; // const int chunkSize = ceil((float)numBins / (float)numThreads); // const int myId = (blockIdx.x * blockDim.x + threadIdx.x + 1) * chunkSize -1; // if (myId >= numBins) // return; // // Copy data to shared memory // const int tid = threadIdx.x; // if(myId == numBins -1) // sweep_data[tid] = 0; // else // sweep_data[tid] = d_in_out[myId]; // __syncthreads(); // // // for (int offset = blockDim.x >> 1; offset >=1; offset >>= 1) { // if(tid - offset >= 0 && (blockDim.x-1-tid)%(offset << 1) == 0) { // int tmp = sweep_data[tid]; // sweep_data[tid] += sweep_data[tid-offset]; // sweep_data[tid-offset] = tmp; // } // __syncthreads(); // } // d_in_out[myId] = sweep_data[tid]; //} __global__ void hillis_steele_scan (unsigned int* const d_out, unsigned int* d_in, const size_t numBins) { extern __shared__ int scan_data[]; const int myId = blockIdx.x * blockDim.x + threadIdx.x; if(myId >= numBins) return; const int tid = threadIdx.x; scan_data[tid] = d_in[myId]; __syncthreads(); for (unsigned int step = 1; step < numBins; step <<= 1) { if (tid >= step) { scan_data[tid] += scan_data[tid-step]; } __syncthreads(); } if (myId == 0) d_out[myId] = 0; else if(myId < numBins) { d_out[myId] = scan_data[tid-1]; } } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const 
size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ float* d_tmp; float* d_out; unsigned int* d_histogram; const size_t arraySize = numRows * numCols; const size_t arrayBytes = arraySize * sizeof(float); size_t numThreads = 512; size_t numBlocks = ceil(((float)arraySize) / ((float)numThreads)); printf("Array size: %d\n", arraySize); printf("block number: %d\n", numBlocks); // Allocate memory checkCudaErrors(hipMalloc(&d_tmp, sizeof(float) * numBlocks)); checkCudaErrors(hipMalloc(&d_out, sizeof(float))); checkCudaErrors(hipMalloc(&d_histogram, sizeof(unsigned int)*numBins)); checkCudaErrors(hipMemset(d_histogram, 0, sizeof(unsigned int)*numBins)); hipLaunchKernelGGL(( reduce_kernel), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(float), 0, d_tmp, d_logLuminance, arraySize, false); int threads = 1; while (threads < numBlocks) threads <<= 1; hipLaunchKernelGGL(( reduce_kernel), dim3(1), dim3(threads), threads * sizeof(float), 0, // # of threads must be power of 2! d_out, d_tmp, numBlocks, false); checkCudaErrors(hipMemcpy(&min_logLum, d_out, sizeof(float), hipMemcpyDeviceToHost)); hipLaunchKernelGGL(( reduce_kernel), dim3(numBlocks), dim3(numThreads), numThreads * sizeof(float), 0, d_tmp, d_logLuminance, arraySize, true); hipLaunchKernelGGL(( reduce_kernel), dim3(1), dim3(threads), threads * sizeof(float), 0, // # of threads must be power of 2! d_out, d_tmp, numBlocks, true); checkCudaErrors(hipMemcpy(&max_logLum, d_out, sizeof(float), hipMemcpyDeviceToHost)); printf("min: %f\n", min_logLum); printf("max: %f\n", max_logLum); hipFree(d_tmp); hipFree(d_out); float lumRange = max_logLum - min_logLum; printf("Bins: %d, lumRang: %f\n", numBins, lumRange); numBlocks = ceil(((float)arraySize) / ((float)numThreads)); hipLaunchKernelGGL(( histo_kernel), dim3(numBlocks), dim3(numThreads), 0, 0, d_histogram, d_logLuminance, min_logLum, lumRange, numBins, arraySize); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //// scan reduce //numBlocks = ceil( (float)numBins / (float)numThreads ); //blelloch_scan_reduce_kernel<<<numBlocks, numThreads, numThreads*sizeof(int)>>> // ( d_cdf, // d_histogram, // numBins ); // //blelloch_scan_reduce_kernel<<<1, numBlocks, numBlocks*sizeof(int)>>> // (d_cdf, // d_cdf, // numBins ); // //// scan down sweep //blelloch_scan_down_sweep<<<1, numBlocks, numBlocks*sizeof(int)>>> // (d_cdf, // numBins); //blelloch_scan_down_sweep<<<numBlocks, numThreads, numThreads*sizeof(int)>>> // (d_cdf, // numBins); hipLaunchKernelGGL(( hillis_steele_scan), dim3(1), dim3(numBins), numBins*sizeof(unsigned int), 0, d_cdf, d_histogram, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipFree(d_histogram); }
8749e8768ed0661da0120506d4a04d803d538e38.cu
/* Udacity Homework 3 HDR Tone-mapping Background HDR ============== A High Dynamic Range (HDR) image contains a wider variation of intensity and color than is allowed by the RGB format with 1 byte per channel that we have used in the previous assignment. To store this extra information we use single precision floating point for each channel. This allows for an extremely wide range of intensity values. In the image for this assignment, the inside of church with light coming in through stained glass windows, the raw input floating point values for the channels range from 0 to 275. But the mean is .41 and 98% of the values are less than 3! This means that certain areas (the windows) are extremely bright compared to everywhere else. If we linearly map this [0-275] range into the [0-255] range that we have been using then most values will be mapped to zero! The only thing we will be able to see are the very brightest areas - the windows - everything else will appear pitch black. The problem is that although we have cameras capable of recording the wide range of intensity that exists in the real world our monitors are not capable of displaying them. Our eyes are also quite capable of observing a much wider range of intensities than our image formats / monitors are capable of displaying. Tone-mapping is a process that transforms the intensities in the image so that the brightest values aren't nearly so far away from the mean. That way when we transform the values into [0-255] we can actually see the entire image. There are many ways to perform this process and it is as much an art as a science - there is no single "right" answer. In this homework we will implement one possible technique. Background Chrominance-Luminance ================================ The RGB space that we have been using to represent images can be thought of as one possible set of axes spanning a three dimensional space of color. We sometimes choose other axes to represent this space because they make certain operations more convenient. Another possible way of representing a color image is to separate the color information (chromaticity) from the brightness information. There are multiple different methods for doing this - a common one during the analog television days was known as Chrominance-Luminance or YUV. We choose to represent the image in this way so that we can remap only the intensity channel and then recombine the new intensity values with the color information to form the final image. Old TV signals used to be transmitted in this way so that black & white televisions could display the luminance channel while color televisions would display all three of the channels. Tone-mapping ============ In this assignment we are going to transform the luminance channel (actually the log of the luminance, but this is unimportant for the parts of the algorithm that you will be implementing) by compressing its range to [0, 1]. To do this we need the cumulative distribution of the luminance values. Example ------- input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2] min / max / range: 0 / 9 / 9 histo with 3 bins: [4 7 3] cdf : [4 11 14] Your task is to calculate this cumulative distribution by following these steps. 
*/ #include "utils.h" #include <cstdio> __global__ void reduce_kernel(float* d_out, const float* const d_in, const int arraySize, bool isMax) { extern __shared__ float sdata[]; const int myId = blockIdx.x*blockDim.x + threadIdx.x; const int tid = threadIdx.x; //if(myId >= arraySize) // return; if(myId < arraySize) sdata[tid] = d_in[myId]; __syncthreads(); int maxId = 0; for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s && myId + s < arraySize) { float val1 = sdata[tid]; float val2 = sdata[tid+s]; sdata[tid] = isMax ? (val1 > val2 ? val1 : val2) : (val1 < val2 ? val1 : val2); maxId = isMax ? (val1 > val2 ? myId : myId+s) : (val1 < val2 ? myId : myId+s); } __syncthreads(); } if (tid == 0) d_out[blockIdx.x] = sdata[0]; } __global__ void histo_kernel(unsigned int* d_histogram, const float* const d_lum, const float lumMin, const float lumRange, const size_t numBins, const size_t arraySize) { const int myId = blockIdx.x * blockDim.x + threadIdx.x; if (myId >= arraySize) return; unsigned int bin = min(static_cast<unsigned int>(numBins - 1), static_cast<unsigned int>((d_lum[myId] - lumMin) / lumRange * static_cast<float>(numBins))); // for rounding error atomicAdd(&d_histogram[bin], 1); } //__global__ void blelloch_scan_reduce_kernel (unsigned int* const d_out, // unsigned int* d_in, // const size_t numBins) //{ // extern __shared__ unsigned int reduce_data[]; // const int numThreads = gridDim.x*blockDim.x; // const int chunkSize = ceil((float)numBins / (float)numThreads); // const int myId = (blockIdx.x * blockDim.x + threadIdx.x + 1) * chunkSize -1; // if (myId >= numBins) // return; // // Copy data to shared memory // const int tid = threadIdx.x; // reduce_data[tid] = d_in[myId]; // __syncthreads(); // // // for (int offset = 1; offset < blockDim.x; offset <<= 1) { // if(tid - offset >= 0 && (tid+1)%(2*offset) == 0) { // reduce_data[tid] += reduce_data[tid-offset]; // } // __syncthreads(); // } // d_out[myId] = reduce_data[tid]; //} // //__global__ void blelloch_scan_down_sweep (unsigned int* d_in_out, // const size_t numBins) //{ // extern __shared__ unsigned int sweep_data[]; // const int numThreads = gridDim.x*blockDim.x; // const int chunkSize = ceil((float)numBins / (float)numThreads); // const int myId = (blockIdx.x * blockDim.x + threadIdx.x + 1) * chunkSize -1; // if (myId >= numBins) // return; // // Copy data to shared memory // const int tid = threadIdx.x; // if(myId == numBins -1) // sweep_data[tid] = 0; // else // sweep_data[tid] = d_in_out[myId]; // __syncthreads(); // // // for (int offset = blockDim.x >> 1; offset >=1; offset >>= 1) { // if(tid - offset >= 0 && (blockDim.x-1-tid)%(offset << 1) == 0) { // int tmp = sweep_data[tid]; // sweep_data[tid] += sweep_data[tid-offset]; // sweep_data[tid-offset] = tmp; // } // __syncthreads(); // } // d_in_out[myId] = sweep_data[tid]; //} __global__ void hillis_steele_scan (unsigned int* const d_out, unsigned int* d_in, const size_t numBins) { extern __shared__ int scan_data[]; const int myId = blockIdx.x * blockDim.x + threadIdx.x; if(myId >= numBins) return; const int tid = threadIdx.x; scan_data[tid] = d_in[myId]; __syncthreads(); for (unsigned int step = 1; step < numBins; step <<= 1) { if (tid >= step) { scan_data[tid] += scan_data[tid-step]; } __syncthreads(); } if (myId == 0) d_out[myId] = 0; else if(myId < numBins) { d_out[myId] = scan_data[tid-1]; } } void your_histogram_and_prefixsum(const float* const d_logLuminance, unsigned int* const d_cdf, float &min_logLum, float &max_logLum, const size_t numRows, const 
size_t numCols, const size_t numBins) { //TODO /*Here are the steps you need to implement 1) find the minimum and maximum value in the input logLuminance channel store in min_logLum and max_logLum 2) subtract them to find the range 3) generate a histogram of all the values in the logLuminance channel using the formula: bin = (lum[i] - lumMin) / lumRange * numBins 4) Perform an exclusive scan (prefix sum) on the histogram to get the cumulative distribution of luminance values (this should go in the incoming d_cdf pointer which already has been allocated for you) */ float* d_tmp; float* d_out; unsigned int* d_histogram; const size_t arraySize = numRows * numCols; const size_t arrayBytes = arraySize * sizeof(float); size_t numThreads = 512; size_t numBlocks = ceil(((float)arraySize) / ((float)numThreads)); printf("Array size: %d\n", arraySize); printf("block number: %d\n", numBlocks); // Allocate memory checkCudaErrors(cudaMalloc(&d_tmp, sizeof(float) * numBlocks)); checkCudaErrors(cudaMalloc(&d_out, sizeof(float))); checkCudaErrors(cudaMalloc(&d_histogram, sizeof(unsigned int)*numBins)); checkCudaErrors(cudaMemset(d_histogram, 0, sizeof(unsigned int)*numBins)); reduce_kernel<<<numBlocks, numThreads, numThreads * sizeof(float)>>> (d_tmp, d_logLuminance, arraySize, false); int threads = 1; while (threads < numBlocks) threads <<= 1; reduce_kernel<<<1, threads, threads * sizeof(float)>>> // # of threads must be power of 2! (d_out, d_tmp, numBlocks, false); checkCudaErrors(cudaMemcpy(&min_logLum, d_out, sizeof(float), cudaMemcpyDeviceToHost)); reduce_kernel<<<numBlocks, numThreads, numThreads * sizeof(float)>>> (d_tmp, d_logLuminance, arraySize, true); reduce_kernel<<<1, threads, threads * sizeof(float)>>> // # of threads must be power of 2! (d_out, d_tmp, numBlocks, true); checkCudaErrors(cudaMemcpy(&max_logLum, d_out, sizeof(float), cudaMemcpyDeviceToHost)); printf("min: %f\n", min_logLum); printf("max: %f\n", max_logLum); cudaFree(d_tmp); cudaFree(d_out); float lumRange = max_logLum - min_logLum; printf("Bins: %d, lumRang: %f\n", numBins, lumRange); numBlocks = ceil(((float)arraySize) / ((float)numThreads)); histo_kernel<<<numBlocks, numThreads>>> (d_histogram, d_logLuminance, min_logLum, lumRange, numBins, arraySize); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //// scan reduce //numBlocks = ceil( (float)numBins / (float)numThreads ); //blelloch_scan_reduce_kernel<<<numBlocks, numThreads, numThreads*sizeof(int)>>> // ( d_cdf, // d_histogram, // numBins ); // //blelloch_scan_reduce_kernel<<<1, numBlocks, numBlocks*sizeof(int)>>> // (d_cdf, // d_cdf, // numBins ); // //// scan down sweep //blelloch_scan_down_sweep<<<1, numBlocks, numBlocks*sizeof(int)>>> // (d_cdf, // numBins); //blelloch_scan_down_sweep<<<numBlocks, numThreads, numThreads*sizeof(int)>>> // (d_cdf, // numBins); hillis_steele_scan<<<1, numBins, numBins*sizeof(unsigned int)>>>(d_cdf, d_histogram, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); cudaFree(d_histogram); }
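The pipeline in your_histogram_and_prefixsum (min/max reduction, histogram with bin = (lum - lumMin) / lumRange * numBins, then a scan of the histogram) can be cross-checked against the worked example in the file's header comment. The host reference below reproduces that example (input [2 4 3 3 1 7 4 5 7 0 9 4 3 2], 3 bins) and prints both the inclusive cdf quoted in the comment ([4 11 14]) and the exclusive scan that the hillis_steele_scan kernel above actually writes into d_cdf (it shifts the inclusive result right by one and writes 0 into the first bin).

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Worked example from the header comment of the file above.
  std::vector<float> lum = {2, 4, 3, 3, 1, 7, 4, 5, 7, 0, 9, 4, 3, 2};
  const int numBins = 3;

  float lumMin = *std::min_element(lum.begin(), lum.end());   // 0
  float lumMax = *std::max_element(lum.begin(), lum.end());   // 9
  float lumRange = lumMax - lumMin;                           // 9

  // Same binning formula as histo_kernel, with the top bin clamped.
  std::vector<unsigned> histo(numBins, 0);
  for (float v : lum) {
    unsigned bin = std::min<unsigned>(numBins - 1,
                     static_cast<unsigned>((v - lumMin) / lumRange * numBins));
    ++histo[bin];                                             // expect [4 7 3]
  }

  std::vector<unsigned> inclusive(numBins), exclusive(numBins);
  unsigned running = 0;
  for (int b = 0; b < numBins; ++b) {
    exclusive[b] = running;   // what the exclusive scan written to d_cdf looks like: [0 4 11]
    running += histo[b];
    inclusive[b] = running;   // the cdf quoted in the header comment:                [4 11 14]
  }

  printf("histo    :"); for (unsigned h : histo)     printf(" %u", h); printf("\n");
  printf("inclusive:"); for (unsigned c : inclusive) printf(" %u", c); printf("\n");
  printf("exclusive:"); for (unsigned c : exclusive) printf(" %u", c); printf("\n");
  return 0;
}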
0951d23c5b1ecec14b7b8d3988f2ad2b7800913a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "clustertree.h" #ifndef __linux__ #include <iso646.h> #include <helpers/ImageDisplay.h> #endif #include <thrust/reduce.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/copy.h> #include <algorithm> #ifdef _DEBUG ImageDisplay::ImageDisplay Display; #endif __global__ void _distacesToAll(float* d_image, float* d_out, float* filter, int x_size, int y_size, float* mu, unsigned int* indicator, int node_index, int patch_radius, float inv_sqr_sigma){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if(indicator[yId*x_size + xId] != node_index){ d_out[yId*x_size + xId] = -1; return; } int center = patch_radius; int vec_x_size = 2 * patch_radius + 1; float distance(0); // go through each patch for (int py = -patch_radius; py <= patch_radius; py++) for (int px = -patch_radius; px <= patch_radius; px++){ // Main patch int px1 = CLIP(0, xId + px, x_size - 1); int py1 = CLIP(0, yId + py, y_size - 1); // Patch in the window //int px2 = CLIP(0, xpos + px, x_size - 1); //int py2 = CLIP(0, ypos + py, y_size - 1); float tmp = d_image[px1 + py1*x_size] - mu[(px + patch_radius) + (py + patch_radius)*vec_x_size]; distance += tmp*tmp*filter[center + px] * filter[center + py]; }// go through each patch float w = exp(-distance * inv_sqr_sigma); __syncthreads(); d_out[yId*x_size + xId] = w; } __global__ void _compareTwoMatrixes(float *first_mat, float *second_mat, float *d_out, unsigned int* indicator, int node_index, int x_size, int y_size, int strictly){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; d_out[yId*x_size + xId] = 0; if (indicator[yId*x_size + xId] == node_index){ // Read values float val1 = first_mat[yId*x_size + xId]; float val2 = second_mat[yId*x_size + xId]; if (strictly){ if (val1>val2) d_out[yId*x_size + xId] = 1; } else{ if (val1 >= val2) d_out[yId*x_size + xId] = 1; } } } // Updates global indicator mat based on local step matrix __global__ void _updateIndicatorMatrix(unsigned int* d_indicator, int x_size, int y_size, float* d_step_indicator, int index1, int index2){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_step_indicator[yId*x_size + xId] == 1) d_indicator[yId*x_size + xId] = index1; if (d_step_indicator[yId*x_size + xId] == 0) d_indicator[yId*x_size + xId] = index2; } __global__ void _matIntToFloat(unsigned int *d_in, float* d_out, int x_size, int y_size){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; d_out[yId*x_size + xId] = (float)d_in[yId*x_size + xId]; } __global__ void _shiftMat(float* d_shifted, float* d_image, int x_size, int y_size, int x_shift, int y_shift, float* d_indicator){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (!d_indicator[yId*x_size + xId]){ d_shifted[yId*x_size + xId] = 0; // There could be smth left from previous calls return; } int x = CLIP(0, xId + x_shift, 
x_size - 1); int y = CLIP(0, yId + y_shift, y_size - 1); d_shifted[yId*x_size + xId] = d_image[y*x_size + x]; } __global__ void _updateIndicator(unsigned int* d_indicator, int x_size, int y_size, float* d_mask, int index){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_mask[yId*x_size + xId] == 1) d_indicator[yId*x_size + xId] = index; } __global__ void _getTheHeck(int2* list, unsigned int* d_indicator, int x_size, int y_size, int index){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_indicator[yId*x_size + xId] != index){ int2 p; p.x = -1; p.y = -1; list[yId*x_size + xId] = p; } else{ int2 p; p.x = xId; p.y = yId; list[yId*x_size + xId] = p; } } __global__ void _fillRefMat(int** refmat, unsigned int* d_indicator, int x_size, int y_size, int* reference, int index){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_indicator[yId*x_size + xId] == index) refmat[yId*x_size + xId] = reference; } __global__ void _nlmCudaList(float* d_image, float* d_out, float* filter, int x_size, int y_size, int** refmat, int patch_radius, float inv_sqr_sigma, int w){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; int center = patch_radius; float new_value(0); float normalizer(0); //int w = 1; for (int nx = -w; nx <= w; nx++) for (int ny = -w; ny <= w; ny++) { if ((ny + yId) >= 0 && (ny + yId) < y_size) // we shouldn't fall over the edge of the disc world if ((nx + xId) >= 0 && (nx + xId) < x_size) // Does it make sense to peek there at all? 
However we need to compute patch with it's corresponding list if ((refmat[(yId + ny)*x_size + (xId + nx)] != refmat[(yId)*x_size + (xId)]) || (nx==0 && ny==0)) { int* list = refmat[(yId + ny)*x_size + (xId + nx)]; int numel = list[0]; // go through list of pixels for (int k = 1; k < numel; k++){ float distance(0); // go through each patch for (int py = -patch_radius; py <= patch_radius; py++) for (int px = -patch_radius; px <= patch_radius; px++){ // Main patch int px1 = CLIP(0, xId + px, x_size - 1); int py1 = CLIP(0, yId + py, y_size - 1); // Patch in the window int px2 = CLIP(0, list[k] + px, x_size - 1); int py2 = CLIP(0, list[k + numel] + py, y_size - 1); float tmp = d_image[px1 + py1*x_size] - d_image[px2 + py2*x_size]; distance += tmp*tmp*filter[center + px] * filter[center + py]; } float w = exp(-distance * inv_sqr_sigma); new_value += w*d_image[list[k] + list[k + numel] * x_size]; normalizer += w; } // Go through list with pixels } // Makes sense if we are not out }// Go through neig // We need syncthreads before writing the final result //__syncthreads(); d_out[yId*x_size + xId] = new_value / normalizer; } ClusterTree::ClusterTree(CMatrix<float> data, int patch_radius, float sqr_sigma) : x_size(data.xSize()), y_size(data.ySize()), patch_radius(patch_radius), sqr_sigma(sqr_sigma), image(data){ this->inv_sqr_sigma = 1.0f / sqr_sigma; // Allocate memory for the reference matrix //this->refmat = new node*[this->x_size*this->y_size]; this->Head = new node; this->Head->index = 0; this->Head->leaf = 1; // this could be our only node so for now it's a leaf this->Head->left = NULL; // those pointers shouldn't point anywhere this->Head->right = NULL; // for now // Initialize domain list for (int j = 0; j < y_size; ++j) for (int i = 0; i < x_size; ++i){ point p; p.x = i; p.y = j; this->Head->coordinates.push_back(p); } node_count = 0; //initialize variable (head excluded) allocateIdicatorMatrix(); mu_x_size = (2 * patch_radius + 1); // Device initialization this->block = dim3(32, 32, 1); this->grid.x = DIV_UP(image.xSize(), (float)block.x); this->grid.y = DIV_UP(image.ySize(), (float)block.y); // create a gauss lut for 1D this->h_filter = new float[mu_x_size]; float* center = h_filter + patch_radius; for (int x = -patch_radius; x <= patch_radius; ++x) *(center + x) = ::exp(-0.5*x*x / (patch_radius*patch_radius)); this->image_size = data.xSize()*data.ySize(); this->maxlvl = 1; this->maxpoints = 50; this->w = 1; // Initialize a random generator #ifdef _DEBUG srand(5); #else srand(time(NULL)); #endif // Allocate memory on the device checkCudaErrors(hipMalloc((void ***)&this->refmat, sizeof(int*) * image_size)); checkCudaErrors(hipMalloc((void**)&d_image, sizeof(float) * image_size)); checkCudaErrors(hipMalloc((void**)&d_mat1, sizeof(float) * image_size)); checkCudaErrors(hipMemset(d_mat1, 0, sizeof(float) * image_size)); checkCudaErrors(hipMalloc((void**)&d_mat2, sizeof(float) * image_size)); checkCudaErrors(hipMemset(d_mat2, 0, sizeof(float) * image_size)); checkCudaErrors(hipMalloc((void**)&d_step_indicator, sizeof(float) * image_size)); checkCudaErrors(hipMemset(d_step_indicator, 0, sizeof(float) * image_size)); checkCudaErrors(hipMalloc((void**)&d_filter, sizeof(float) * (2 * patch_radius + 1))); // Copy to the device checkCudaErrors(hipMemcpy(d_image, image.data(), sizeof(float) * image_size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * (mu_x_size), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void**)&d_mu1, sizeof(float) * SQR(2 * 
patch_radius + 1))); checkCudaErrors(hipMalloc((void**)&d_mu2, sizeof(float) * SQR(2 * patch_radius + 1))); checkCudaErrors(hipMalloc(&d_shifted, sizeof(float) * image_size)); }; void ClusterTree::setMaximumLevel(int lvl){ if (lvl > 0) this->maxlvl = lvl; else{ std::cerr << "Max level could be between 0 or 10" << std::endl; if (lvl <= 0) { lvl = 1; std::cout << "Max level has been set to 1"; }; if (lvl > 10) { lvl = 10; std::cout << "Max level has been set to 10"; }; } } void ClusterTree::setMaximumOfPointsInTheLeafs(int points){ if (points > 2) this->maxpoints = points; else{ std::cerr << "Max point in leaf should be more than 2" << std::endl; } } ClusterTree::~ClusterTree(){ // Everything allocated should be deleted destroyTree(this->Head); //destroyIndicatorMatrix(); //delete[] this->refmat; hipFree(d_image); hipFree(d_mat1); hipFree(d_mat2); hipFree(d_mu1); hipFree(d_mu2); hipFree(d_shifted); hipFree(d_step_indicator); hipFree(d_filter); hipFree(d_indicatorMat); //for (int i = 0; i < image_size; i++){ // if (refmat[i]) // hipFree(refmat[i]); //} hipFree(refmat); //hipFree(d_indicator); checkCudaErrors(hipDeviceReset()); } void ClusterTree::displayCoordinateList(node* n){ for (int i = 0; i < n->coordinates.size(); ++i){ std::cout << "x: " << n->coordinates[i].x << "y: " << n->coordinates[i].y << std::endl; } } node* ClusterTree::getHead(){ return this->Head; }; int ClusterTree::getIndexOfNode(node* n){ return n->index; } unsigned int* ClusterTree::getIndicatorMatrix(){ if (this->d_indicatorMat == NULL) this->allocateIdicatorMatrix(); return this->d_indicatorMat; } void ClusterTree::destroyTree(node* p){ if (p->left){ // Does it have left node? this->destroyTree(p->left); // Traverse further p->left = NULL; } if (p->right){ // Does it have right node? this->destroyTree(p->right); // Traverse further p->right = NULL; } // Doesn't have childs but still considered non leaf if (!p->right && !p->left && !p->leaf){ p->leaf = 1; } if (p->leaf){ // Is this a leaf node? 
(shouldn't have left\right) //delete[] p->mu; // Kill it's allocated memory delete p; // Kill it } } point ClusterTree::sampleRand(node* n){ unsigned int minx = 0; unsigned int maxx = n->coordinates.size()-1; unsigned int index = rand() % (maxx - minx) + minx; // random value in [min, max] point p; p.x = n->coordinates[index].x; p.y = n->coordinates[index].y; //#ifdef _DEBUG // std::cout << "xval: " << n->coordinates[index].x << std::endl; // std::cout << "yval: " << n->coordinates[index].y << std::endl; //#endif return p; } void ClusterTree::allocateIdicatorMatrix(){ int size = this->x_size*this->y_size; unsigned int* initmat = new unsigned int[size]; std::fill(initmat, initmat + size, 0); // Fill with initial values [Head node index is 0] // Allocate memory checkCudaErrors(hipMalloc(&this->d_indicatorMat, size*sizeof(unsigned int))); // Copy checkCudaErrors(hipMemcpy(this->d_indicatorMat, initmat, size*sizeof(unsigned int), hipMemcpyHostToDevice)); } void ClusterTree::destroyIndicatorMatrix(){ if (this->d_indicatorMat != NULL) checkCudaErrors(hipFree(this->d_indicatorMat)); this->d_indicatorMat = NULL; } void ClusterTree::create2Childs(node* current){ // Allocate memory for new nodes node* left = new node; node* right = new node; // A proper initialization of nodes left->leaf = 1; left->left = NULL; left->right = NULL; //left->mu = new float[patch_radius * 2 + 1]; left->index = ++node_count; left->parent = current; right->leaf = 1; right->left = NULL; right->right = NULL; //right->mu = new float[patch_radius * 2 + 1]; right->index = ++node_count; right->parent = current; current->leaf = 0; // This is no longer a leaf node current->left = left; current->right = right; } void ClusterTree::setW(int num){ if (num < 0) return; this->w = num; } int ClusterTree::getMaximumLevel(){ return this->maxlvl; } int ClusterTree::getMaximumOfPointsInTheLeafs(){ return this->maxpoints; } int ClusterTree::getW(){ return this->w; } float* ClusterTree::pickVecByPos(float* image, point p, int x_size, int y_size, int patch_radius){ int vecsize = SQR(2 * patch_radius + 1); int vs_half = vecsize / 2; int vec_x_size = 2 * patch_radius + 1; float* mu = new float[vecsize]; for (int i = 0; i < vecsize; ++i) mu[i] = 0; for (int py = -patch_radius; py <= patch_radius; py++) for (int px = -patch_radius; px <= patch_radius; px++){ int px1 = CLIP(0, px + p.x, x_size - 1); int py1 = CLIP(0, py + p.y, y_size - 1); mu[(py + patch_radius)*vec_x_size + (px + patch_radius)] += image[py1*x_size + px1]; } return mu; } struct nonneg{ __host__ __device__ bool operator()(const int2 x) { return (x.x != -1); } }; void ClusterTree::updateCoordinateList(node* n){ int2* d_list; checkCudaErrors(hipMalloc(&d_list, sizeof(int2) * image_size)); checkCudaErrors(hipMemset(d_list, 0, sizeof(int2) * image_size)); _getTheHeck << <grid, block >> >(d_list, d_indicatorMat, x_size, y_size, n->index); thrust::device_ptr<int2> d_list_(d_list); int size = image_size; //DIV_UP(image_size, divisor); int2* result = new int2[size]; thrust::device_vector<int2> d_vec(size); thrust::host_vector<int2> h_vec(size); thrust::copy_if(d_list_, d_list_ + size, d_vec.begin(), nonneg()); h_vec = d_vec; thrust::copy(h_vec.begin(), h_vec.end(), result); n->coordinates.clear(); for (int i = 0; ; ++i){ if (i == 0 && !result[i].x && !result[i].y) { // The first point could be 0 0 for some nodes point p; p.x = result[i].x; p.y = result[i].y; n->coordinates.push_back(p); continue; } if (result[i].x <= 0 && result[i].y <= 0) break; point p; p.x = result[i].x; p.y = 
result[i].y; n->coordinates.push_back(p); //#ifdef _DEBUG // std::cout << "Result x " << result[i].x << " y " << result[i].y << std::endl; // std::cout << i << std::endl; //#endif } delete[] result; checkCudaErrors(hipFree(d_list)); } // This function computes distortion measure J from mat1, mat2, d_out // Trat no time to write it //float ClusterTree::getDistortionMeasure(){ // return 0; //} int counter(0); void ClusterTree::buildTree(node* n, int level){ #ifdef _DEBUG CMatrix<float> result(x_size, y_size); #endif level++; node* current = n; unsigned int* d_indicator = getIndicatorMatrix(); if (current->coordinates.size() < maxpoints || level > maxlvl){ // int* d_link, *xarr, *yarr; int size = current->coordinates.size(); std::sort(current->coordinates.begin(), current->coordinates.end()); xarr = new int[size+1]; yarr = new int[size]; xarr[0] = size; for (int i = 0; i < size; i++){ xarr[i+1] = current->coordinates[i].x; yarr[i] = current->coordinates[i].y; } checkCudaErrors(hipMalloc(&d_link, sizeof(int) * (2*size+1))); //checkCudaErrors(hipMemset(d_link, size, sizeof(int)+1)); checkCudaErrors(hipMemcpy(d_link, xarr, sizeof(int) * (size+1), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_link+ 1 + size, yarr, sizeof(int) * size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( _fillRefMat), dim3(grid), dim3(block), 0, 0, this->refmat, d_indicator, x_size, y_size, d_link, current->index); delete[] xarr; delete[] yarr; //std::cout << "Dumped " << size << " points" << std::endl; current->coordinates.clear(); return; } point p1, p2; // define and initialize variables // Sample two random positions in the image p1 = sampleRand(current); p2 = sampleRand(current); // Initialize mu vectors float *mu1 = pickVecByPos(image.data(), p1, x_size, y_size, patch_radius); float *mu2 = pickVecByPos(image.data(), p2, x_size, y_size, patch_radius); double mu1_normalizer, mu2_normalizer; hipStream_t s1; hipStream_t s2; hipStreamCreate(&s1); hipStreamCreate(&s2); // ========================== // K-Means steps for (int k = 0; k < 5; ++k){ checkCudaErrors(hipMemset(d_step_indicator, 0, sizeof(float) * image_size)); // Reset the function // Update values on the device checkCudaErrors(hipMemcpyAsync(d_mu1, mu1, sizeof(float) * SQR(2 * patch_radius + 1), hipMemcpyHostToDevice, s1)); checkCudaErrors(hipMemcpyAsync(d_mu2, mu2, sizeof(float) * SQR(2 * patch_radius + 1), hipMemcpyHostToDevice, s2)); // Assign step _distacesToAll << < grid, block, 0, s1 >> >(d_image, d_mat1, d_filter, x_size, y_size, d_mu1, d_indicator, current->index, patch_radius, inv_sqr_sigma); _distacesToAll << < grid, block, 0, s2 >> >(d_image, d_mat2, d_filter, x_size, y_size, d_mu2, d_indicator, current->index, patch_radius, inv_sqr_sigma); _compareTwoMatrixes << < grid, block >> >(d_mat1, d_mat2, d_step_indicator, d_indicator, current->index, x_size, y_size, 1); //checkCudaErrors(hipMemcpy((void*)result.data(), d_step_indicator, sizeof(float) * result.size(), hipMemcpyDeviceToHost)); ////result.writeToTXT("res.txt"); //result.normalize(0, 255); //Display.Display(result, "res"); thrust::device_ptr<float> d_step_indicator_(d_step_indicator); // Update step // Compute mu1 through reduction mu1_normalizer = thrust::reduce(d_step_indicator_, d_step_indicator_ + image_size); if (mu1_normalizer == 0){ int* d_link, *xarr, *yarr; int size = current->coordinates.size(); std::sort(current->coordinates.begin(), current->coordinates.end()); xarr = new int[size + 1]; yarr = new int[size]; xarr[0] = size; for (int i = 0; i < size; i++){ xarr[i + 1] = 
current->coordinates[i].x; yarr[i] = current->coordinates[i].y; } checkCudaErrors(hipMalloc(&d_link, sizeof(int) * (2 * size + 1))); //checkCudaErrors(hipMemset(d_link, size, sizeof(int)+1)); checkCudaErrors(hipMemcpy(d_link, xarr, sizeof(int) * (size + 1), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_link + 1 + size, yarr, sizeof(int) * size, hipMemcpyHostToDevice)); _fillRefMat << <grid, block >> >(this->refmat, d_indicator, x_size, y_size, d_link, current->index); delete[] xarr; delete[] yarr; //std::cout << "Dumped " << size << " points" << std::endl; current->coordinates.clear(); return; } //#pragma omp parallel for for (int j = -patch_radius; j <= patch_radius; j++) for (int i = -patch_radius; i <= patch_radius; i++){ //int sh = (patch_radius + j) * image_size; _shiftMat << <grid, block >> >(d_shifted, d_image, x_size, y_size, i, j, d_step_indicator); thrust::device_ptr<float> d_shifted_(d_shifted); double val = thrust::reduce(d_shifted_, d_shifted_ + image_size); mu1[(patch_radius + i) + (2 * patch_radius + 1)*(patch_radius + j)] = val / mu1_normalizer; } // Compute mu2 through reduction _compareTwoMatrixes << < grid, block >> >(d_mat2, d_mat1, d_step_indicator, d_indicator, current->index, x_size, y_size, 0); mu2_normalizer = thrust::reduce(d_step_indicator_, d_step_indicator_ + image_size); if (mu2_normalizer == 0){ int* d_link, *xarr, *yarr; int size = current->coordinates.size(); std::sort(current->coordinates.begin(), current->coordinates.end()); xarr = new int[size + 1]; yarr = new int[size]; xarr[0] = size; for (int i = 0; i < size; i++){ xarr[i + 1] = current->coordinates[i].x; yarr[i] = current->coordinates[i].y; } checkCudaErrors(hipMalloc(&d_link, sizeof(int) * (2 * size + 1))); //checkCudaErrors(hipMemset(d_link, size, sizeof(int)+1)); checkCudaErrors(hipMemcpy(d_link, xarr, sizeof(int) * (size + 1), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_link + 1 + size, yarr, sizeof(int) * size, hipMemcpyHostToDevice)); _fillRefMat << <grid, block >> >(this->refmat, d_indicator, x_size, y_size, d_link, current->index); delete[] xarr; delete[] yarr; //std::cout << "Dumped " << size << " points" << std::endl; current->coordinates.clear(); return; } //#pragma omp parallel for for (int j = -patch_radius; j <= patch_radius; j++) for (int i = -patch_radius; i <= patch_radius; i++){ //int sh = (patch_radius + j) * image_size; _shiftMat << <grid, block >> >(d_shifted, d_image, x_size, y_size, i, j, d_step_indicator); thrust::device_ptr<float> d_shifted_(d_shifted); double val = thrust::reduce(d_shifted_, d_shifted_ + image_size); mu2[(patch_radius + i) + (2 * patch_radius + 1)*(patch_radius + j)] = val / mu2_normalizer; } #ifdef _DEBUG assert((mu1_normalizer + mu2_normalizer == (x_size*y_size)) || level > 1); #endif } #ifdef _DEBUG if ((counter % 1) == 0){ // tmp display step float* tmp; checkCudaErrors(hipMalloc(&tmp, sizeof(float) * image_size)); checkCudaErrors(hipMemset(tmp, 0, sizeof(float) * image_size)); _matIntToFloat << <grid, block >> >(d_indicator, tmp, x_size, y_size); checkCudaErrors(hipMemcpy((void*)result.data(), tmp, sizeof(float) * result.size(), hipMemcpyDeviceToHost)); //result.writeToTXT("res.txt"); result.normalize(0, 255); Display.Display(result, "res"); result.writeToPGM(SSTR("notes/images/kmeansfull/salesman" << counter << ".pgm").c_str()); } counter++; #endif // ========================== // Go to the next level // Allocate new nodes create2Childs(current); current->coordinates.clear(); // oh save some memory, please.. 
	// Update main indicator matrix
	_compareTwoMatrixes << < grid, block, 0, s1 >> >(d_mat1, d_mat2, d_step_indicator, d_indicator, current->index, x_size, y_size, 1);
	_updateIndicator << <grid, block, 0, s1 >> >(d_indicator, x_size, y_size, d_step_indicator, current->left->index);
	_compareTwoMatrixes << < grid, block, 0, s2 >> >(d_mat2, d_mat1, d_step_indicator, d_indicator, current->index, x_size, y_size, 0);
	_updateIndicator << <grid, block, 0, s2 >> >(d_indicator, x_size, y_size, d_step_indicator, current->right->index);
	// Assign the right pixels to each child
	updateCoordinateList(current->right);
	updateCoordinateList(current->left);
#ifdef _DEBUG
	// Check sizes of lists
	int ss1 = current->left->coordinates.size();
	assert(mu1_normalizer == ss1);
	int ss2 = current->right->coordinates.size();
	assert(mu2_normalizer == ss2);
	std::cout << ss1 + ss2 << " " << x_size*y_size << std::endl;
	assert(((ss1 + ss2) == x_size*y_size) || level > 1);
#endif
	hipStreamDestroy(s1);
	hipStreamDestroy(s2);
	// Declare foo as array of array 8 of pointer to pointer
	// to function returning a pointer to array of pointer to char
	// and we need to go deeper
	if ( current->left != NULL) buildTree(current->left, level);
	if ( current->right != NULL) buildTree(current->right, level);
	//std::cout << node_count << std::endl;
};

CMatrix<float> ClusterTree::nlm(){
	CMatrix<float> result(x_size, y_size);
	float* d_out;
	checkCudaErrors(hipMalloc(&d_out, sizeof(float)*image_size));
	checkCudaErrors(hipMemset(d_out, 0, sizeof(float)*image_size));
	_nlmCudaList << < grid, block >> >(d_image, d_out, d_filter, x_size, y_size, refmat, patch_radius, inv_sqr_sigma, this->w);
	checkCudaErrors(hipMemcpy(result.data(), d_out, sizeof(float)*image_size, hipMemcpyDeviceToHost));
	checkCudaErrors(hipFree(d_out));
	return result;
}
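The kernels and host code above depend on a handful of helpers declared in clustertree.h and the project's helper headers, none of which are part of this dump. A minimal sketch of what the macros are assumed to expand to, based only on how they are used above (the names come from the code; the bodies are assumptions, not the project's actual definitions):

#include <cmath>  // ceil(), used by the DIV_UP sketch below

// Clamp v into the closed range [lo, hi]; used to keep patch accesses inside the image.
#define CLIP(lo, v, hi) (((v) < (lo)) ? (lo) : (((v) > (hi)) ? (hi) : (v)))
// Square of x; used for patch vector sizes such as SQR(2 * patch_radius + 1).
#define SQR(x) ((x) * (x))
// Ceiling division; used to size the launch grid from the image dimensions and block size.
#define DIV_UP(a, b) ((unsigned int)ceil((float)(a) / (float)(b)))

checkCudaErrors(...) and SSTR(...) are likewise assumed to come from those headers (a CUDA-samples-style error-check wrapper and a stringstream-to-string macro, respectively); they are used above but never defined in this file.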
0951d23c5b1ecec14b7b8d3988f2ad2b7800913a.cu
#include "clustertree.h" #ifndef __linux__ #include <iso646.h> #include <helpers/ImageDisplay.h> #endif #include <thrust/reduce.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/copy.h> #include <algorithm> #ifdef _DEBUG ImageDisplay::ImageDisplay Display; #endif __global__ void _distacesToAll(float* d_image, float* d_out, float* filter, int x_size, int y_size, float* mu, unsigned int* indicator, int node_index, int patch_radius, float inv_sqr_sigma){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if(indicator[yId*x_size + xId] != node_index){ d_out[yId*x_size + xId] = -1; return; } int center = patch_radius; int vec_x_size = 2 * patch_radius + 1; float distance(0); // go through each patch for (int py = -patch_radius; py <= patch_radius; py++) for (int px = -patch_radius; px <= patch_radius; px++){ // Main patch int px1 = CLIP(0, xId + px, x_size - 1); int py1 = CLIP(0, yId + py, y_size - 1); // Patch in the window //int px2 = CLIP(0, xpos + px, x_size - 1); //int py2 = CLIP(0, ypos + py, y_size - 1); float tmp = d_image[px1 + py1*x_size] - mu[(px + patch_radius) + (py + patch_radius)*vec_x_size]; distance += tmp*tmp*filter[center + px] * filter[center + py]; }// go through each patch float w = exp(-distance * inv_sqr_sigma); __syncthreads(); d_out[yId*x_size + xId] = w; } __global__ void _compareTwoMatrixes(float *first_mat, float *second_mat, float *d_out, unsigned int* indicator, int node_index, int x_size, int y_size, int strictly){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; d_out[yId*x_size + xId] = 0; if (indicator[yId*x_size + xId] == node_index){ // Read values float val1 = first_mat[yId*x_size + xId]; float val2 = second_mat[yId*x_size + xId]; if (strictly){ if (val1>val2) d_out[yId*x_size + xId] = 1; } else{ if (val1 >= val2) d_out[yId*x_size + xId] = 1; } } } // Updates global indicator mat based on local step matrix __global__ void _updateIndicatorMatrix(unsigned int* d_indicator, int x_size, int y_size, float* d_step_indicator, int index1, int index2){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_step_indicator[yId*x_size + xId] == 1) d_indicator[yId*x_size + xId] = index1; if (d_step_indicator[yId*x_size + xId] == 0) d_indicator[yId*x_size + xId] = index2; } __global__ void _matIntToFloat(unsigned int *d_in, float* d_out, int x_size, int y_size){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; d_out[yId*x_size + xId] = (float)d_in[yId*x_size + xId]; } __global__ void _shiftMat(float* d_shifted, float* d_image, int x_size, int y_size, int x_shift, int y_shift, float* d_indicator){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (!d_indicator[yId*x_size + xId]){ d_shifted[yId*x_size + xId] = 0; // There could be smth left from previous calls return; } int x = CLIP(0, xId + x_shift, x_size - 1); int y = CLIP(0, yId + y_shift, y_size - 1); d_shifted[yId*x_size + xId] = 
d_image[y*x_size + x]; } __global__ void _updateIndicator(unsigned int* d_indicator, int x_size, int y_size, float* d_mask, int index){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_mask[yId*x_size + xId] == 1) d_indicator[yId*x_size + xId] = index; } __global__ void _getTheHeck(int2* list, unsigned int* d_indicator, int x_size, int y_size, int index){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_indicator[yId*x_size + xId] != index){ int2 p; p.x = -1; p.y = -1; list[yId*x_size + xId] = p; } else{ int2 p; p.x = xId; p.y = yId; list[yId*x_size + xId] = p; } } __global__ void _fillRefMat(int** refmat, unsigned int* d_indicator, int x_size, int y_size, int* reference, int index){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; if (d_indicator[yId*x_size + xId] == index) refmat[yId*x_size + xId] = reference; } __global__ void _nlmCudaList(float* d_image, float* d_out, float* filter, int x_size, int y_size, int** refmat, int patch_radius, float inv_sqr_sigma, int w){ int xId = blockDim.x * blockIdx.x + threadIdx.x; int yId = blockDim.y * blockIdx.y + threadIdx.y; // Check global image boundaries if (xId >= x_size || yId >= y_size) return; int center = patch_radius; float new_value(0); float normalizer(0); //int w = 1; for (int nx = -w; nx <= w; nx++) for (int ny = -w; ny <= w; ny++) { if ((ny + yId) >= 0 && (ny + yId) < y_size) // we shouldn't fall over the edge of the disc world if ((nx + xId) >= 0 && (nx + xId) < x_size) // Does it make sense to peek there at all? 
However we need to compute patch with it's corresponding list if ((refmat[(yId + ny)*x_size + (xId + nx)] != refmat[(yId)*x_size + (xId)]) || (nx==0 && ny==0)) { int* list = refmat[(yId + ny)*x_size + (xId + nx)]; int numel = list[0]; // go through list of pixels for (int k = 1; k < numel; k++){ float distance(0); // go through each patch for (int py = -patch_radius; py <= patch_radius; py++) for (int px = -patch_radius; px <= patch_radius; px++){ // Main patch int px1 = CLIP(0, xId + px, x_size - 1); int py1 = CLIP(0, yId + py, y_size - 1); // Patch in the window int px2 = CLIP(0, list[k] + px, x_size - 1); int py2 = CLIP(0, list[k + numel] + py, y_size - 1); float tmp = d_image[px1 + py1*x_size] - d_image[px2 + py2*x_size]; distance += tmp*tmp*filter[center + px] * filter[center + py]; } float w = exp(-distance * inv_sqr_sigma); new_value += w*d_image[list[k] + list[k + numel] * x_size]; normalizer += w; } // Go through list with pixels } // Makes sense if we are not out }// Go through neig // We need syncthreads before writing the final result //__syncthreads(); d_out[yId*x_size + xId] = new_value / normalizer; } ClusterTree::ClusterTree(CMatrix<float> data, int patch_radius, float sqr_sigma) : x_size(data.xSize()), y_size(data.ySize()), patch_radius(patch_radius), sqr_sigma(sqr_sigma), image(data){ this->inv_sqr_sigma = 1.0f / sqr_sigma; // Allocate memory for the reference matrix //this->refmat = new node*[this->x_size*this->y_size]; this->Head = new node; this->Head->index = 0; this->Head->leaf = 1; // this could be our only node so for now it's a leaf this->Head->left = NULL; // those pointers shouldn't point anywhere this->Head->right = NULL; // for now // Initialize domain list for (int j = 0; j < y_size; ++j) for (int i = 0; i < x_size; ++i){ point p; p.x = i; p.y = j; this->Head->coordinates.push_back(p); } node_count = 0; //initialize variable (head excluded) allocateIdicatorMatrix(); mu_x_size = (2 * patch_radius + 1); // Device initialization this->block = dim3(32, 32, 1); this->grid.x = DIV_UP(image.xSize(), (float)block.x); this->grid.y = DIV_UP(image.ySize(), (float)block.y); // create a gauss lut for 1D this->h_filter = new float[mu_x_size]; float* center = h_filter + patch_radius; for (int x = -patch_radius; x <= patch_radius; ++x) *(center + x) = std::exp(-0.5*x*x / (patch_radius*patch_radius)); this->image_size = data.xSize()*data.ySize(); this->maxlvl = 1; this->maxpoints = 50; this->w = 1; // Initialize a random generator #ifdef _DEBUG srand(5); #else srand(time(NULL)); #endif // Allocate memory on the device checkCudaErrors(cudaMalloc((void ***)&this->refmat, sizeof(int*) * image_size)); checkCudaErrors(cudaMalloc((void**)&d_image, sizeof(float) * image_size)); checkCudaErrors(cudaMalloc((void**)&d_mat1, sizeof(float) * image_size)); checkCudaErrors(cudaMemset(d_mat1, 0, sizeof(float) * image_size)); checkCudaErrors(cudaMalloc((void**)&d_mat2, sizeof(float) * image_size)); checkCudaErrors(cudaMemset(d_mat2, 0, sizeof(float) * image_size)); checkCudaErrors(cudaMalloc((void**)&d_step_indicator, sizeof(float) * image_size)); checkCudaErrors(cudaMemset(d_step_indicator, 0, sizeof(float) * image_size)); checkCudaErrors(cudaMalloc((void**)&d_filter, sizeof(float) * (2 * patch_radius + 1))); // Copy to the device checkCudaErrors(cudaMemcpy(d_image, image.data(), sizeof(float) * image_size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * (mu_x_size), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void**)&d_mu1, sizeof(float) * 
SQR(2 * patch_radius + 1))); checkCudaErrors(cudaMalloc((void**)&d_mu2, sizeof(float) * SQR(2 * patch_radius + 1))); checkCudaErrors(cudaMalloc(&d_shifted, sizeof(float) * image_size)); }; void ClusterTree::setMaximumLevel(int lvl){ if (lvl > 0) this->maxlvl = lvl; else{ std::cerr << "Max level could be between 0 or 10" << std::endl; if (lvl <= 0) { lvl = 1; std::cout << "Max level has been set to 1"; }; if (lvl > 10) { lvl = 10; std::cout << "Max level has been set to 10"; }; } } void ClusterTree::setMaximumOfPointsInTheLeafs(int points){ if (points > 2) this->maxpoints = points; else{ std::cerr << "Max point in leaf should be more than 2" << std::endl; } } ClusterTree::~ClusterTree(){ // Everything allocated should be deleted destroyTree(this->Head); //destroyIndicatorMatrix(); //delete[] this->refmat; cudaFree(d_image); cudaFree(d_mat1); cudaFree(d_mat2); cudaFree(d_mu1); cudaFree(d_mu2); cudaFree(d_shifted); cudaFree(d_step_indicator); cudaFree(d_filter); cudaFree(d_indicatorMat); //for (int i = 0; i < image_size; i++){ // if (refmat[i]) // cudaFree(refmat[i]); //} cudaFree(refmat); //cudaFree(d_indicator); checkCudaErrors(cudaDeviceReset()); } void ClusterTree::displayCoordinateList(node* n){ for (int i = 0; i < n->coordinates.size(); ++i){ std::cout << "x: " << n->coordinates[i].x << "y: " << n->coordinates[i].y << std::endl; } } node* ClusterTree::getHead(){ return this->Head; }; int ClusterTree::getIndexOfNode(node* n){ return n->index; } unsigned int* ClusterTree::getIndicatorMatrix(){ if (this->d_indicatorMat == NULL) this->allocateIdicatorMatrix(); return this->d_indicatorMat; } void ClusterTree::destroyTree(node* p){ if (p->left){ // Does it have left node? this->destroyTree(p->left); // Traverse further p->left = NULL; } if (p->right){ // Does it have right node? this->destroyTree(p->right); // Traverse further p->right = NULL; } // Doesn't have childs but still considered non leaf if (!p->right && !p->left && !p->leaf){ p->leaf = 1; } if (p->leaf){ // Is this a leaf node? 
(shouldn't have left\right) //delete[] p->mu; // Kill it's allocated memory delete p; // Kill it } } point ClusterTree::sampleRand(node* n){ unsigned int minx = 0; unsigned int maxx = n->coordinates.size()-1; unsigned int index = rand() % (maxx - minx) + minx; // random value in [min, max] point p; p.x = n->coordinates[index].x; p.y = n->coordinates[index].y; //#ifdef _DEBUG // std::cout << "xval: " << n->coordinates[index].x << std::endl; // std::cout << "yval: " << n->coordinates[index].y << std::endl; //#endif return p; } void ClusterTree::allocateIdicatorMatrix(){ int size = this->x_size*this->y_size; unsigned int* initmat = new unsigned int[size]; std::fill(initmat, initmat + size, 0); // Fill with initial values [Head node index is 0] // Allocate memory checkCudaErrors(cudaMalloc(&this->d_indicatorMat, size*sizeof(unsigned int))); // Copy checkCudaErrors(cudaMemcpy(this->d_indicatorMat, initmat, size*sizeof(unsigned int), cudaMemcpyHostToDevice)); } void ClusterTree::destroyIndicatorMatrix(){ if (this->d_indicatorMat != NULL) checkCudaErrors(cudaFree(this->d_indicatorMat)); this->d_indicatorMat = NULL; } void ClusterTree::create2Childs(node* current){ // Allocate memory for new nodes node* left = new node; node* right = new node; // A proper initialization of nodes left->leaf = 1; left->left = NULL; left->right = NULL; //left->mu = new float[patch_radius * 2 + 1]; left->index = ++node_count; left->parent = current; right->leaf = 1; right->left = NULL; right->right = NULL; //right->mu = new float[patch_radius * 2 + 1]; right->index = ++node_count; right->parent = current; current->leaf = 0; // This is no longer a leaf node current->left = left; current->right = right; } void ClusterTree::setW(int num){ if (num < 0) return; this->w = num; } int ClusterTree::getMaximumLevel(){ return this->maxlvl; } int ClusterTree::getMaximumOfPointsInTheLeafs(){ return this->maxpoints; } int ClusterTree::getW(){ return this->w; } float* ClusterTree::pickVecByPos(float* image, point p, int x_size, int y_size, int patch_radius){ int vecsize = SQR(2 * patch_radius + 1); int vs_half = vecsize / 2; int vec_x_size = 2 * patch_radius + 1; float* mu = new float[vecsize]; for (int i = 0; i < vecsize; ++i) mu[i] = 0; for (int py = -patch_radius; py <= patch_radius; py++) for (int px = -patch_radius; px <= patch_radius; px++){ int px1 = CLIP(0, px + p.x, x_size - 1); int py1 = CLIP(0, py + p.y, y_size - 1); mu[(py + patch_radius)*vec_x_size + (px + patch_radius)] += image[py1*x_size + px1]; } return mu; } struct nonneg{ __host__ __device__ bool operator()(const int2 x) { return (x.x != -1); } }; void ClusterTree::updateCoordinateList(node* n){ int2* d_list; checkCudaErrors(cudaMalloc(&d_list, sizeof(int2) * image_size)); checkCudaErrors(cudaMemset(d_list, 0, sizeof(int2) * image_size)); _getTheHeck << <grid, block >> >(d_list, d_indicatorMat, x_size, y_size, n->index); thrust::device_ptr<int2> d_list_(d_list); int size = image_size; //DIV_UP(image_size, divisor); int2* result = new int2[size]; thrust::device_vector<int2> d_vec(size); thrust::host_vector<int2> h_vec(size); thrust::copy_if(d_list_, d_list_ + size, d_vec.begin(), nonneg()); h_vec = d_vec; thrust::copy(h_vec.begin(), h_vec.end(), result); n->coordinates.clear(); for (int i = 0; ; ++i){ if (i == 0 && !result[i].x && !result[i].y) { // The first point could be 0 0 for some nodes point p; p.x = result[i].x; p.y = result[i].y; n->coordinates.push_back(p); continue; } if (result[i].x <= 0 && result[i].y <= 0) break; point p; p.x = result[i].x; p.y = 
result[i].y; n->coordinates.push_back(p); //#ifdef _DEBUG // std::cout << "Result x " << result[i].x << " y " << result[i].y << std::endl; // std::cout << i << std::endl; //#endif } delete[] result; checkCudaErrors(cudaFree(d_list)); } // This function computes distortion measure J from mat1, mat2, d_out // Trat no time to write it //float ClusterTree::getDistortionMeasure(){ // return 0; //} int counter(0); void ClusterTree::buildTree(node* n, int level){ #ifdef _DEBUG CMatrix<float> result(x_size, y_size); #endif level++; node* current = n; unsigned int* d_indicator = getIndicatorMatrix(); if (current->coordinates.size() < maxpoints || level > maxlvl){ // int* d_link, *xarr, *yarr; int size = current->coordinates.size(); std::sort(current->coordinates.begin(), current->coordinates.end()); xarr = new int[size+1]; yarr = new int[size]; xarr[0] = size; for (int i = 0; i < size; i++){ xarr[i+1] = current->coordinates[i].x; yarr[i] = current->coordinates[i].y; } checkCudaErrors(cudaMalloc(&d_link, sizeof(int) * (2*size+1))); //checkCudaErrors(cudaMemset(d_link, size, sizeof(int)+1)); checkCudaErrors(cudaMemcpy(d_link, xarr, sizeof(int) * (size+1), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_link+ 1 + size, yarr, sizeof(int) * size, cudaMemcpyHostToDevice)); _fillRefMat<<<grid, block>>>(this->refmat, d_indicator, x_size, y_size, d_link, current->index); delete[] xarr; delete[] yarr; //std::cout << "Dumped " << size << " points" << std::endl; current->coordinates.clear(); return; } point p1, p2; // define and initialize variables // Sample two random positions in the image p1 = sampleRand(current); p2 = sampleRand(current); // Initialize mu vectors float *mu1 = pickVecByPos(image.data(), p1, x_size, y_size, patch_radius); float *mu2 = pickVecByPos(image.data(), p2, x_size, y_size, patch_radius); double mu1_normalizer, mu2_normalizer; cudaStream_t s1; cudaStream_t s2; cudaStreamCreate(&s1); cudaStreamCreate(&s2); // ========================== // K-Means steps for (int k = 0; k < 5; ++k){ checkCudaErrors(cudaMemset(d_step_indicator, 0, sizeof(float) * image_size)); // Reset the function // Update values on the device checkCudaErrors(cudaMemcpyAsync(d_mu1, mu1, sizeof(float) * SQR(2 * patch_radius + 1), cudaMemcpyHostToDevice, s1)); checkCudaErrors(cudaMemcpyAsync(d_mu2, mu2, sizeof(float) * SQR(2 * patch_radius + 1), cudaMemcpyHostToDevice, s2)); // Assign step _distacesToAll << < grid, block, 0, s1 >> >(d_image, d_mat1, d_filter, x_size, y_size, d_mu1, d_indicator, current->index, patch_radius, inv_sqr_sigma); _distacesToAll << < grid, block, 0, s2 >> >(d_image, d_mat2, d_filter, x_size, y_size, d_mu2, d_indicator, current->index, patch_radius, inv_sqr_sigma); _compareTwoMatrixes << < grid, block >> >(d_mat1, d_mat2, d_step_indicator, d_indicator, current->index, x_size, y_size, 1); //checkCudaErrors(cudaMemcpy((void*)result.data(), d_step_indicator, sizeof(float) * result.size(), cudaMemcpyDeviceToHost)); ////result.writeToTXT("res.txt"); //result.normalize(0, 255); //Display.Display(result, "res"); thrust::device_ptr<float> d_step_indicator_(d_step_indicator); // Update step // Compute mu1 through reduction mu1_normalizer = thrust::reduce(d_step_indicator_, d_step_indicator_ + image_size); if (mu1_normalizer == 0){ int* d_link, *xarr, *yarr; int size = current->coordinates.size(); std::sort(current->coordinates.begin(), current->coordinates.end()); xarr = new int[size + 1]; yarr = new int[size]; xarr[0] = size; for (int i = 0; i < size; i++){ xarr[i + 1] = current->coordinates[i].x; 
yarr[i] = current->coordinates[i].y; } checkCudaErrors(cudaMalloc(&d_link, sizeof(int) * (2 * size + 1))); //checkCudaErrors(cudaMemset(d_link, size, sizeof(int)+1)); checkCudaErrors(cudaMemcpy(d_link, xarr, sizeof(int) * (size + 1), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_link + 1 + size, yarr, sizeof(int) * size, cudaMemcpyHostToDevice)); _fillRefMat << <grid, block >> >(this->refmat, d_indicator, x_size, y_size, d_link, current->index); delete[] xarr; delete[] yarr; //std::cout << "Dumped " << size << " points" << std::endl; current->coordinates.clear(); return; } //#pragma omp parallel for for (int j = -patch_radius; j <= patch_radius; j++) for (int i = -patch_radius; i <= patch_radius; i++){ //int sh = (patch_radius + j) * image_size; _shiftMat << <grid, block >> >(d_shifted, d_image, x_size, y_size, i, j, d_step_indicator); thrust::device_ptr<float> d_shifted_(d_shifted); double val = thrust::reduce(d_shifted_, d_shifted_ + image_size); mu1[(patch_radius + i) + (2 * patch_radius + 1)*(patch_radius + j)] = val / mu1_normalizer; } // Compute mu2 through reduction _compareTwoMatrixes << < grid, block >> >(d_mat2, d_mat1, d_step_indicator, d_indicator, current->index, x_size, y_size, 0); mu2_normalizer = thrust::reduce(d_step_indicator_, d_step_indicator_ + image_size); if (mu2_normalizer == 0){ int* d_link, *xarr, *yarr; int size = current->coordinates.size(); std::sort(current->coordinates.begin(), current->coordinates.end()); xarr = new int[size + 1]; yarr = new int[size]; xarr[0] = size; for (int i = 0; i < size; i++){ xarr[i + 1] = current->coordinates[i].x; yarr[i] = current->coordinates[i].y; } checkCudaErrors(cudaMalloc(&d_link, sizeof(int) * (2 * size + 1))); //checkCudaErrors(cudaMemset(d_link, size, sizeof(int)+1)); checkCudaErrors(cudaMemcpy(d_link, xarr, sizeof(int) * (size + 1), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_link + 1 + size, yarr, sizeof(int) * size, cudaMemcpyHostToDevice)); _fillRefMat << <grid, block >> >(this->refmat, d_indicator, x_size, y_size, d_link, current->index); delete[] xarr; delete[] yarr; //std::cout << "Dumped " << size << " points" << std::endl; current->coordinates.clear(); return; } //#pragma omp parallel for for (int j = -patch_radius; j <= patch_radius; j++) for (int i = -patch_radius; i <= patch_radius; i++){ //int sh = (patch_radius + j) * image_size; _shiftMat << <grid, block >> >(d_shifted, d_image, x_size, y_size, i, j, d_step_indicator); thrust::device_ptr<float> d_shifted_(d_shifted); double val = thrust::reduce(d_shifted_, d_shifted_ + image_size); mu2[(patch_radius + i) + (2 * patch_radius + 1)*(patch_radius + j)] = val / mu2_normalizer; } #ifdef _DEBUG assert((mu1_normalizer + mu2_normalizer == (x_size*y_size)) || level > 1); #endif } #ifdef _DEBUG if ((counter % 1) == 0){ // tmp display step float* tmp; checkCudaErrors(cudaMalloc(&tmp, sizeof(float) * image_size)); checkCudaErrors(cudaMemset(tmp, 0, sizeof(float) * image_size)); _matIntToFloat << <grid, block >> >(d_indicator, tmp, x_size, y_size); checkCudaErrors(cudaMemcpy((void*)result.data(), tmp, sizeof(float) * result.size(), cudaMemcpyDeviceToHost)); //result.writeToTXT("res.txt"); result.normalize(0, 255); Display.Display(result, "res"); result.writeToPGM(SSTR("notes/images/kmeansfull/salesman" << counter << ".pgm").c_str()); } counter++; #endif // ========================== // Go to the next level // Allocate new nodes create2Childs(current); current->coordinates.clear(); // oh save some memory, please.. 
	// Update main indicator matrix
	_compareTwoMatrixes << < grid, block, 0, s1 >> >(d_mat1, d_mat2, d_step_indicator, d_indicator, current->index, x_size, y_size, 1);
	_updateIndicator << <grid, block, 0, s1 >> >(d_indicator, x_size, y_size, d_step_indicator, current->left->index);
	_compareTwoMatrixes << < grid, block, 0, s2 >> >(d_mat2, d_mat1, d_step_indicator, d_indicator, current->index, x_size, y_size, 0);
	_updateIndicator << <grid, block, 0, s2 >> >(d_indicator, x_size, y_size, d_step_indicator, current->right->index);
	// Assign the right pixels to each child
	updateCoordinateList(current->right);
	updateCoordinateList(current->left);
#ifdef _DEBUG
	// Check sizes of lists
	int ss1 = current->left->coordinates.size();
	assert(mu1_normalizer == ss1);
	int ss2 = current->right->coordinates.size();
	assert(mu2_normalizer == ss2);
	std::cout << ss1 + ss2 << " " << x_size*y_size << std::endl;
	assert(((ss1 + ss2) == x_size*y_size) || level > 1);
#endif
	cudaStreamDestroy(s1);
	cudaStreamDestroy(s2);
	// Declare foo as array of array 8 of pointer to pointer
	// to function returning a pointer to array of pointer to char
	// and we need to go deeper
	if ( current->left != NULL) buildTree(current->left, level);
	if ( current->right != NULL) buildTree(current->right, level);
	//std::cout << node_count << std::endl;
};

CMatrix<float> ClusterTree::nlm(){
	CMatrix<float> result(x_size, y_size);
	float* d_out;
	checkCudaErrors(cudaMalloc(&d_out, sizeof(float)*image_size));
	checkCudaErrors(cudaMemset(d_out, 0, sizeof(float)*image_size));
	_nlmCudaList << < grid, block >> >(d_image, d_out, d_filter, x_size, y_size, refmat, patch_radius, inv_sqr_sigma, this->w);
	checkCudaErrors(cudaMemcpy(result.data(), d_out, sizeof(float)*image_size, cudaMemcpyDeviceToHost));
	checkCudaErrors(cudaFree(d_out));
	return result;
}
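For orientation, a hypothetical driver for the ClusterTree class defined in both files above. main(), the parameter values, the initial level argument of 0, and CMatrix<float>::readFromPGM are assumptions made for illustration; only the constructor, the setters, buildTree(), getHead(), nlm() and writeToPGM() appear in the source itself:

#include "clustertree.h"  // ClusterTree and CMatrix<float> (not part of this dump)

int main(){
	// Load a grayscale image; readFromPGM is an assumed CMatrix helper,
	// the code above only demonstrates writeToPGM.
	CMatrix<float> image;
	image.readFromPGM("input.pgm");

	// patch_radius and sqr_sigma are illustrative values, not taken from the source.
	ClusterTree tree(image, /*patch_radius=*/3, /*sqr_sigma=*/400.0f);
	tree.setMaximumLevel(6);                 // depth of the binary 2-means tree
	tree.setMaximumOfPointsInTheLeafs(250);  // stop splitting clusters below this size
	tree.setW(1);                            // half-width of the neighbour window used by nlm()

	// Recursively split the image into patch clusters (fills refmat on the device),
	// then run the cluster-restricted non-local means filter.
	tree.buildTree(tree.getHead(), 0);
	CMatrix<float> denoised = tree.nlm();
	denoised.writeToPGM("output.pgm");
	return 0;
}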