Dataset columns and string-length ranges:

hip_filename: string, length 5 to 84
hip_content: string, length 79 to 9.69M
cuda_filename: string, length 4 to 83
cuda_content: string, length 19 to 9.69M
26fc26a591590b48e8e26fbf3a6e1cb9b4f5205b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Ahmad Abdelfattah @author Azzam Haidar @generated from magmablas/zgeqrf_batched_smallsq.cu, normal z -> d, Mon Jun 25 18:24:15 2018 */ #include "magma_internal.h" #include "magma_templates.h" #include "sync.cuh" #include "batched_kernel_param.h" #define SLDA(N) ( (N==15||N==23||N==31)? (N+2) : (N+1) ) extern __shared__ double zdata[]; template<int N> __global__ void dgeqrf_batched_sq1d_reg_kernel( double **dA_array, magma_int_t ldda, double **dtau_array, magma_int_t *info_array, magma_int_t batchCount) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int batchid = blockIdx.x * blockDim.y + ty; if(batchid >= batchCount) return; if(tx >= N) return; const int slda = SLDA(N); double* dA = dA_array[batchid]; double* dtau = dtau_array[batchid]; magma_int_t* info = &info_array[batchid]; // shared memory pointers double* sA = (double*)(zdata + ty * slda * N); double* sdw = (double*)(zdata + blockDim.y * slda * N); sdw += ty * N; double rA[N] = {MAGMA_D_ZERO}; double alpha, tau, tmp, zsum, scale = MAGMA_D_ZERO; double sum = MAGMA_D_ZERO, norm = MAGMA_D_ZERO, beta; if( tx == 0 ){ (*info) = 0; } // init tau dtau[tx] = MAGMA_D_ZERO; // read #pragma unroll for(int i = 0; i < N; i++){ rA[i] = dA[ i * ldda + tx ]; } #pragma unroll for(int i = 0; i < N-1; i++){ sA[ i * slda + tx] = rA[i]; sdw[tx] = ( MAGMA_D_REAL(rA[i]) * MAGMA_D_REAL(rA[i]) + MAGMA_D_IMAG(rA[i]) * MAGMA_D_IMAG(rA[i]) ); magmablas_syncwarp(); alpha = sA[i * slda + i]; sum = MAGMA_D_ZERO; #pragma unroll for(int j = i; j < N; j++){ sum += sdw[j]; } norm = sqrt(sum); beta = -copysign(norm, real(alpha)); scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha - MAGMA_D_MAKE(beta, 0)); tau = MAGMA_D_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); if(tx == i){ dtau[i] = tau; } tmp = (tx == i)? MAGMA_D_MAKE(beta, MAGMA_D_ZERO) : rA[i] * scale; if(tx >= i){ rA[i] = tmp; } dA[ i * ldda + tx ] = rA[i]; rA[i] = (tx == i) ? MAGMA_D_ONE : rA[i]; rA[i] = (tx < i ) ? MAGMA_D_ZERO : rA[i]; tmp = MAGMA_D_CONJ( rA[i] ) * MAGMA_D_CONJ( tau ); magmablas_syncwarp(); #pragma unroll for(int j = i+1; j < N; j++){ sA[j * slda + tx] = rA[j] * tmp; } magmablas_syncwarp(); zsum = MAGMA_D_ZERO; #pragma unroll for(int j = i; j < N; j++){ zsum += sA[tx * slda + j]; } sA[tx * slda + N] = zsum; magmablas_syncwarp(); #pragma unroll for(int j = i+1; j < N; j++){ rA[j] -= rA[i] * sA[j * slda + N]; } magmablas_syncwarp(); } // write the last column dA[ (N-1) * ldda + tx ] = rA[N-1]; } /***************************************************************************//** Purpose ------- DGEQRF computes a QR factorization of a real M-by-N matrix A: A = Q * R. This is a batched version of the routine, and works only for small square matrices of size up to 32. Arguments --------- @param[in] n INTEGER The size of the matrix A. N >= 0. @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N) On entry, the M-by-N matrix A. On exit, the elements on and above the diagonal of the array contain the min(M,N)-by-N upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of min(m,n) elementary reflectors (see Further Details). 
@param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). To benefit from coalescent memory accesses LDDA must be divisible by 16. @param[out] dtau_array Array of pointers, dimension (batchCount). Each is a DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). @ingroup magma_geqrf_batched *******************************************************************************/ extern "C" magma_int_t magma_dgeqrf_batched_smallsq( magma_int_t n, double** dA_array, magma_int_t ldda, double **dtau_array, magma_int_t* info_array, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t arginfo = 0; magma_int_t m = n; if( (m < 0) || ( m > 32 ) ){ arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( m == 0 || n == 0) return 0; const magma_int_t ntcol = magma_get_dgeqrf_batched_ntcol(m, n); magma_int_t shmem = ( SLDA(m) * m * sizeof(double) ); shmem += ( m * sizeof(double) ); shmem *= ntcol; magma_int_t nth = magma_ceilpow2(m); magma_int_t gridx = magma_ceildiv(batchCount, ntcol); dim3 grid(gridx, 1, 1); dim3 threads(nth, ntcol, 1); switch(m){ case 1:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 1>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 2:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 2>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 3:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 3>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 4:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 4>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 5:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 5>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 6:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 6>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 7:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 7>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 8:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 8>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 9:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel< 9>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 10:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<10>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); 
break; case 11:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<11>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 12:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<12>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 13:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<13>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 14:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<14>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 15:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<15>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 16:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<16>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 17:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<17>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 18:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<18>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 19:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<19>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 20:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<20>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 21:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<21>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 22:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<22>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 23:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<23>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 24:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<24>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 25:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<25>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 26:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<26>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 27:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<27>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 28:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<28>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 29:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<29>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 30:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<30>), dim3(grid), 
dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 31:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<31>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; case 32:hipLaunchKernelGGL(( dgeqrf_batched_sq1d_reg_kernel<32>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, dtau_array, info_array, batchCount); break; default: printf("error: size %lld is not supported\n", (long long) m); } return arginfo; }
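Note on this row pair: the .hip file above is the hipify output of the .cu file that follows, and aside from the added #include "hip/hip_runtime.h" and the generated banner comment, the substantive rewrite is the kernel-launch syntax. Below is a minimal sketch with a hypothetical toy kernel (not part of MAGMA) contrasting the CUDA triple-chevron launch with the hipLaunchKernelGGL form used throughout the switch above.

#include <hip/hip_runtime.h>

// Toy kernel (hypothetical, not from MAGMA) used only to illustrate the
// launch-syntax rewrite applied to the dgeqrf_batched_sq1d_reg_kernel calls.
template <int N>
__global__ void toy_batched_kernel(double **dA_array, int ldda)
{
    // Each block increments the diagonal of its own small N x N matrix.
    double *dA = dA_array[blockIdx.x];
    if (threadIdx.x < N) dA[threadIdx.x * ldda + threadIdx.x] += 1.0;
}

void launch_toy(dim3 grid, dim3 threads, size_t shmem, hipStream_t stream,
                double **dA_array, int ldda)
{
    // CUDA form, as written in the original .cu file:
    //   toy_batched_kernel<8><<<grid, threads, shmem, stream>>>(dA_array, ldda);
    // HIP form emitted by hipify, as seen in the switch statement above:
    hipLaunchKernelGGL((toy_batched_kernel<8>), grid, threads, shmem, stream,
                       dA_array, ldda);
}

The extra parentheses around the template instantiation mirror what hipify emits, so the macro receives the kernel name as a single argument.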
26fc26a591590b48e8e26fbf3a6e1cb9b4f5205b.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Ahmad Abdelfattah @author Azzam Haidar @generated from magmablas/zgeqrf_batched_smallsq.cu, normal z -> d, Mon Jun 25 18:24:15 2018 */ #include "magma_internal.h" #include "magma_templates.h" #include "sync.cuh" #include "batched_kernel_param.h" #define SLDA(N) ( (N==15||N==23||N==31)? (N+2) : (N+1) ) extern __shared__ double zdata[]; template<int N> __global__ void dgeqrf_batched_sq1d_reg_kernel( double **dA_array, magma_int_t ldda, double **dtau_array, magma_int_t *info_array, magma_int_t batchCount) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int batchid = blockIdx.x * blockDim.y + ty; if(batchid >= batchCount) return; if(tx >= N) return; const int slda = SLDA(N); double* dA = dA_array[batchid]; double* dtau = dtau_array[batchid]; magma_int_t* info = &info_array[batchid]; // shared memory pointers double* sA = (double*)(zdata + ty * slda * N); double* sdw = (double*)(zdata + blockDim.y * slda * N); sdw += ty * N; double rA[N] = {MAGMA_D_ZERO}; double alpha, tau, tmp, zsum, scale = MAGMA_D_ZERO; double sum = MAGMA_D_ZERO, norm = MAGMA_D_ZERO, beta; if( tx == 0 ){ (*info) = 0; } // init tau dtau[tx] = MAGMA_D_ZERO; // read #pragma unroll for(int i = 0; i < N; i++){ rA[i] = dA[ i * ldda + tx ]; } #pragma unroll for(int i = 0; i < N-1; i++){ sA[ i * slda + tx] = rA[i]; sdw[tx] = ( MAGMA_D_REAL(rA[i]) * MAGMA_D_REAL(rA[i]) + MAGMA_D_IMAG(rA[i]) * MAGMA_D_IMAG(rA[i]) ); magmablas_syncwarp(); alpha = sA[i * slda + i]; sum = MAGMA_D_ZERO; #pragma unroll for(int j = i; j < N; j++){ sum += sdw[j]; } norm = sqrt(sum); beta = -copysign(norm, real(alpha)); scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha - MAGMA_D_MAKE(beta, 0)); tau = MAGMA_D_MAKE( (beta - real(alpha)) / beta, -imag(alpha) / beta ); if(tx == i){ dtau[i] = tau; } tmp = (tx == i)? MAGMA_D_MAKE(beta, MAGMA_D_ZERO) : rA[i] * scale; if(tx >= i){ rA[i] = tmp; } dA[ i * ldda + tx ] = rA[i]; rA[i] = (tx == i) ? MAGMA_D_ONE : rA[i]; rA[i] = (tx < i ) ? MAGMA_D_ZERO : rA[i]; tmp = MAGMA_D_CONJ( rA[i] ) * MAGMA_D_CONJ( tau ); magmablas_syncwarp(); #pragma unroll for(int j = i+1; j < N; j++){ sA[j * slda + tx] = rA[j] * tmp; } magmablas_syncwarp(); zsum = MAGMA_D_ZERO; #pragma unroll for(int j = i; j < N; j++){ zsum += sA[tx * slda + j]; } sA[tx * slda + N] = zsum; magmablas_syncwarp(); #pragma unroll for(int j = i+1; j < N; j++){ rA[j] -= rA[i] * sA[j * slda + N]; } magmablas_syncwarp(); } // write the last column dA[ (N-1) * ldda + tx ] = rA[N-1]; } /***************************************************************************//** Purpose ------- DGEQRF computes a QR factorization of a real M-by-N matrix A: A = Q * R. This is a batched version of the routine, and works only for small square matrices of size up to 32. Arguments --------- @param[in] n INTEGER The size of the matrix A. N >= 0. @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a DOUBLE PRECISION array on the GPU, dimension (LDDA,N) On entry, the M-by-N matrix A. On exit, the elements on and above the diagonal of the array contain the min(M,N)-by-N upper trapezoidal matrix R (R is upper triangular if m >= n); the elements below the diagonal, with the array TAU, represent the orthogonal matrix Q as a product of min(m,n) elementary reflectors (see Further Details). @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= max(1,M). 
To benefit from coalescent memory accesses LDDA must be divisible by 16. @param[out] dtau_array Array of pointers, dimension (batchCount). Each is a DOUBLE PRECISION array, dimension (min(M,N)) The scalar factors of the elementary reflectors (see Further Details). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. Further Details --------------- The matrix Q is represented as a product of elementary reflectors Q = H(1) H(2) . . . H(k), where k = min(m,n). Each H(i) has the form H(i) = I - tau * v * v' where tau is a real scalar, and v is a real vector with v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), and tau in TAU(i). @ingroup magma_geqrf_batched *******************************************************************************/ extern "C" magma_int_t magma_dgeqrf_batched_smallsq( magma_int_t n, double** dA_array, magma_int_t ldda, double **dtau_array, magma_int_t* info_array, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t arginfo = 0; magma_int_t m = n; if( (m < 0) || ( m > 32 ) ){ arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( m == 0 || n == 0) return 0; const magma_int_t ntcol = magma_get_dgeqrf_batched_ntcol(m, n); magma_int_t shmem = ( SLDA(m) * m * sizeof(double) ); shmem += ( m * sizeof(double) ); shmem *= ntcol; magma_int_t nth = magma_ceilpow2(m); magma_int_t gridx = magma_ceildiv(batchCount, ntcol); dim3 grid(gridx, 1, 1); dim3 threads(nth, ntcol, 1); switch(m){ case 1: dgeqrf_batched_sq1d_reg_kernel< 1><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 2: dgeqrf_batched_sq1d_reg_kernel< 2><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 3: dgeqrf_batched_sq1d_reg_kernel< 3><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 4: dgeqrf_batched_sq1d_reg_kernel< 4><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 5: dgeqrf_batched_sq1d_reg_kernel< 5><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 6: dgeqrf_batched_sq1d_reg_kernel< 6><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 7: dgeqrf_batched_sq1d_reg_kernel< 7><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 8: dgeqrf_batched_sq1d_reg_kernel< 8><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 9: dgeqrf_batched_sq1d_reg_kernel< 9><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 10: dgeqrf_batched_sq1d_reg_kernel<10><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 11: dgeqrf_batched_sq1d_reg_kernel<11><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 12: dgeqrf_batched_sq1d_reg_kernel<12><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 13: dgeqrf_batched_sq1d_reg_kernel<13><<<grid, threads, shmem, 
queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 14: dgeqrf_batched_sq1d_reg_kernel<14><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 15: dgeqrf_batched_sq1d_reg_kernel<15><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 16: dgeqrf_batched_sq1d_reg_kernel<16><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 17: dgeqrf_batched_sq1d_reg_kernel<17><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 18: dgeqrf_batched_sq1d_reg_kernel<18><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 19: dgeqrf_batched_sq1d_reg_kernel<19><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 20: dgeqrf_batched_sq1d_reg_kernel<20><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 21: dgeqrf_batched_sq1d_reg_kernel<21><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 22: dgeqrf_batched_sq1d_reg_kernel<22><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 23: dgeqrf_batched_sq1d_reg_kernel<23><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 24: dgeqrf_batched_sq1d_reg_kernel<24><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 25: dgeqrf_batched_sq1d_reg_kernel<25><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 26: dgeqrf_batched_sq1d_reg_kernel<26><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 27: dgeqrf_batched_sq1d_reg_kernel<27><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 28: dgeqrf_batched_sq1d_reg_kernel<28><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 29: dgeqrf_batched_sq1d_reg_kernel<29><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 30: dgeqrf_batched_sq1d_reg_kernel<30><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 31: dgeqrf_batched_sq1d_reg_kernel<31><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; case 32: dgeqrf_batched_sq1d_reg_kernel<32><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, dtau_array, info_array, batchCount); break; default: printf("error: size %lld is not supported\n", (long long) m); } return arginfo; }
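Both versions of the host wrapper size the dynamic shared memory as one padded m-by-m tile (sA) plus an m-element squared-norm workspace (sdw) per matrix, replicated ntcol times; the SLDA macro pads the leading dimension to N+1 (N+2 for N = 15, 23, 31), which looks like the usual padding trick to keep column accesses off the same shared-memory bank. Below is a small standalone helper that mirrors that computation; the helper name and the static_assert are ours, the formula comes from the file.

#include <cstddef>

// Mirrors the SLDA macro and the shmem computation in
// magma_dgeqrf_batched_smallsq above (helper names are ours).
constexpr int slda(int n) { return (n == 15 || n == 23 || n == 31) ? n + 2 : n + 1; }

constexpr size_t smallsq_shmem_bytes(int m, int ntcol)
{
    // per matrix: padded m x m tile (sA) + m squared-norm entries (sdw)
    size_t per_matrix = static_cast<size_t>(slda(m)) * m * sizeof(double)
                      + static_cast<size_t>(m) * sizeof(double);
    return per_matrix * ntcol;   // ntcol matrices share one thread block
}

// Example: m = 16, ntcol = 2 -> (17*16 + 16) * 8 bytes * 2 = 4608 bytes.
static_assert(smallsq_shmem_bytes(16, 2) == 4608, "matches the file's formula");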
b26aa94ac88bd1f74bc51eab72822f5bd2276f0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <cstdio> #include <vector> #ifdef __NVCC__ #include "hipcub/hipcub.hpp" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/float16.h" // set cub base traits in order to handle float16 namespace paddle { namespace operators { using Tensor = framework::Tensor; #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename DeviceContext, typename T> class TopkOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto* indices = ctx.Output<Tensor>("Indices"); int k = static_cast<int>(ctx.Attr<int>("k")); auto* k_t = ctx.Input<Tensor>("K"); if (k_t) { Tensor k_host; framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host); k = k_host.data<int>()[0]; framework::DDim output_dims = output->dims(); output_dims[output_dims.size() - 1] = k; output->Resize(output_dims); indices->Resize(output_dims); } const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(ctx.GetPlace()); // FIXME(typhoonzero): data is always converted to type T? framework::DDim inputdims = input->dims(); const int64_t input_height = phi::product(phi::slice_ddim(inputdims, 0, inputdims.size() - 1)); const int64_t input_width = inputdims[inputdims.size() - 1]; const auto& dev_ctx = ctx.cuda_device_context(); if ((input_width <= 1024 || k >= 128 || k == input_width)) { if (SortTopk<T>(dev_ctx, input, input_width, input_height, k, output, indices)) { // Successed, return. return; } else { LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace()); if (k > input_width) k = input_width; // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. // TODO(typhoonzero): refine this kernel. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? 
input_height : kMaxHeight; switch (GetDesiredBlockDim(input_width)) { FIXED_BLOCK_DIM( hipLaunchKernelGGL(( KeMatrixTopK<T, 5, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height)); default: PADDLE_THROW(platform::errors::Unavailable( "Calculation error occurred in TopK Operator's CUDA Kernel.")); } } }; template <typename DeviceContext, typename T> class TopkOpGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* x = context.Input<Tensor>("X"); auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out")); auto* indices = context.Input<Tensor>("Indices"); auto* x_grad = context.Output<Tensor>(framework::GradVarName("X")); T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); const T* out_grad_data = out_grad->data<T>(); const int64_t* indices_data = indices->data<int64_t>(); size_t k = indices->dims()[indices->dims().size() - 1]; framework::DDim xdims = x->dims(); const size_t row = phi::product(phi::slice_ddim(xdims, 0, xdims.size() - 1)); const size_t col = xdims[xdims.size() - 1]; const auto& dev_ctx = context.cuda_device_context(); const int kMaxHeight = 2048; int gridx = row < kMaxHeight ? row : kMaxHeight; switch (GetDesiredBlockDim(col)) { FIXED_BLOCK_DIM( hipLaunchKernelGGL(( AssignGrad<T, 5, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), x_grad_data, indices_data, out_grad_data, row, col, k)); default: PADDLE_THROW( platform::errors::Unavailable("Error occurs when Assign Grad.")); } } }; #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( top_k, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, float>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, double>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, int>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( top_k_grad, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, float>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, double>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, int>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>);
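The FIXED_BLOCK_DIM / FIXED_BLOCK_DIM_BASE macros above expand GetDesiredBlockDim's result into switch cases whose bodies see kBlockDim as a compile-time constant, so the kernel can take the block size as a template argument. Below is a self-contained sketch of the same dispatch pattern with a hypothetical kernel; the kernel and function names are illustrative, not Paddle API.

#include <hip/hip_runtime.h>

// Hypothetical kernel whose block size is a template parameter, like
// KeMatrixTopK<T, MaxLength, BlockSize> in the file above.
template <int kBlockDim>
__global__ void toy_rowwise_kernel(const float *in, float *out, int width)
{
    int row = blockIdx.x;
    // Each thread strides across one row of length `width`.
    for (int j = threadIdx.x; j < width; j += kBlockDim)
        out[row * width + j] = in[row * width + j];
}

// Same idea as FIXED_BLOCK_DIM: map the runtime width onto one of a few
// compile-time block sizes and instantiate the kernel accordingly.
void launch_rowwise(const float *in, float *out, int rows, int width,
                    hipStream_t stream)
{
    dim3 grid(rows);
    switch (width >= 256 ? 256 : width >= 128 ? 128 : width >= 64 ? 64 : 32) {
    case 256: hipLaunchKernelGGL((toy_rowwise_kernel<256>), grid, dim3(256), 0, stream, in, out, width); break;
    case 128: hipLaunchKernelGGL((toy_rowwise_kernel<128>), grid, dim3(128), 0, stream, in, out, width); break;
    case  64: hipLaunchKernelGGL((toy_rowwise_kernel< 64>), grid, dim3( 64), 0, stream, in, out, width); break;
    default:  hipLaunchKernelGGL((toy_rowwise_kernel< 32>), grid, dim3( 32), 0, stream, in, out, width); break;
    }
}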
b26aa94ac88bd1f74bc51eab72822f5bd2276f0d.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <cstdio> #include <vector> #ifdef __NVCC__ #include "cub/cub.cuh" #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/fluid/operators/top_k_op.h" #include "paddle/fluid/platform/float16.h" // set cub base traits in order to handle float16 namespace paddle { namespace operators { using Tensor = framework::Tensor; #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename DeviceContext, typename T> class TopkOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto* indices = ctx.Output<Tensor>("Indices"); int k = static_cast<int>(ctx.Attr<int>("k")); auto* k_t = ctx.Input<Tensor>("K"); if (k_t) { Tensor k_host; framework::TensorCopySync(*k_t, platform::CPUPlace(), &k_host); k = k_host.data<int>()[0]; framework::DDim output_dims = output->dims(); output_dims[output_dims.size() - 1] = k; output->Resize(output_dims); indices->Resize(output_dims); } const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(ctx.GetPlace()); // FIXME(typhoonzero): data is always converted to type T? framework::DDim inputdims = input->dims(); const int64_t input_height = phi::product(phi::slice_ddim(inputdims, 0, inputdims.size() - 1)); const int64_t input_width = inputdims[inputdims.size() - 1]; const auto& dev_ctx = ctx.cuda_device_context(); if ((input_width <= 1024 || k >= 128 || k == input_width)) { if (SortTopk<T>(dev_ctx, input, input_width, input_height, k, output, indices)) { // Successed, return. return; } else { LOG(INFO) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } int64_t* indices_data = indices->mutable_data<int64_t>(ctx.GetPlace()); if (k > input_width) k = input_width; // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. // TODO(typhoonzero): refine this kernel. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? 
input_height : kMaxHeight; switch (GetDesiredBlockDim(input_width)) { FIXED_BLOCK_DIM( KeMatrixTopK<T, 5, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height)); default: PADDLE_THROW(platform::errors::Unavailable( "Calculation error occurred in TopK Operator's CUDA Kernel.")); } } }; template <typename DeviceContext, typename T> class TopkOpGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* x = context.Input<Tensor>("X"); auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out")); auto* indices = context.Input<Tensor>("Indices"); auto* x_grad = context.Output<Tensor>(framework::GradVarName("X")); T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace()); const T* out_grad_data = out_grad->data<T>(); const int64_t* indices_data = indices->data<int64_t>(); size_t k = indices->dims()[indices->dims().size() - 1]; framework::DDim xdims = x->dims(); const size_t row = phi::product(phi::slice_ddim(xdims, 0, xdims.size() - 1)); const size_t col = xdims[xdims.size() - 1]; const auto& dev_ctx = context.cuda_device_context(); const int kMaxHeight = 2048; int gridx = row < kMaxHeight ? row : kMaxHeight; switch (GetDesiredBlockDim(col)) { FIXED_BLOCK_DIM( AssignGrad<T, 5, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>( x_grad_data, indices_data, out_grad_data, row, col, k)); default: PADDLE_THROW( platform::errors::Unavailable("Error occurs when Assign Grad.")); } } }; #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( top_k, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, float>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, double>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, int>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, paddle::operators::TopkOpCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( top_k_grad, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, float>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, double>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, int>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, int64_t>, paddle::operators::TopkOpGradCUDAKernel<paddle::platform::CUDADeviceContext, paddle::platform::float16>);
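One detail in this pair worth calling out: the CUDA source keeps "cub/cub.cuh" behind __NVCC__ and <hipcub/hipcub.hpp> behind __HIPCC__, whereas the hipified row above points the __NVCC__ branch at hipcub as well. The guarded includes, side by side (contents taken from the two rows):

#ifdef __NVCC__
#include "cub/cub.cuh"           // original .cu
// #include "hipcub/hipcub.hpp"  // what the hipified .hip row carries here
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>     // identical in both rows
#endif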
de61719bb1b815bfcfd430571a6a269048d1ff4b.hip
// !!! This is a file automatically generated by hipify!!! #include "ours.h" #include "ours_varbit.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <device_atomic_functions.h> #include <sm_35_intrinsics.h> #include <vector_functions.h> #include <vector> #include <cfloat> #include "BlockBuild.h" float __device__ rand_lut[64] = { 0.9486982, 0.972871958, 0.248168957, 0.493126931, 0.738212088, 0.653544012, 0.67056634, 0.204192427, 0.972804412, 0.991614247, 0.907730512, 0.826491797, 0.79865054, 0.94179941, 0.867766025, 0.280207877, 0.757674479, 0.184792714, 0.894972863, 0.700680464, 0.397279507, 0.827494222, 0.0977131338, 0.108998389, 0.503181245, 0.207843145, 0.828793763, 0.973948237, 0.791490856, 0.913345491, 0.859345573, 0.333875761, 0.250367264, 0.68947019, 0.08739105, 0.95076748, 0.934732119, 0.928425798, 0.199394464, 0.549738232, 0.0507203067, 0.0957588106, 0.0232692207, 0.611455875, 0.0713980492, 0.485231558, 0.162556085, 0.944551351, 0.0615243969, 0.0616311938, 0.0927325418, 0.450354735, 0.0980233341, 0.962107109, 0.411898038, 0.560993149, 0.997294696, 0.845310842, 0.522109665, 0.293706246, 0.542670523, 0.79422221, 0.0684990289, 0.410180829 }; using std::vector; //#include "colorspace.h" namespace colorspace { struct Lab { float L; float a; float b; }; struct sRGB { float R; float G; float B; }; // Whitepoints (D65) #define CSPC_XR 0.95047f #define CSPC_YR 1.00000f #define CSPC_ZR 1.08883f // Constants #define CSPC_EPS 0.008856f #define CSPC_KAPPA 903.3f __device__ inline Lab as_Lab(sRGB c) { // For reference http://www.brucelindbloom.com/ auto sRGB_compand = [](float val) { return (val > 0.04045f ? powf((val + 0.055f) / 1.055f, 2.4f) : val / 12.92f); }; c.R = sRGB_compand(c.R); c.G = sRGB_compand(c.G); c.B = sRGB_compand(c.B); // sRGB -> XYZ float X = 0.4124564f * c.R + 0.3575761f * c.G + 0.1804375f * c.B; float Y = 0.2126729f * c.R + 0.7151522f * c.G + 0.0721750f * c.B; float Z = 0.0193339f * c.R + 0.1191920f * c.G + 0.9503041f * c.B; // XYZ -> Lab float xr = X / CSPC_XR; float yr = Y / CSPC_YR; float zr = Z / CSPC_ZR; float fx = xr > CSPC_EPS ? pow(xr, 1.0f / 3.0f) : (CSPC_KAPPA * xr + 16.0f) / 116.0f; float fy = yr > CSPC_EPS ? pow(yr, 1.0f / 3.0f) : (CSPC_KAPPA * yr + 16.0f) / 116.0f; float fz = zr > CSPC_EPS ? pow(zr, 1.0f / 3.0f) : (CSPC_KAPPA * zr + 16.0f) / 116.0f; float L = 116.0f * fy - 16.0f; float a = 500.0f * (fx - fy); float b = 200.0f * (fy - fz); return Lab{ L, a, b }; }; __device__ inline sRGB as_sRGB(const Lab &c) { // For reference http://www.brucelindbloom.com/ // Lab -> XYZ float fy = (c.L + 16.0f) / 116.0f; float fz = (fy - c.b / 200.0f); float fx = (fy + c.a / 500.0f); float fx3 = fx * fx * fx; float fy3 = fy * fy * fy; float fz3 = fz * fz * fz; float xr = fx3 > CSPC_EPS ? fx3 : (116.0f * fx - 16.0f) / CSPC_KAPPA; float yr = c.L > (CSPC_KAPPA * CSPC_EPS) ? fy3 : c.L / CSPC_KAPPA; float zr = fz3 > CSPC_EPS ? fz3 : (116.0f * fz - 16.0f) / CSPC_KAPPA; float X = xr * CSPC_XR; float Y = yr * CSPC_YR; float Z = zr * CSPC_ZR; // XYZ -> sRGB float R = 3.2404542f * X - 1.5371385f * Y - 0.4985314f * Z; float G = -0.9692660f * X + 1.8760108f * Y + 0.0415560f * Z; float B = 0.0556434f * X - 0.2040259f * Y + 1.0572252f * Z; auto sRGB_compand = [](float val) { return (val > 0.0031308f ? 
1.055f*pow(val, 1.0f / 2.4f) - 0.055f : val * 12.92f); }; R = sRGB_compand(R); G = sRGB_compand(G); B = sRGB_compand(B); return sRGB{ R, G, B }; }; } // namespace colorspace // clang-format off __host__ __device__ float3 inline operator * (const float a, const float3 &b) { return make_float3(a * b.x, a * b.y, a * b.z); }; __host__ __device__ float3 inline operator * (const float3 &b, const float a) { return a * b; }; __host__ __device__ float3 inline operator - (const float3 &a, const float3 &b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }; __host__ __device__ float3 inline operator + (const float3 &a, const float3 &b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); }; __host__ __device__ float3 inline operator * (const float3 &a, const float3 &b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }; __host__ __device__ float3 inline operator / (const float3 &a, const float3 &b) { return make_float3(a.x / b.x, a.y / b.y, a.z / b.z); }; __host__ __device__ float3 inline operator - (float3 a) { return make_float3(-a.x, -a.y, -a.z); }; __host__ __device__ inline float dot(const float3 &a, const float3 &b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __host__ __device__ inline float length(float3 a) { return sqrt(dot(a, a)); } __host__ __device__ inline float3 normalize(float3 a) { return (1.0f / length(a)) * a; } // clang-format on template<class T> __host__ __device__ T min(T a, T b) { return a < b ? a : b; } template<class T> __host__ __device__ T max(T a, T b) { return a > b ? a : b; } template<class T> __host__ __device__ T clamp(T val, T minVal, T maxVal) { return min(max(minVal, val), maxVal); } struct float3x3 { float3 c1, c2, c3; __host__ __device__ float3x3(const float3 &v1, const float3 &v2, const float3 &v3) : c1(v1), c2(v2), c3(v3) {}; __host__ __device__ inline const float3 operator * (const float3& v) const { return v.x * c1 + v.y * c2 + v.z * c3; } __host__ __device__ inline const float3x3 operator + (const float3x3& m) const { return float3x3(c1 + m.c1, c2 + m.c2, c3 + m.c3); } __host__ __device__ inline const float3x3 operator - (const float3x3& m) const { return float3x3(c1 - m.c1, c2 - m.c2, c3 - m.c3); } }; __host__ __device__ inline float trace(const float3x3 &m) { return m.c1.x + m.c2.y + m.c3.z; } __host__ __device__ inline const float3x3 operator * (const float s, const float3x3& m) { return float3x3(s * m.c1, s * m.c2, s * m.c3); } __host__ __device__ float3x3 make_float3x3(const float3 &v1, const float3 &v2, const float3 &v3) { return float3x3(v1, v2, v3); } __host__ __device__ float3x3 make_float3x3(float s) { return float3x3(make_float3(s, 0.0f, 0.0f), make_float3(0.0, s, 0.0f), make_float3(0.0f, 0.0f, s)); } template<class T> __host__ __device__ inline T compensatedSum(T val, T &sum, T &error) { T y = val - error; T t = sum + y; error = (t - sum) - y; sum = t; return sum; } // clang-format off ///////////////____R____/////////////// __device__ float3 r4_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xF) / 15.0f, 0.0f, 0.0f ); } __device__ float3 r8_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFF) / 255.0f, 0.0f, 0.0f ); } __device__ float3 r16_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFFFF) / 65535.0f, 0.0f, 0.0f ); } ///////////////____RG____/////////////// __device__ float3 rg88_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFF) / 255.0f, ((rgb >> 8) & 0xFF) / 255.0f, 0.0f ); } __device__ float3 rg1616_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFFFF) / 
65535.0f, ((rgb >> 16) & 0xFFFF) / 65535.0f, 0.0f ); } ///////////////____RGB____/////////////// __device__ float3 rgb888_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFF) / 255.0f, ((rgb >> 8) & 0xFF) / 255.0f, ((rgb >> 16) & 0xFF) / 255.0f ); } __device__ float3 rgb101210_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0x3FF) / 1023.0f, ((rgb >> 10) & 0xFFF) / 4095.0f, ((rgb >> 22) & 0x3FF) / 1023.0f ); } __device__ float3 rgb565_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0x1F) / 31.0f, ((rgb >> 5) & 0x3F) / 63.0f, ((rgb >> 11) & 0x1F) / 31.0f ); } ///////////////____R____/////////////// __device__ uint32_t float3_to_r4(float3 c) { float R = min(1.0f, max(0.0f, c.x)); return (uint32_t(round(R * 15.0f)) << 0); } __device__ uint32_t float3_to_r8(float3 c) { float R = min(1.0f, max(0.0f, c.x)); return (uint32_t(round(R * 255.0f)) << 0); } __device__ uint32_t float3_to_r16(float3 c) { float R = min(1.0f, max(0.0f, c.x)); return (uint32_t(round(R * 65535.0f)) << 0); } ///////////////____RG____/////////////// __device__ uint32_t float3_to_rg88(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); return (uint32_t(round(R * 255.0f)) << 0) | (uint32_t(round(G * 255.0f)) << 8); } __device__ uint32_t float3_to_rg1616(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); return (uint32_t(round(R * 65535.0f)) << 0) | (uint32_t(round(G * 65535.0f)) << 16); } ///////////////____RGB____/////////////// __device__ uint32_t float3_to_rgb888(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); float B = min(1.0f, max(0.0f, c.z)); return (uint32_t(round(R * 255.0f)) << 0) | (uint32_t(round(G * 255.0f)) << 8) | (uint32_t(round(B * 255.0f)) << 16); } __device__ uint32_t float3_to_rgb101210(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); float B = min(1.0f, max(0.0f, c.z)); return (uint32_t(round(R * 1023.0f)) << 0) | (uint32_t(round(G * 4095.0f)) << 10) | (uint32_t(round(B * 1023.0f)) << 22); } __device__ uint32_t float3_to_rgb565(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); float B = min(1.0f, max(0.0f, c.z)); return (uint32_t(round(R * 31.0f)) << 0) | (uint32_t(round(G * 63.0f)) << 5) | (uint32_t(round(B * 31.0f)) << 11); } __device__ uint32_t float3_to_rgbxxx(float3 c, ColorLayout layout) { switch (layout) { case R_4: return float3_to_r4(c); case R_8: return float3_to_r8(c); case R_16: return float3_to_r16(c); case RG_8_8: return float3_to_rg88(c); case RG_16_16: return float3_to_rg1616(c); case RGB_8_8_8: return float3_to_rgb888(c); case RGB_10_12_10: return float3_to_rgb101210(c); case RGB_5_6_5: return float3_to_rgb565(c); default: break; } return 0; } __device__ float3 rgbxxx_to_float3(uint32_t rgb, ColorLayout layout) { switch (layout) { case R_4: return r4_to_float3(rgb); case R_8: return r8_to_float3(rgb); case R_16: return r16_to_float3(rgb); case RG_8_8: return rg88_to_float3(rgb); case RG_16_16: return rg1616_to_float3(rgb); case RGB_8_8_8: return rgb888_to_float3(rgb); case RGB_10_12_10: return rgb101210_to_float3(rgb); case RGB_5_6_5: return rgb565_to_float3(rgb); default: break; } return make_float3(0.f, 0.f, 0.f); } __device__ float3 minmaxCorrectedColor(const float3 &c, ColorLayout layout) { return rgbxxx_to_float3(float3_to_rgbxxx(c, layout), layout); } __device__ float3 minmaxSingleCorrectedColor(const float3 &c, ColorLayout layout) { ColorLayout single_color_layout; 
switch (layout) { case R_4: single_color_layout = R_8; break; case R_8: single_color_layout = R_16; break; case RG_8_8: single_color_layout = RG_16_16; break; case RGB_5_6_5: single_color_layout = RGB_10_12_10; break; default: single_color_layout = NONE; break; } return minmaxCorrectedColor(c, single_color_layout); } // clang-format on /////////////////////////////////////////////////////////////////////// // Get the "error" between two colors. Should be perceptually sane. /////////////////////////////////////////////////////////////////////// __device__ inline float3 minmax_correctred(const float3 &c) { return rgb888_to_float3(float3_to_rgb888(c)); } __device__ inline float getErrorSquared(const float3 & c1, const float3 & c2, bool minmax_correction) { const float3 err_vec = minmax_correction ? minmax_correctred(c1) - minmax_correctred(c2) : c1 - c2; return err_vec.x * err_vec.x + err_vec.y * err_vec.y + err_vec.z * err_vec.z; }; __device__ inline float getError(const float3 & c1, const float3 & c2, bool minmax_correction) { return sqrt(getErrorSquared(c1, c2, minmax_correction)); }; #define FULL_MASK 0xffffffff __device__ inline float warpMin(float x) { x = min(x, __shfl_down_sync(FULL_MASK, x, 16)); x = min(x, __shfl_down_sync(FULL_MASK, x, 8)); x = min(x, __shfl_down_sync(FULL_MASK, x, 4)); x = min(x, __shfl_down_sync(FULL_MASK, x, 2)); x = min(x, __shfl_down_sync(FULL_MASK, x, 1)); return __shfl_sync(FULL_MASK, x, 0); } __device__ inline float warpMax(float x) { x = max(x, __shfl_down_sync(FULL_MASK, x, 16)); x = max(x, __shfl_down_sync(FULL_MASK, x, 8)); x = max(x, __shfl_down_sync(FULL_MASK, x, 4)); x = max(x, __shfl_down_sync(FULL_MASK, x, 2)); x = max(x, __shfl_down_sync(FULL_MASK, x, 1)); return __shfl_sync(FULL_MASK, x, 0); } __device__ inline float warpSum(float x) { x += __shfl_down_sync(FULL_MASK, x, 16); x += __shfl_down_sync(FULL_MASK, x, 8); x += __shfl_down_sync(FULL_MASK, x, 4); x += __shfl_down_sync(FULL_MASK, x, 2); x += __shfl_down_sync(FULL_MASK, x, 1); return __shfl_sync(FULL_MASK, x, 0); } __device__ inline float3 warpSum(float3 v) { v.x = warpSum(v.x); v.y = warpSum(v.y); v.z = warpSum(v.z); return v; } __device__ inline float3x3 warpSum(float3x3 m) { m.c1 = warpSum(m.c1); m.c2 = warpSum(m.c2); m.c3 = warpSum(m.c3); return m; } template<bool minmaxcorrection, bool laberr> __global__ void scorefunction_gpu_warp( size_t numColors, size_t numBlocks, const float3 * colors, const BlockBuild * blocks, float * scores, uint8_t * weights, float3 * colorRanges, float error_treshold, ColorLayout layout, int K, int * globalJobQueue, bool finalEval = false ) { int laneId = threadIdx.x & 31; int jobId = INT32_MAX; if (laneId == 0) { jobId = atomicSub(globalJobQueue, 1); } jobId = __shfl_sync(FULL_MASK, jobId, 0); while (__any_sync(FULL_MASK, jobId >= 0)) { BlockBuild currblock = blocks[jobId]; BlockBuild nextblock = blocks[jobId + 1]; size_t start = currblock.blockStart; size_t range = currblock.blockLength + (finalEval ? 
0 : nextblock.blockLength); // start + range <= numColors range = min(range, numColors - start); float3 minpoint, maxpoint; if (K > 1) { // fit if (range == 1) { minpoint = colors[start]; maxpoint = colors[start]; } else if (range == 2) { minpoint = colors[start]; maxpoint = colors[start + 1]; } else { float3 o, d; // least square fit { o = make_float3(0.0f, 0.0f, 0.0f); float3 error = o; for (size_t i = start + laneId; i < start + range; i += 32) { o = compensatedSum(colors[i], o, error); } o = warpSum(o); o = (1.0f / range) * o; float3 zeros = make_float3(0.0f, 0.0f, 0.0f); float3x3 scatterMatrix = make_float3x3(zeros, zeros, zeros); for (size_t i = start + laneId; i < start + range; i += 32) { float3 relpos = colors[i] - o; float3x3 outerProd = make_float3x3( relpos.x * relpos, relpos.y * relpos, relpos.z * relpos ); scatterMatrix = scatterMatrix + outerProd; } scatterMatrix = warpSum(scatterMatrix); // force dead-code elimination since matrix is symmetric scatterMatrix.c1.y = scatterMatrix.c2.x; scatterMatrix.c1.z = scatterMatrix.c3.x; scatterMatrix.c2.z = scatterMatrix.c3.y; if (trace(scatterMatrix) == 0.f) { d = normalize(o - minmaxCorrectedColor(o, layout)); minpoint = o; maxpoint = o; } else { // power method to find eigenvector of largest eigenvalue unsigned randidx = 0; float3 v = make_float3( rand_lut[randidx++ % 64], rand_lut[randidx++ % 64], rand_lut[randidx++ % 64] ); for (int i = 0; i < 20; i++) { if (length(v) == 0.f) { v = make_float3( rand_lut[randidx++ % 64], rand_lut[randidx++ % 64], rand_lut[randidx++ % 64] ); i = 0; } v = scatterMatrix * normalize(v); } float3 eigenvector = normalize(v); d = eigenvector; } } float mindist = FLT_MAX; float maxdist = -FLT_MAX; for (size_t i = start + laneId; i < start + range; i += 32) { float distance = dot(colors[i] - o, d); mindist = min(mindist, distance); maxdist = max(maxdist, distance); } mindist = warpMin(mindist); maxdist = warpMax(maxdist); minpoint = o + mindist * d; maxpoint = o + maxdist * d; } if (minmaxcorrection) { minpoint = minmaxCorrectedColor(minpoint, layout); maxpoint = minmaxCorrectedColor(maxpoint, layout); } } else { float3 o = make_float3(0.0f, 0.0f, 0.0f); float3 error = o; for (size_t i = start + laneId; i < start + range; i += 32) { o = compensatedSum(colors[i], o, error); } o = warpSum(o); o = (1.0f / range) * o; if (minmaxcorrection) { o = minmaxSingleCorrectedColor(o, layout); } minpoint = o; maxpoint = o; } if (finalEval && (laneId == 0)) { colorRanges[2 * jobId + 0] = minpoint; colorRanges[2 * jobId + 1] = maxpoint; } // ~fit // evaluate bool bEval = true; float mse = 0.0f; if (K > 1) { if (range == 1) { if (getError(minpoint, colors[start], minmaxcorrection) > error_treshold || getError(maxpoint, colors[start], minmaxcorrection) > error_treshold) { bEval = false; } } else if (range == 2) { if (getError(minpoint, colors[start], minmaxcorrection) > error_treshold || getError(maxpoint, colors[start + 1], minmaxcorrection) > error_treshold) { bEval = false; } } else { float msesum = 0.0f; const float3 & A = minpoint; const float3 & B = maxpoint; float colorRangeInvSq = 1.0f / dot(B - A, B - A); for (size_t i = start + laneId; i < start + range; i += 32) { const float3 & p = colors[i]; float distance = 0.0f; if (length(B - A) > (1e-4f)) { distance = colorRangeInvSq * dot(p - A, B - A); } float w = clamp(round(distance * float(K - 1)), 0.0f, float(K - 1)); float3 interpolated_color = A + w / float(K - 1) * (B - A); float error = getError(p, interpolated_color, minmaxcorrection); msesum += getErrorSquared(p, 
interpolated_color, minmaxcorrection); if (error > error_treshold) { bEval = false; } if (finalEval) { weights[i] = w; } } msesum = warpSum(msesum); // true iff all bEval are true bEval = __all_sync(FULL_MASK, bEval); mse = msesum / float(range * 3); } } else { float msesum = 0.0f; for (size_t i = start + laneId; i < start + range; i += 32) { const float3 & p = colors[i]; float3 interpolated_color = minpoint; float error = getError(p, interpolated_color, minmaxcorrection); msesum += getErrorSquared(p, interpolated_color, minmaxcorrection); if (error > error_treshold) { bEval = false; } } msesum = warpSum(msesum); // true iff all bEval are true bEval = __all_sync(FULL_MASK, bEval); mse = msesum / float(range * 3); } // ~evaluate float score = bEval ? 1.0f / (mse + 1.0f) : -1.0f; if (K > 1 && range == 2 && bEval) { score = 1.0f / (length(colors[start] - colors[start + 1]) + 1.0f); } if (!isfinite(score)) { score = -1.0f; } if (laneId == 0) { scores[jobId] = score; } // fetch next job if (laneId == 0) jobId = atomicSub(globalJobQueue, 1); jobId = __shfl_sync(FULL_MASK, jobId, 0); }// ~while(jobId >= 0) }// ~scorefunction_gpu_warp float3 * g_dev_colors = nullptr; uint8_t * g_dev_weights = nullptr; size_t g_numColors = 0; #include <csignal> void uploadColors(const vector<float3> &colors) { if (g_dev_weights) hipFree(g_dev_weights); if (g_dev_colors) hipFree(g_dev_colors); g_numColors = colors.size(); { hipError_t err = hipGetLastError(); if( hipSuccess != err ) { std::fprintf( stderr, "ERROR %s:%d: cuda error \"%s\"\n", __FILE__, __LINE__, hipGetErrorString(err) ); } } // printf("Allocating %zu weights (%fMB)\n", colors.size(), colors.size() * sizeof(uint8_t) / double(1 << 20)); // printf("Allocating %zu colors (%fMB)\n", colors.size(), colors.size() * sizeof(float3) / double(1 << 20)); // alloc per-color memory hipMallocManaged(&g_dev_weights, colors.size() * sizeof(uint8_t)); hipMallocManaged(&g_dev_colors, colors.size() * sizeof(float3)); hipMemcpy(g_dev_colors, &colors[0], colors.size() * sizeof(float3), hipMemcpyHostToDevice); hipError_t err = hipGetLastError(); if( hipSuccess != err ) { std::fprintf( stderr, "ERROR %s:%d: cuda error \"%s\"\n", __FILE__, __LINE__, hipGetErrorString(err) ); } } void scores_gpu( const vector<BlockBuild> &blocks, vector<float> &scores, vector<uint8_t> &weights, vector<float3> &colorRanges, float error_treshold, bool minmaxcorrection, bool laberr, ColorLayout layout, int K, bool finalEval ) { if (g_numColors == 0) { // no colors? nothing more to do. 
return; } static BlockBuild * pBlocks = nullptr; static float * pScores = nullptr; static float3 * pColorRanges = nullptr; // alloc per-block memory static size_t blockAllocSize = 0; if (blockAllocSize < blocks.size()) { if (pBlocks) hipFree(pBlocks); if (pScores) hipFree(pScores); if (pColorRanges) hipFree(pColorRanges); blockAllocSize = blocks.size(); // printf("Allocating %zu blocks (%fMB)\n", blockAllocSize, blockAllocSize * sizeof(BlockBuild) / double(1 << 20)); // printf("Allocating %zu blocks scores (%fMB)\n", blockAllocSize, blockAllocSize * sizeof(float) / double(1 << 20)); // printf("Allocating %zu blocks ranges (%fMB)\n", blockAllocSize, blockAllocSize * 2 * sizeof(float3) / double(1 << 20)); hipMallocManaged(&pBlocks, blockAllocSize * sizeof(BlockBuild)); hipMallocManaged(&pScores, blockAllocSize * sizeof(float)); hipMallocManaged(&pColorRanges, blockAllocSize * 2 * sizeof(float3)); } // upload blocks hipMemcpy(pBlocks, &blocks[0], blocks.size() * sizeof(BlockBuild), hipMemcpyHostToDevice); static int *jobQueue = nullptr; if (!jobQueue) { hipMalloc(&jobQueue, sizeof(int)); } assert(blocks.size() < std::numeric_limits<int32_t>::max()); int jobs = int(blocks.size()) - 1; // first job is the last valid idx. hipMemcpy(jobQueue, &jobs, sizeof(int), hipMemcpyHostToDevice); { dim3 blockDim(128); dim3 gridDim(20 * 16); // reduce register preassure via templates if (minmaxcorrection) { scorefunction_gpu_warp<true, false> << <gridDim, blockDim >> > ( g_numColors, blocks.size(), g_dev_colors, pBlocks, pScores, g_dev_weights, pColorRanges, error_treshold, layout, K, jobQueue, finalEval ); } } scores.resize(blocks.size()); hipMemcpy(&scores[0], pScores, blocks.size() * sizeof(float), hipMemcpyDeviceToHost); if (finalEval) { colorRanges.resize(2 * blocks.size()); hipMemcpy(&colorRanges[0], pColorRanges, blocks.size() * 2 * sizeof(float3), hipMemcpyDeviceToHost); weights.resize(g_numColors); hipMemcpy(&weights[0], g_dev_weights, g_numColors * sizeof(uint8_t), hipMemcpyDeviceToHost); } }
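The scoring kernel in this pair accumulates per-lane partial results over strided loads and reduces them with __shfl_down_sync-based helpers (warpMin, warpMax, warpSum), then broadcasts lane 0's value back to the whole warp. Below is a minimal standalone kernel showing the same pattern for a warp-wide mean; it assumes the same __shfl_down_sync/__shfl_sync availability the file itself relies on, and the names are ours.

#include <hip/hip_runtime.h>
#define FULL_MASK 0xffffffff

// Same reduction shape as warpSum above: halve the shuffle distance each
// step, then broadcast lane 0's total to every lane.
__device__ inline float warp_sum(float x)
{
    for (int offset = 16; offset > 0; offset >>= 1)
        x += __shfl_down_sync(FULL_MASK, x, offset);
    return __shfl_sync(FULL_MASK, x, 0);
}

// One warp computes the mean of n values, each lane loading a strided subset,
// mirroring the `for (i = start + laneId; i < start + range; i += 32)` loops
// in scorefunction_gpu_warp.
__global__ void warp_mean(const float *vals, int n, float *out)
{
    int lane = threadIdx.x & 31;
    float partial = 0.0f;
    for (int i = lane; i < n; i += 32) partial += vals[i];
    float total = warp_sum(partial);
    if (lane == 0) *out = total / n;
}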
de61719bb1b815bfcfd430571a6a269048d1ff4b.cu
#include "ours.h" #include "ours_varbit.h" #include <cuda.h> #include <device_launch_parameters.h> #include <device_atomic_functions.h> #include <sm_35_intrinsics.h> #include <vector_functions.h> #include <vector> #include <cfloat> #include "BlockBuild.h" float __device__ rand_lut[64] = { 0.9486982, 0.972871958, 0.248168957, 0.493126931, 0.738212088, 0.653544012, 0.67056634, 0.204192427, 0.972804412, 0.991614247, 0.907730512, 0.826491797, 0.79865054, 0.94179941, 0.867766025, 0.280207877, 0.757674479, 0.184792714, 0.894972863, 0.700680464, 0.397279507, 0.827494222, 0.0977131338, 0.108998389, 0.503181245, 0.207843145, 0.828793763, 0.973948237, 0.791490856, 0.913345491, 0.859345573, 0.333875761, 0.250367264, 0.68947019, 0.08739105, 0.95076748, 0.934732119, 0.928425798, 0.199394464, 0.549738232, 0.0507203067, 0.0957588106, 0.0232692207, 0.611455875, 0.0713980492, 0.485231558, 0.162556085, 0.944551351, 0.0615243969, 0.0616311938, 0.0927325418, 0.450354735, 0.0980233341, 0.962107109, 0.411898038, 0.560993149, 0.997294696, 0.845310842, 0.522109665, 0.293706246, 0.542670523, 0.79422221, 0.0684990289, 0.410180829 }; using std::vector; //#include "colorspace.h" namespace colorspace { struct Lab { float L; float a; float b; }; struct sRGB { float R; float G; float B; }; // Whitepoints (D65) #define CSPC_XR 0.95047f #define CSPC_YR 1.00000f #define CSPC_ZR 1.08883f // Constants #define CSPC_EPS 0.008856f #define CSPC_KAPPA 903.3f __device__ inline Lab as_Lab(sRGB c) { // For reference http://www.brucelindbloom.com/ auto sRGB_compand = [](float val) { return (val > 0.04045f ? powf((val + 0.055f) / 1.055f, 2.4f) : val / 12.92f); }; c.R = sRGB_compand(c.R); c.G = sRGB_compand(c.G); c.B = sRGB_compand(c.B); // sRGB -> XYZ float X = 0.4124564f * c.R + 0.3575761f * c.G + 0.1804375f * c.B; float Y = 0.2126729f * c.R + 0.7151522f * c.G + 0.0721750f * c.B; float Z = 0.0193339f * c.R + 0.1191920f * c.G + 0.9503041f * c.B; // XYZ -> Lab float xr = X / CSPC_XR; float yr = Y / CSPC_YR; float zr = Z / CSPC_ZR; float fx = xr > CSPC_EPS ? pow(xr, 1.0f / 3.0f) : (CSPC_KAPPA * xr + 16.0f) / 116.0f; float fy = yr > CSPC_EPS ? pow(yr, 1.0f / 3.0f) : (CSPC_KAPPA * yr + 16.0f) / 116.0f; float fz = zr > CSPC_EPS ? pow(zr, 1.0f / 3.0f) : (CSPC_KAPPA * zr + 16.0f) / 116.0f; float L = 116.0f * fy - 16.0f; float a = 500.0f * (fx - fy); float b = 200.0f * (fy - fz); return Lab{ L, a, b }; }; __device__ inline sRGB as_sRGB(const Lab &c) { // For reference http://www.brucelindbloom.com/ // Lab -> XYZ float fy = (c.L + 16.0f) / 116.0f; float fz = (fy - c.b / 200.0f); float fx = (fy + c.a / 500.0f); float fx3 = fx * fx * fx; float fy3 = fy * fy * fy; float fz3 = fz * fz * fz; float xr = fx3 > CSPC_EPS ? fx3 : (116.0f * fx - 16.0f) / CSPC_KAPPA; float yr = c.L > (CSPC_KAPPA * CSPC_EPS) ? fy3 : c.L / CSPC_KAPPA; float zr = fz3 > CSPC_EPS ? fz3 : (116.0f * fz - 16.0f) / CSPC_KAPPA; float X = xr * CSPC_XR; float Y = yr * CSPC_YR; float Z = zr * CSPC_ZR; // XYZ -> sRGB float R = 3.2404542f * X - 1.5371385f * Y - 0.4985314f * Z; float G = -0.9692660f * X + 1.8760108f * Y + 0.0415560f * Z; float B = 0.0556434f * X - 0.2040259f * Y + 1.0572252f * Z; auto sRGB_compand = [](float val) { return (val > 0.0031308f ? 
1.055f*pow(val, 1.0f / 2.4f) - 0.055f : val * 12.92f); }; R = sRGB_compand(R); G = sRGB_compand(G); B = sRGB_compand(B); return sRGB{ R, G, B }; }; } // namespace colorspace // clang-format off __host__ __device__ float3 inline operator * (const float a, const float3 &b) { return make_float3(a * b.x, a * b.y, a * b.z); }; __host__ __device__ float3 inline operator * (const float3 &b, const float a) { return a * b; }; __host__ __device__ float3 inline operator - (const float3 &a, const float3 &b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); }; __host__ __device__ float3 inline operator + (const float3 &a, const float3 &b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); }; __host__ __device__ float3 inline operator * (const float3 &a, const float3 &b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); }; __host__ __device__ float3 inline operator / (const float3 &a, const float3 &b) { return make_float3(a.x / b.x, a.y / b.y, a.z / b.z); }; __host__ __device__ float3 inline operator - (float3 a) { return make_float3(-a.x, -a.y, -a.z); }; __host__ __device__ inline float dot(const float3 &a, const float3 &b) { return a.x * b.x + a.y * b.y + a.z * b.z; } __host__ __device__ inline float length(float3 a) { return sqrt(dot(a, a)); } __host__ __device__ inline float3 normalize(float3 a) { return (1.0f / length(a)) * a; } // clang-format on template<class T> __host__ __device__ T min(T a, T b) { return a < b ? a : b; } template<class T> __host__ __device__ T max(T a, T b) { return a > b ? a : b; } template<class T> __host__ __device__ T clamp(T val, T minVal, T maxVal) { return min(max(minVal, val), maxVal); } struct float3x3 { float3 c1, c2, c3; __host__ __device__ float3x3(const float3 &v1, const float3 &v2, const float3 &v3) : c1(v1), c2(v2), c3(v3) {}; __host__ __device__ inline const float3 operator * (const float3& v) const { return v.x * c1 + v.y * c2 + v.z * c3; } __host__ __device__ inline const float3x3 operator + (const float3x3& m) const { return float3x3(c1 + m.c1, c2 + m.c2, c3 + m.c3); } __host__ __device__ inline const float3x3 operator - (const float3x3& m) const { return float3x3(c1 - m.c1, c2 - m.c2, c3 - m.c3); } }; __host__ __device__ inline float trace(const float3x3 &m) { return m.c1.x + m.c2.y + m.c3.z; } __host__ __device__ inline const float3x3 operator * (const float s, const float3x3& m) { return float3x3(s * m.c1, s * m.c2, s * m.c3); } __host__ __device__ float3x3 make_float3x3(const float3 &v1, const float3 &v2, const float3 &v3) { return float3x3(v1, v2, v3); } __host__ __device__ float3x3 make_float3x3(float s) { return float3x3(make_float3(s, 0.0f, 0.0f), make_float3(0.0, s, 0.0f), make_float3(0.0f, 0.0f, s)); } template<class T> __host__ __device__ inline T compensatedSum(T val, T &sum, T &error) { T y = val - error; T t = sum + y; error = (t - sum) - y; sum = t; return sum; } // clang-format off ///////////////____R____/////////////// __device__ float3 r4_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xF) / 15.0f, 0.0f, 0.0f ); } __device__ float3 r8_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFF) / 255.0f, 0.0f, 0.0f ); } __device__ float3 r16_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFFFF) / 65535.0f, 0.0f, 0.0f ); } ///////////////____RG____/////////////// __device__ float3 rg88_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFF) / 255.0f, ((rgb >> 8) & 0xFF) / 255.0f, 0.0f ); } __device__ float3 rg1616_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFFFF) / 
65535.0f, ((rgb >> 16) & 0xFFFF) / 65535.0f, 0.0f ); } ///////////////____RGB____/////////////// __device__ float3 rgb888_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0xFF) / 255.0f, ((rgb >> 8) & 0xFF) / 255.0f, ((rgb >> 16) & 0xFF) / 255.0f ); } __device__ float3 rgb101210_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0x3FF) / 1023.0f, ((rgb >> 10) & 0xFFF) / 4095.0f, ((rgb >> 22) & 0x3FF) / 1023.0f ); } __device__ float3 rgb565_to_float3(uint32_t rgb) { return make_float3( ((rgb >> 0) & 0x1F) / 31.0f, ((rgb >> 5) & 0x3F) / 63.0f, ((rgb >> 11) & 0x1F) / 31.0f ); } ///////////////____R____/////////////// __device__ uint32_t float3_to_r4(float3 c) { float R = min(1.0f, max(0.0f, c.x)); return (uint32_t(round(R * 15.0f)) << 0); } __device__ uint32_t float3_to_r8(float3 c) { float R = min(1.0f, max(0.0f, c.x)); return (uint32_t(round(R * 255.0f)) << 0); } __device__ uint32_t float3_to_r16(float3 c) { float R = min(1.0f, max(0.0f, c.x)); return (uint32_t(round(R * 65535.0f)) << 0); } ///////////////____RG____/////////////// __device__ uint32_t float3_to_rg88(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); return (uint32_t(round(R * 255.0f)) << 0) | (uint32_t(round(G * 255.0f)) << 8); } __device__ uint32_t float3_to_rg1616(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); return (uint32_t(round(R * 65535.0f)) << 0) | (uint32_t(round(G * 65535.0f)) << 16); } ///////////////____RGB____/////////////// __device__ uint32_t float3_to_rgb888(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); float B = min(1.0f, max(0.0f, c.z)); return (uint32_t(round(R * 255.0f)) << 0) | (uint32_t(round(G * 255.0f)) << 8) | (uint32_t(round(B * 255.0f)) << 16); } __device__ uint32_t float3_to_rgb101210(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); float B = min(1.0f, max(0.0f, c.z)); return (uint32_t(round(R * 1023.0f)) << 0) | (uint32_t(round(G * 4095.0f)) << 10) | (uint32_t(round(B * 1023.0f)) << 22); } __device__ uint32_t float3_to_rgb565(float3 c) { float R = min(1.0f, max(0.0f, c.x)); float G = min(1.0f, max(0.0f, c.y)); float B = min(1.0f, max(0.0f, c.z)); return (uint32_t(round(R * 31.0f)) << 0) | (uint32_t(round(G * 63.0f)) << 5) | (uint32_t(round(B * 31.0f)) << 11); } __device__ uint32_t float3_to_rgbxxx(float3 c, ColorLayout layout) { switch (layout) { case R_4: return float3_to_r4(c); case R_8: return float3_to_r8(c); case R_16: return float3_to_r16(c); case RG_8_8: return float3_to_rg88(c); case RG_16_16: return float3_to_rg1616(c); case RGB_8_8_8: return float3_to_rgb888(c); case RGB_10_12_10: return float3_to_rgb101210(c); case RGB_5_6_5: return float3_to_rgb565(c); default: break; } return 0; } __device__ float3 rgbxxx_to_float3(uint32_t rgb, ColorLayout layout) { switch (layout) { case R_4: return r4_to_float3(rgb); case R_8: return r8_to_float3(rgb); case R_16: return r16_to_float3(rgb); case RG_8_8: return rg88_to_float3(rgb); case RG_16_16: return rg1616_to_float3(rgb); case RGB_8_8_8: return rgb888_to_float3(rgb); case RGB_10_12_10: return rgb101210_to_float3(rgb); case RGB_5_6_5: return rgb565_to_float3(rgb); default: break; } return make_float3(0.f, 0.f, 0.f); } __device__ float3 minmaxCorrectedColor(const float3 &c, ColorLayout layout) { return rgbxxx_to_float3(float3_to_rgbxxx(c, layout), layout); } __device__ float3 minmaxSingleCorrectedColor(const float3 &c, ColorLayout layout) { ColorLayout single_color_layout; 
switch (layout) { case R_4: single_color_layout = R_8; break; case R_8: single_color_layout = R_16; break; case RG_8_8: single_color_layout = RG_16_16; break; case RGB_5_6_5: single_color_layout = RGB_10_12_10; break; default: single_color_layout = NONE; break; } return minmaxCorrectedColor(c, single_color_layout); } // clang-format on /////////////////////////////////////////////////////////////////////// // Get the "error" between two colors. Should be perceptually sane. /////////////////////////////////////////////////////////////////////// __device__ inline float3 minmax_correctred(const float3 &c) { return rgb888_to_float3(float3_to_rgb888(c)); } __device__ inline float getErrorSquared(const float3 & c1, const float3 & c2, bool minmax_correction) { const float3 err_vec = minmax_correction ? minmax_correctred(c1) - minmax_correctred(c2) : c1 - c2; return err_vec.x * err_vec.x + err_vec.y * err_vec.y + err_vec.z * err_vec.z; }; __device__ inline float getError(const float3 & c1, const float3 & c2, bool minmax_correction) { return sqrt(getErrorSquared(c1, c2, minmax_correction)); }; #define FULL_MASK 0xffffffff __device__ inline float warpMin(float x) { x = min(x, __shfl_down_sync(FULL_MASK, x, 16)); x = min(x, __shfl_down_sync(FULL_MASK, x, 8)); x = min(x, __shfl_down_sync(FULL_MASK, x, 4)); x = min(x, __shfl_down_sync(FULL_MASK, x, 2)); x = min(x, __shfl_down_sync(FULL_MASK, x, 1)); return __shfl_sync(FULL_MASK, x, 0); } __device__ inline float warpMax(float x) { x = max(x, __shfl_down_sync(FULL_MASK, x, 16)); x = max(x, __shfl_down_sync(FULL_MASK, x, 8)); x = max(x, __shfl_down_sync(FULL_MASK, x, 4)); x = max(x, __shfl_down_sync(FULL_MASK, x, 2)); x = max(x, __shfl_down_sync(FULL_MASK, x, 1)); return __shfl_sync(FULL_MASK, x, 0); } __device__ inline float warpSum(float x) { x += __shfl_down_sync(FULL_MASK, x, 16); x += __shfl_down_sync(FULL_MASK, x, 8); x += __shfl_down_sync(FULL_MASK, x, 4); x += __shfl_down_sync(FULL_MASK, x, 2); x += __shfl_down_sync(FULL_MASK, x, 1); return __shfl_sync(FULL_MASK, x, 0); } __device__ inline float3 warpSum(float3 v) { v.x = warpSum(v.x); v.y = warpSum(v.y); v.z = warpSum(v.z); return v; } __device__ inline float3x3 warpSum(float3x3 m) { m.c1 = warpSum(m.c1); m.c2 = warpSum(m.c2); m.c3 = warpSum(m.c3); return m; } template<bool minmaxcorrection, bool laberr> __global__ void scorefunction_gpu_warp( size_t numColors, size_t numBlocks, const float3 * colors, const BlockBuild * blocks, float * scores, uint8_t * weights, float3 * colorRanges, float error_treshold, ColorLayout layout, int K, int * globalJobQueue, bool finalEval = false ) { int laneId = threadIdx.x & 31; int jobId = INT32_MAX; if (laneId == 0) { jobId = atomicSub(globalJobQueue, 1); } jobId = __shfl_sync(FULL_MASK, jobId, 0); while (__any_sync(FULL_MASK, jobId >= 0)) { BlockBuild currblock = blocks[jobId]; BlockBuild nextblock = blocks[jobId + 1]; size_t start = currblock.blockStart; size_t range = currblock.blockLength + (finalEval ? 
0 : nextblock.blockLength); // start + range <= numColors range = min(range, numColors - start); float3 minpoint, maxpoint; if (K > 1) { // fit if (range == 1) { minpoint = colors[start]; maxpoint = colors[start]; } else if (range == 2) { minpoint = colors[start]; maxpoint = colors[start + 1]; } else { float3 o, d; // least square fit { o = make_float3(0.0f, 0.0f, 0.0f); float3 error = o; for (size_t i = start + laneId; i < start + range; i += 32) { o = compensatedSum(colors[i], o, error); } o = warpSum(o); o = (1.0f / range) * o; float3 zeros = make_float3(0.0f, 0.0f, 0.0f); float3x3 scatterMatrix = make_float3x3(zeros, zeros, zeros); for (size_t i = start + laneId; i < start + range; i += 32) { float3 relpos = colors[i] - o; float3x3 outerProd = make_float3x3( relpos.x * relpos, relpos.y * relpos, relpos.z * relpos ); scatterMatrix = scatterMatrix + outerProd; } scatterMatrix = warpSum(scatterMatrix); // force dead-code elimination since matrix is symmetric scatterMatrix.c1.y = scatterMatrix.c2.x; scatterMatrix.c1.z = scatterMatrix.c3.x; scatterMatrix.c2.z = scatterMatrix.c3.y; if (trace(scatterMatrix) == 0.f) { d = normalize(o - minmaxCorrectedColor(o, layout)); minpoint = o; maxpoint = o; } else { // power method to find eigenvector of largest eigenvalue unsigned randidx = 0; float3 v = make_float3( rand_lut[randidx++ % 64], rand_lut[randidx++ % 64], rand_lut[randidx++ % 64] ); for (int i = 0; i < 20; i++) { if (length(v) == 0.f) { v = make_float3( rand_lut[randidx++ % 64], rand_lut[randidx++ % 64], rand_lut[randidx++ % 64] ); i = 0; } v = scatterMatrix * normalize(v); } float3 eigenvector = normalize(v); d = eigenvector; } } float mindist = FLT_MAX; float maxdist = -FLT_MAX; for (size_t i = start + laneId; i < start + range; i += 32) { float distance = dot(colors[i] - o, d); mindist = min(mindist, distance); maxdist = max(maxdist, distance); } mindist = warpMin(mindist); maxdist = warpMax(maxdist); minpoint = o + mindist * d; maxpoint = o + maxdist * d; } if (minmaxcorrection) { minpoint = minmaxCorrectedColor(minpoint, layout); maxpoint = minmaxCorrectedColor(maxpoint, layout); } } else { float3 o = make_float3(0.0f, 0.0f, 0.0f); float3 error = o; for (size_t i = start + laneId; i < start + range; i += 32) { o = compensatedSum(colors[i], o, error); } o = warpSum(o); o = (1.0f / range) * o; if (minmaxcorrection) { o = minmaxSingleCorrectedColor(o, layout); } minpoint = o; maxpoint = o; } if (finalEval && (laneId == 0)) { colorRanges[2 * jobId + 0] = minpoint; colorRanges[2 * jobId + 1] = maxpoint; } // ~fit // evaluate bool bEval = true; float mse = 0.0f; if (K > 1) { if (range == 1) { if (getError(minpoint, colors[start], minmaxcorrection) > error_treshold || getError(maxpoint, colors[start], minmaxcorrection) > error_treshold) { bEval = false; } } else if (range == 2) { if (getError(minpoint, colors[start], minmaxcorrection) > error_treshold || getError(maxpoint, colors[start + 1], minmaxcorrection) > error_treshold) { bEval = false; } } else { float msesum = 0.0f; const float3 & A = minpoint; const float3 & B = maxpoint; float colorRangeInvSq = 1.0f / dot(B - A, B - A); for (size_t i = start + laneId; i < start + range; i += 32) { const float3 & p = colors[i]; float distance = 0.0f; if (length(B - A) > (1e-4f)) { distance = colorRangeInvSq * dot(p - A, B - A); } float w = clamp(round(distance * float(K - 1)), 0.0f, float(K - 1)); float3 interpolated_color = A + w / float(K - 1) * (B - A); float error = getError(p, interpolated_color, minmaxcorrection); msesum += getErrorSquared(p, 
interpolated_color, minmaxcorrection); if (error > error_treshold) { bEval = false; } if (finalEval) { weights[i] = w; } } msesum = warpSum(msesum); // true iff all bEval are true bEval = __all_sync(FULL_MASK, bEval); mse = msesum / float(range * 3); } } else { float msesum = 0.0f; for (size_t i = start + laneId; i < start + range; i += 32) { const float3 & p = colors[i]; float3 interpolated_color = minpoint; float error = getError(p, interpolated_color, minmaxcorrection); msesum += getErrorSquared(p, interpolated_color, minmaxcorrection); if (error > error_treshold) { bEval = false; } } msesum = warpSum(msesum); // true iff all bEval are true bEval = __all_sync(FULL_MASK, bEval); mse = msesum / float(range * 3); } // ~evaluate float score = bEval ? 1.0f / (mse + 1.0f) : -1.0f; if (K > 1 && range == 2 && bEval) { score = 1.0f / (length(colors[start] - colors[start + 1]) + 1.0f); } if (!isfinite(score)) { score = -1.0f; } if (laneId == 0) { scores[jobId] = score; } // fetch next job if (laneId == 0) jobId = atomicSub(globalJobQueue, 1); jobId = __shfl_sync(FULL_MASK, jobId, 0); }// ~while(jobId >= 0) }// ~scorefunction_gpu_warp float3 * g_dev_colors = nullptr; uint8_t * g_dev_weights = nullptr; size_t g_numColors = 0; #include <csignal> void uploadColors(const vector<float3> &colors) { if (g_dev_weights) cudaFree(g_dev_weights); if (g_dev_colors) cudaFree(g_dev_colors); g_numColors = colors.size(); { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err ) { std::fprintf( stderr, "ERROR %s:%d: cuda error \"%s\"\n", __FILE__, __LINE__, cudaGetErrorString(err) ); } } // printf("Allocating %zu weights (%fMB)\n", colors.size(), colors.size() * sizeof(uint8_t) / double(1 << 20)); // printf("Allocating %zu colors (%fMB)\n", colors.size(), colors.size() * sizeof(float3) / double(1 << 20)); // alloc per-color memory cudaMallocManaged(&g_dev_weights, colors.size() * sizeof(uint8_t)); cudaMallocManaged(&g_dev_colors, colors.size() * sizeof(float3)); cudaMemcpy(g_dev_colors, &colors[0], colors.size() * sizeof(float3), cudaMemcpyHostToDevice); cudaError_t err = cudaGetLastError(); if( cudaSuccess != err ) { std::fprintf( stderr, "ERROR %s:%d: cuda error \"%s\"\n", __FILE__, __LINE__, cudaGetErrorString(err) ); } } void scores_gpu( const vector<BlockBuild> &blocks, vector<float> &scores, vector<uint8_t> &weights, vector<float3> &colorRanges, float error_treshold, bool minmaxcorrection, bool laberr, ColorLayout layout, int K, bool finalEval ) { if (g_numColors == 0) { // no colors? nothing more to do. 
return; } static BlockBuild * pBlocks = nullptr; static float * pScores = nullptr; static float3 * pColorRanges = nullptr; // alloc per-block memory static size_t blockAllocSize = 0; if (blockAllocSize < blocks.size()) { if (pBlocks) cudaFree(pBlocks); if (pScores) cudaFree(pScores); if (pColorRanges) cudaFree(pColorRanges); blockAllocSize = blocks.size(); // printf("Allocating %zu blocks (%fMB)\n", blockAllocSize, blockAllocSize * sizeof(BlockBuild) / double(1 << 20)); // printf("Allocating %zu blocks scores (%fMB)\n", blockAllocSize, blockAllocSize * sizeof(float) / double(1 << 20)); // printf("Allocating %zu blocks ranges (%fMB)\n", blockAllocSize, blockAllocSize * 2 * sizeof(float3) / double(1 << 20)); cudaMallocManaged(&pBlocks, blockAllocSize * sizeof(BlockBuild)); cudaMallocManaged(&pScores, blockAllocSize * sizeof(float)); cudaMallocManaged(&pColorRanges, blockAllocSize * 2 * sizeof(float3)); } // upload blocks cudaMemcpy(pBlocks, &blocks[0], blocks.size() * sizeof(BlockBuild), cudaMemcpyHostToDevice); static int *jobQueue = nullptr; if (!jobQueue) { cudaMalloc(&jobQueue, sizeof(int)); } assert(blocks.size() < std::numeric_limits<int32_t>::max()); int jobs = int(blocks.size()) - 1; // first job is the last valid idx. cudaMemcpy(jobQueue, &jobs, sizeof(int), cudaMemcpyHostToDevice); { dim3 blockDim(128); dim3 gridDim(20 * 16); // reduce register pressure via templates if (minmaxcorrection) { scorefunction_gpu_warp<true, false> << <gridDim, blockDim >> > ( g_numColors, blocks.size(), g_dev_colors, pBlocks, pScores, g_dev_weights, pColorRanges, error_treshold, layout, K, jobQueue, finalEval ); } } scores.resize(blocks.size()); cudaMemcpy(&scores[0], pScores, blocks.size() * sizeof(float), cudaMemcpyDeviceToHost); if (finalEval) { colorRanges.resize(2 * blocks.size()); cudaMemcpy(&colorRanges[0], pColorRanges, blocks.size() * 2 * sizeof(float3), cudaMemcpyDeviceToHost); weights.resize(g_numColors); cudaMemcpy(&weights[0], g_dev_weights, g_numColors * sizeof(uint8_t), cudaMemcpyDeviceToHost); } }
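// ---------------------------------------------------------------------------
// Minimal standalone sketch (editor's addition, not part of the original file)
// of the dispatch pattern used by scorefunction_gpu_warp above: a persistent
// warp pops job indices from a single global counter (lane 0 performs the
// atomicSub, then broadcasts with __shfl_sync), processes each job
// cooperatively with a shuffle-based warp reduction, and exits once the
// counter goes negative. The kernel, buffer sizes and launch configuration are
// illustrative; error checking is omitted. Like the file above, it requires
// the CUDA 9+ *_sync warp primitives.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

#define FULL_MASK 0xffffffff

__global__ void warp_job_queue_demo(const float *in, float *out, int *jobCounter)
{
    const int lane = threadIdx.x & 31;
    int job = -1;
    if (lane == 0) job = atomicSub(jobCounter, 1);   // lane 0 claims the next job
    job = __shfl_sync(FULL_MASK, job, 0);            // broadcast to the whole warp

    while (__any_sync(FULL_MASK, job >= 0)) {        // job is warp-uniform after the broadcast
        // Cooperative work on this job: every lane contributes one term, then a
        // shuffle reduction (the same idiom as warpSum above) combines them.
        float acc = in[job] * (lane + 1);
        for (int ofs = 16; ofs > 0; ofs >>= 1)
            acc += __shfl_down_sync(FULL_MASK, acc, ofs);
        if (lane == 0) out[job] = acc;               // lane 0 holds the warp total

        if (lane == 0) job = atomicSub(jobCounter, 1);
        job = __shfl_sync(FULL_MASK, job, 0);
    }
}

int main()
{
    const int n = 1024;
    std::vector<float> h_in(n, 1.0f), h_out(n, 0.0f);
    float *d_in = nullptr, *d_out = nullptr;
    int *d_jobs = nullptr;
    cudaMalloc((void **)&d_in,  n * sizeof(float));
    cudaMalloc((void **)&d_out, n * sizeof(float));
    cudaMalloc((void **)&d_jobs, sizeof(int));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);

    int firstJob = n - 1;                            // first job is the last valid index
    cudaMemcpy(d_jobs, &firstJob, sizeof(int), cudaMemcpyHostToDevice);

    warp_job_queue_demo<<<20 * 16, 128>>>(d_in, d_out, d_jobs);
    cudaMemcpy(h_out.data(), d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

    // Every job j should hold 1 + 2 + ... + 32 = 528.
    printf("out[0] = %f, out[%d] = %f\n", h_out[0], n - 1, h_out[n - 1]);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_jobs);
    return 0;
}
// ---------------------------------------------------------------------------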
56ab8623eb715516ed0702f18d85df8f400ec186.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction kernels */ /* * Uncomment #defines to compile in each reduce kernel */ //#define REDUCE0 //#define REDUCE1 //#define REDUCE2 //#define REDUCE3 #define REDUCE4 //#define REDUCE5 //#define REDUCE6 #ifndef _REDUCE_KERNEL_H_ #define _REDUCE_KERNEL_H_ #include <stdio.h> // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T*() { extern __shared__ int __smem[]; return (T*)__smem; } __device__ inline operator const T*() const { extern __shared__ int __smem[]; return (T*)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double*() { extern __shared__ double __smem_d[]; return (double*)__smem_d; } __device__ inline operator const double*() const { extern __shared__ double __smem_d[]; return (double*)__smem_d; } }; /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n threads - only works for power-of-2 arrays */ /* This reduction interleaves which threads are active by using the modulo operator. This operator is very expensive on GPUs, and the interleaved inactivity means that no whole warps are active, which is also very inefficient */ #ifdef REDUCE0 template <class T> __global__ void reduce0(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { // modulo arithmetic is slow! if ((tid % (2*s)) == 0) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; __syncthreads(); } #endif /* This version uses contiguous threads, but its interleaved addressing results in many shared memory bank conflicts. */ #ifdef REDUCE1 template <class T> __global__ void reduce1(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version uses sequential addressing -- no divergence or bank conflicts. */ #ifdef REDUCE2 template <class T> __global__ void reduce2(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? 
g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version uses n/2 threads -- it performs the first level of reduction when reading from global memory. */ #ifdef REDUCE3 template <class T> __global__ void reduce3(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version unrolls the last warp to avoid synchronization where it isn't needed. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ #ifdef REDUCE4 template <class T, unsigned int blockSize> __global__ void reduce4(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T *smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version is completely unrolled. It uses a template parameter to achieve optimal code for any (power of 2) number of threads. This requires a switch statement in the host code to handle all the different thread block sizes at compile time. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ #ifdef REDUCE5 template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; T mySum = (i < n) ? 
g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ #ifdef REDUCE6 template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile T* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif extern "C" bool isPow2(unsigned int x); //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template <class T> void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch switch (whichKernel) { #ifdef REDUCE0 case 0: hipLaunchKernelGGL(( reduce0<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; #endif #ifdef REDUCE1 case 1: hipLaunchKernelGGL(( reduce1<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; #endif #ifdef REDUCE2 case 2: hipLaunchKernelGGL(( reduce2<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; #endif #ifdef REDUCE3 case 3: hipLaunchKernelGGL(( reduce3<T>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; #endif #ifdef REDUCE4 case 4: switch (threads) { case 512: //reduce4<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce4<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce4<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: //reduce4<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce4<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce4<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce4<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: //reduce4<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce4<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: //reduce4<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; ; } break; #endif #ifdef REDUCE5 case 5: switch (threads) { case 512: //reduce5<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce5<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce5<T, 128>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: //reduce5<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce5<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce5<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce5<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, 
d_odata, size); break; case 4: //reduce5<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce5<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: //reduce5<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; ; } break; #endif #ifdef REDUCE6 case 6: default: if (isPow2(size)) { switch (threads) { case 512: //reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reduce6<T, 128, true>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_idata, d_odata, size); break; case 64: //reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: //reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: //reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; ; } } else { switch (threads) { case 512: //reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: //reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: //reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: //reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: //reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } break; #endif } } /* // Instantiate the reduction function for 3 types template void reduce<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata); template void reduce<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata); */ template void reduce<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata); #endif // #ifndef _REDUCE_KERNEL_H_
56ab8623eb715516ed0702f18d85df8f400ec186.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction kernels */ /* * Uncomment #defines to compile in each reduce kernel */ //#define REDUCE0 //#define REDUCE1 //#define REDUCE2 //#define REDUCE3 #define REDUCE4 //#define REDUCE5 //#define REDUCE6 #ifndef _REDUCE_KERNEL_H_ #define _REDUCE_KERNEL_H_ #include <stdio.h> // Utility class used to avoid linker errors with extern // unsized shared memory arrays with templated type template<class T> struct SharedMemory { __device__ inline operator T*() { extern __shared__ int __smem[]; return (T*)__smem; } __device__ inline operator const T*() const { extern __shared__ int __smem[]; return (T*)__smem; } }; // specialize for double to avoid unaligned memory // access compile errors template<> struct SharedMemory<double> { __device__ inline operator double*() { extern __shared__ double __smem_d[]; return (double*)__smem_d; } __device__ inline operator const double*() const { extern __shared__ double __smem_d[]; return (double*)__smem_d; } }; /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n threads - only works for power-of-2 arrays */ /* This reduction interleaves which threads are active by using the modulo operator. This operator is very expensive on GPUs, and the interleaved inactivity means that no whole warps are active, which is also very inefficient */ #ifdef REDUCE0 template <class T> __global__ void reduce0(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { // modulo arithmetic is slow! if ((tid % (2*s)) == 0) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; __syncthreads(); } #endif /* This version uses contiguous threads, but its interleaved addressing results in many shared memory bank conflicts. */ #ifdef REDUCE1 template <class T> __global__ void reduce1(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version uses sequential addressing -- no divergence or bank conflicts. */ #ifdef REDUCE2 template <class T> __global__ void reduce2(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? 
g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version uses n/2 threads -- it performs the first level of reduction when reading from global memory. */ #ifdef REDUCE3 template <class T> __global__ void reduce3(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version unrolls the last warp to avoid synchronization where it isn't needed. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ #ifdef REDUCE4 template <class T, unsigned int blockSize> __global__ void reduce4(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem for(unsigned int s=blockDim.x/2; s>32; s>>=1) { if (tid < s) { sdata[tid] = mySum = mySum + sdata[tid + s]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T *smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version is completely unrolled. It uses a template parameter to achieve optimal code for any (power of 2) number of threads. This requires a switch statement in the host code to handle all the different thread block sizes at compile time. Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ #ifdef REDUCE5 template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; T mySum = (i < n) ? 
g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif /* This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) Note, this kernel needs a minimum of 64*sizeof(T) bytes of shared memory. In other words if blockSize <= 32, allocate 64*sizeof(T) bytes. If blockSize > 32, allocate blockSize*sizeof(T) bytes. */ #ifdef REDUCE6 template <class T, unsigned int blockSize, bool nIsPow2> __global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile T* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } #endif extern "C" bool isPow2(unsigned int x); //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// template <class T> void reduce(int size, int threads, int blocks, int whichKernel, T *d_idata, T *d_odata) { dim3 dimBlock(threads, 1, 1); dim3 dimGrid(blocks, 1, 1); // when there is only one warp per block, we need to allocate two warps // worth of shared memory so that we don't index shared memory out of bounds int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T); // choose which of the optimized versions of reduction to launch switch (whichKernel) { #ifdef REDUCE0 case 0: reduce0<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; #endif #ifdef REDUCE1 case 1: reduce1<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; #endif #ifdef REDUCE2 case 2: reduce2<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; #endif #ifdef REDUCE3 case 3: reduce3<T><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; #endif #ifdef REDUCE4 case 4: switch (threads) { case 512: //reduce4<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce4<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce4<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: //reduce4<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce4<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce4<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce4<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: //reduce4<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce4<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: //reduce4<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; ; } break; #endif #ifdef REDUCE5 case 5: switch (threads) { case 512: //reduce5<T, 512><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce5<T, 256><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce5<T, 128><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: //reduce5<T, 64><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce5<T, 32><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce5<T, 16><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce5<T, 8><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: //reduce5<T, 4><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce5<T, 2><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; 
case 1: //reduce5<T, 1><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; ; } break; #endif #ifdef REDUCE6 case 6: default: if (isPow2(size)) { switch (threads) { case 512: //reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: //reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: //reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: //reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; ; } } else { switch (threads) { case 512: //reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 256: //reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 128: //reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 64: //reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 32: //reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 16: //reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 8: //reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 4: //reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 2: //reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; case 1: //reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size); break; } } break; #endif } } /* // Instantiate the reduction function for 3 types template void reduce<int>(int size, int threads, int blocks, int whichKernel, int *d_idata, int *d_odata); template void reduce<float>(int size, int threads, int blocks, int whichKernel, float *d_idata, float *d_odata); */ template void reduce<double>(int size, int threads, int blocks, int whichKernel, double *d_idata, double *d_odata); #endif // #ifndef _REDUCE_KERNEL_H_
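// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the reduce4/5/6 kernels
// above finish the last 32 elements with a volatile shared-memory
// "warp-synchronous" tail, which relies on implicit lockstep execution inside
// a warp. On architectures with independent thread scheduling (Volta and
// later) that assumption no longer holds, so a common replacement is to finish
// the final warp with __shfl_down_sync instead. The variant below follows the
// structure of reduce4 but swaps in the shuffle-based tail; the names and the
// small launch helper are illustrative only.
#include <cuda_runtime.h>

template <class T, unsigned int blockSize>
__global__ void reduce4_shfl(const T *g_idata, T *g_odata, unsigned int n)
{
    extern __shared__ unsigned char smem_raw[];
    T *sdata = reinterpret_cast<T *>(smem_raw);

    // First level of reduction while loading from global memory (as in reduce4).
    unsigned int tid = threadIdx.x;
    unsigned int i   = blockIdx.x * (blockSize * 2) + threadIdx.x;
    T mySum = (i < n) ? g_idata[i] : T(0);
    if (i + blockSize < n) mySum += g_idata[i + blockSize];
    sdata[tid] = mySum;
    __syncthreads();

    // Shared-memory tree reduction down to the last warp.
    for (unsigned int s = blockSize / 2; s > 32; s >>= 1) {
        if (tid < s) sdata[tid] = mySum = mySum + sdata[tid + s];
        __syncthreads();
    }

    // Final warp: explicit shuffle reduction instead of the volatile trick.
    if (tid < 32) {
        if (blockSize >= 64) mySum += sdata[tid + 32];
        for (int offset = 16; offset > 0; offset >>= 1)
            mySum += __shfl_down_sync(0xffffffff, mySum, offset);
    }
    if (tid == 0) g_odata[blockIdx.x] = mySum;
}

// Launch helper: one partial sum per block, blockSize*sizeof(T) bytes of
// dynamic shared memory (the 64*sizeof(T) minimum of reduce4 is not needed).
template <class T, unsigned int blockSize>
void launch_reduce4_shfl(int size, int blocks, T *d_idata, T *d_odata)
{
    reduce4_shfl<T, blockSize><<<blocks, blockSize, blockSize * sizeof(T)>>>(
        d_idata, d_odata, (unsigned int)size);
}

template void launch_reduce4_shfl<double, 128>(int, int, double *, double *);
// ---------------------------------------------------------------------------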
efc9d3e95dea8333851a9395e3dd9e59b5782210.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <sstream> #include <stdlib.h> #include <string.h> #include <iostream> #include <fstream> #include <assert.h> #include "tuningParameters.h" #include "qtclib.h" #include "OptionParser.h" #include "ResultDatabase.h" #include "Timer.h" #include "support.h" #include "libdata.h" #include "cudacommon.h" #define _USE_MATH_DEFINES #include <float.h> #include <hip/hip_runtime.h> #include "PMSMemMgmt.h" #include "comm.h" texture<float, 2, hipReadModeElementType> texDistance; using namespace std; #include "kernels_common.h" #include "kernels_full_storage.h" #include "kernels_compact_storage.h" // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. The user is allowed to specify // the size of the input data in megabytes if they are not using a // predefined size (i.e. the -s option). // // Arguments: // op: the options parser / parameter database // // Programmer: Anthony Danalis // Creation: February 04, 2011 // Returns: nothing // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op){ op.addOption("PointCount", OPT_INT, "4096", "point count"); op.addOption("DataFile", OPT_STRING, "///", "BLAST data input file name"); op.addOption("Threshold", OPT_FLOAT, "1", "cluster diameter threshold"); op.addOption("SaveOutput", OPT_BOOL, "", "BLAST data input file name"); op.addOption("Verbose", OPT_BOOL, "", "Print cluster cardinalities"); op.addOption("TextureMem", OPT_BOOL, "0", "Use Texture memory for distance matrix"); op.addOption("CompactStorage", OPT_BOOL, "0", "Use compact storage distance matrix regardless of problem size"); } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Calls single precision and, if viable, double precision QT-Clustering // benchmark. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, ResultDatabase &resultDB, OptionParser& op); void RunBenchmark(ResultDatabase &resultDB, OptionParser &op){ // Test to see if this device supports double precision hipGetDevice(&qtcDevice); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, qtcDevice); runTest("QTC", resultDB, op); } // **************************************************************************** // Function: calculate_participants // // Purpose: // This function decides how many GPUs (up to the maximum requested by the user) // and threadblocks per GPU will be used. It also returns the total number of // thread-blocks across all GPUs and the number of thread-blocks that are in nodes // before the current one. // In the future, the behavior of this function should be decided based on // auto-tuning instead of arbitrary decisions. 
// // Arguments: // The number of nodes requested by the user and the four // variables that the function computes (passed by reference) // // // Returns: nothing // // Programmer: Anthony Danalis // Creation: May 25, 2011 // // **************************************************************************** void calculate_participants(int point_count, int node_count, int cwrank, int *thread_block_count, int *total_thread_block_count, int *active_node_count){ int ac_nd_cnt, thr_blc_cnt, total_thr_blc_cnt; ac_nd_cnt = node_count; if( point_count <= (node_count-1) * SM_COUNT * GPU_MIN_SATURATION_FACTOR ){ int K = SM_COUNT * GPU_MIN_SATURATION_FACTOR; ac_nd_cnt = (point_count+K-1) / K; } if( point_count >= ac_nd_cnt * SM_COUNT * OVR_SBSCR_FACTOR ){ thr_blc_cnt = SM_COUNT * OVR_SBSCR_FACTOR; total_thr_blc_cnt = thr_blc_cnt * ac_nd_cnt; }else{ thr_blc_cnt = point_count/ac_nd_cnt; if( cwrank < point_count%ac_nd_cnt ){ thr_blc_cnt++; } total_thr_blc_cnt = point_count; } *active_node_count = ac_nd_cnt; *thread_block_count = thr_blc_cnt; *total_thread_block_count = total_thr_blc_cnt; return; } unsigned long int estimate_memory_for_full_storage(unsigned long int pnt_cnt, float d){ unsigned long total, thread_block_count, max_degree; float density; thread_block_count = (unsigned long int)SM_COUNT * OVR_SBSCR_FACTOR; // The density calculations assume that we are dealing with generated Euclidean points // (as opposed to externally provided scientific data) that are constrained to a 20x20 2D square. density = 3.14159*(d*d)/(20.0*20.0); if(density > 1.0 ) density = 1.0; max_degree = (unsigned long int)((float)pnt_cnt*density); // average number of points in a circle with radius d. max_degree *= 10; // The distribution of points is not uniform, so throw in a factor of 10 for max/average. if( max_degree > pnt_cnt ) max_degree = pnt_cnt; // Due to the point generation algorithm, a cluster can have up to N/30 elements in an arbitrarily small radius. if( max_degree < pnt_cnt/30 ) max_degree = pnt_cnt/30; total = 0; total += pnt_cnt*pnt_cnt*sizeof(float); // Sparse distance matrix total += pnt_cnt*max_degree*sizeof(int); // Indirection matrix total += pnt_cnt*thread_block_count*sizeof(char); // Current candidate cluster mask total += pnt_cnt*sizeof(int); // Ungrouped elements indirection vector total += pnt_cnt*sizeof(int); // Degrees vector total += pnt_cnt*sizeof(int); // Result return total; } void findMemCharacteristics(unsigned long int *gmem, unsigned long int *text){ int device; hipDeviceProp_t deviceProp; hipGetDevice(&device); CHECK_CUDA_ERROR(); hipGetDeviceProperties(&deviceProp, device); CHECK_CUDA_ERROR(); *gmem = (unsigned long int)(0.75*(float)deviceProp.totalGlobalMem); *text = (unsigned long int)deviceProp.maxTexture2D[1]; return; } // **************************************************************************** // Function: runTest // // Purpose: // This benchmark measures the performance of applying QT-clustering on // single precision data.
// // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, ResultDatabase &resultDB, OptionParser& op) { unsigned long int point_count, max_avail_memory, max_texture_dimension, needed_mem; int def_size = -1, matrix_type = 0x0; float threshold; bool use_texture = true, use_compact_storage = false; def_size = op.getOptionInt("size"); point_count = op.getOptionInt("PointCount"); threshold = op.getOptionFloat("Threshold"); use_texture = op.getOptionBool("TextureMem"); use_compact_storage = op.getOptionBool("CompactStorage"); if( use_compact_storage ){ use_texture = false; } switch( def_size ){ case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. // (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; use_texture = false; use_compact_storage = false; break; case 2: point_count = 8*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 3: point_count = 16*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 4: point_count = 16*1024; threshold = 4; use_texture = true; use_compact_storage = false; break; case 5: point_count = 26*1024; threshold = 1; use_texture = false; use_compact_storage = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } if( 0 == comm_get_rank() ){ // Make a reasonable estimate of the actual memory I can allocate // as well as the max texture size. findMemCharacteristics(&max_avail_memory, &max_texture_dimension); needed_mem = estimate_memory_for_full_storage(point_count, threshold); // see if we can fit the distance matrix in texture memory if( (point_count >= max_texture_dimension) || !use_texture ){ printf("Using global memory for distance matrix\n"); matrix_type |= GLOBAL_MEMORY; }else{ printf("Using texture memory for distance matrix\n"); matrix_type |= TEXTUR_MEMORY; } // find out what type of distance matrix we will be using. 
if( (max_avail_memory > needed_mem) && !use_compact_storage ){ printf("Using full storage distance matrix algorithm\n"); matrix_type |= FULL_STORAGE_MATRIX; }else{ printf("Using compact storage distance matrix algorithm\n"); matrix_type |= COMPACT_STORAGE_MATRIX; } } comm_broadcast ( &matrix_type, 1, COMM_TYPE_INT, 0); QTC(name, resultDB, op, matrix_type); } //////////////////////////////////////////////////////////////////////////////// // void QTC(const string& name, ResultDatabase &resultDB, OptionParser& op, int matrix_type){ ofstream debug_out, seeds_out; void *Ai_mask, *cardnl, *ungrpd_pnts_indr, *clustered_pnts_mask, *result, *dist_to_clust; void *indr_mtrx, *degrees; int *indr_mtrx_host, *ungrpd_pnts_indr_host, *cardinalities, *output; bool save_clusters, be_verbose, can_use_texture, synthetic_data; hipArray *distance_matrix_txt; void *distance_matrix_gmem, *distance_matrix; float *dist_source, *pnts; float threshold; int i, max_degree, thread_block_count, total_thread_block_count, active_node_count; int cwrank=0, node_count=1, tpb, max_card, iter=0; double t_krn=0, t_comm=0, t_trim=0, t_updt=0, t_redc=0, t_sync=0; unsigned long int dst_matrix_elems, point_count, max_point_count; string fname; point_count = op.getOptionInt("PointCount"); threshold = op.getOptionFloat("Threshold"); save_clusters = op.getOptionBool("SaveOutput"); be_verbose = op.getOptionBool("Verbose"); fname = op.getOptionString("DataFile"); if( fname.compare("///") == 0 ){ synthetic_data = true; }else{ synthetic_data = false; save_clusters = false; } can_use_texture = !!(matrix_type & TEXTUR_MEMORY); // TODO - only deal with this size-switch once int def_size = op.getOptionInt("size"); switch( def_size ) { case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. // (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 2: point_count = 8*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 3: point_count = 16*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 4: point_count = 16*1024; threshold = 4; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 5: point_count = 26*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } cwrank = comm_get_rank(); node_count = comm_get_size(); if( cwrank == 0 ){ if( synthetic_data ) pnts = generate_synthetic_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, point_count, matrix_type); else (void)read_BLAST_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, fname.c_str(), point_count, matrix_type); } comm_broadcast ( &point_count, 1, COMM_TYPE_INT, 0); comm_broadcast ( &max_degree, 1, COMM_TYPE_INT, 0); if( matrix_type & FULL_STORAGE_MATRIX ){ dst_matrix_elems = point_count*point_count; }else{ dst_matrix_elems = point_count*max_degree; } if( cwrank != 0 ){ // For all nodes except zero, in a distributed run. dist_source = pmsAllocHostBuffer<float>( dst_matrix_elems ); indr_mtrx_host = pmsAllocHostBuffer<int>( point_count*max_degree ); } // If we need to print the actual clusters later on, we'll need to have all points in all nodes. 
if( save_clusters ){ if( cwrank != 0 ){ pnts = (float *)malloc( 2*point_count*sizeof(float) ); } comm_broadcast ( pnts, 2*point_count, COMM_TYPE_FLOAT, 0); } comm_broadcast ( dist_source, dst_matrix_elems, COMM_TYPE_FLOAT, 0); comm_broadcast ( indr_mtrx_host, point_count*max_degree, COMM_TYPE_INT, 0); assert( max_degree > 0 ); init(op); calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); ungrpd_pnts_indr_host = pmsAllocHostBuffer<int>( point_count ); for(int i=0; i<point_count; i++){ ungrpd_pnts_indr_host[i] = i; } cardinalities = pmsAllocHostBuffer<int>(2); output = pmsAllocHostBuffer<int>(max_degree); if( can_use_texture ){ texDistance.addressMode[0] = hipAddressModeClamp; texDistance.addressMode[1] = hipAddressModeClamp; texDistance.filterMode = hipFilterModePoint; texDistance.normalized = false; // do not normalize coordinates // This is the actual distance matrix (dst_matrix_elems should be "point_count^2, or point_count*max_degree) printf("Allocating: %luMB (%lux%lux%lu) bytes in texture memory\n", dst_matrix_elems*sizeof(float)/(1024*1024), dst_matrix_elems/point_count, point_count, (long unsigned int)sizeof(float)); hipMallocArray(&distance_matrix_txt, &texDistance.channelDesc, dst_matrix_elems/point_count, point_count); }else{ allocDeviceBuffer(&distance_matrix_gmem, dst_matrix_elems*sizeof(float)); } CHECK_CUDA_ERROR(); // This is the N*Delta indirection matrix allocDeviceBuffer(&indr_mtrx, point_count*max_degree*sizeof(int)); allocDeviceBuffer(&degrees, point_count*sizeof(int)); allocDeviceBuffer(&ungrpd_pnts_indr, point_count*sizeof(int)); allocDeviceBuffer(&Ai_mask, thread_block_count*point_count*sizeof(char)); allocDeviceBuffer(&dist_to_clust, thread_block_count*max_degree*sizeof(float)); allocDeviceBuffer(&clustered_pnts_mask, point_count*sizeof(char)); allocDeviceBuffer(&cardnl, thread_block_count*2*sizeof(int)); allocDeviceBuffer(&result, point_count*sizeof(int)); // Copy to device, and record transfer time int pcie_TH = Timer::Start(); if( can_use_texture ){ hipMemcpyToArray(distance_matrix_txt, 0, 0, dist_source, dst_matrix_elems*sizeof(float), hipMemcpyHostToDevice); CHECK_CUDA_ERROR(); hipBindTextureToArray(texDistance, distance_matrix_txt); }else{ copyToDevice(distance_matrix_gmem, dist_source, dst_matrix_elems*sizeof(float)); } copyToDevice(indr_mtrx, indr_mtrx_host, point_count*max_degree*sizeof(int)); copyToDevice(ungrpd_pnts_indr, ungrpd_pnts_indr_host, point_count*sizeof(int)); hipMemset(clustered_pnts_mask, 0, point_count*sizeof(char)); hipMemset(dist_to_clust, 0, max_degree*thread_block_count*sizeof(float)); double transfer_time = Timer::Stop(pcie_TH, "PCIe Transfer Time"); tpb = ( point_count > THREADSPERBLOCK )? 
THREADSPERBLOCK : point_count; hipLaunchKernelGGL(( compute_degrees), dim3(grid2D(thread_block_count)), dim3(tpb), 0, 0, (int *)indr_mtrx, (int *)degrees, point_count, max_degree); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); const char *sizeStr; stringstream ss; ss << "PointCount=" << (long)point_count; sizeStr = strdup(ss.str().c_str()); if( 0 == cwrank ){ if( save_clusters ){ debug_out.open("p"); for(i=0; i<point_count; i++){ debug_out << pnts[2*i] << " " << pnts[2*i+1] << endl; } debug_out.close(); seeds_out.open("p_seeds"); } cout << "\nInitial ThreadBlockCount: " << thread_block_count; cout << " PointCount: " << point_count; cout << " Max degree: " << max_degree << "\n" << endl; cout.flush(); } max_point_count = point_count; tpb = THREADSPERBLOCK; if( can_use_texture ){ distance_matrix = distance_matrix_txt; }else{ distance_matrix = distance_matrix_gmem; } ////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////// // // Kernel execution int TH = Timer::Start(); do{ stringstream ss; int winner_node=-1; int winner_index=-1; bool this_node_participates = true; ++iter; calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); // If there are only a few elements left to cluster, reduce the number of participating nodes (GPUs). if( cwrank >= active_node_count ){ this_node_participates = false; } comm_update_communicator(cwrank, active_node_count); if( !this_node_participates ) break; cwrank = comm_get_rank(); dim3 grid = grid2D(thread_block_count); int Tkernel = Timer::Start(); //////////////////////////////////////////////////////////////////////////////////////////////// ///////// ----------------- Main kernel ----------------- ///////// hipLaunchKernelGGL(( QTC_device), dim3(grid), dim3(tpb), 0, 0, (float*)distance_matrix, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (int *)ungrpd_pnts_indr, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, cwrank, active_node_count, total_thread_block_count, matrix_type, can_use_texture); ///////// ----------------- Main kernel ----------------- ///////// //////////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); CHECK_CUDA_ERROR(); t_krn += Timer::Stop(Tkernel, "Kernel Only"); int Tredc = Timer::Start(); if( thread_block_count > 1 ){ // We are reducing 128 numbers or less, so one thread should be sufficient. hipLaunchKernelGGL(( reduce_card_device), dim3(grid2D(1)), dim3(1), 0, 0, (int *)cardnl, thread_block_count); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); } copyFromDevice( cardinalities, cardnl, 2*sizeof(int) ); max_card = cardinalities[0]; winner_index = cardinalities[1]; t_redc += Timer::Stop(Tredc, "Reduce Only"); int Tsync = Timer::Start(); comm_barrier(); t_sync += Timer::Stop(Tsync, "Sync Only"); int Tcomm = Timer::Start(); comm_find_winner(&max_card, &winner_node, &winner_index, cwrank, max_point_count+1); t_comm += Timer::Stop(Tcomm, "Comm Only"); if( be_verbose && cwrank == winner_node){ // for non-parallel cases, both "cwrank" and "winner_node" should be zero. 
cout << "[" << cwrank << "] Cluster Cardinality: " << max_card << " (Node: " << cwrank << ", index: " << winner_index << ")" << endl; } int Ttrim = Timer::Start(); hipLaunchKernelGGL(( trim_ungrouped_pnts_indr_array), dim3(grid2D(1)), dim3(tpb), 0, 0, winner_index, (int*)ungrpd_pnts_indr, (float*)distance_matrix, (int *)result, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, matrix_type, can_use_texture ); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); t_trim += Timer::Stop(Ttrim, "Trim Only"); if( cwrank == winner_node){ // for non-parallel cases, these should both be zero. if( save_clusters ){ ss << "p." << iter; debug_out.open(ss.str().c_str()); } copyFromDevice(output, (void *)result, max_card*sizeof(int) ); if( save_clusters ){ for(int i=0; i<max_card; i++){ debug_out << pnts[2*output[i]] << " " << pnts[2*output[i]+1] << endl; } seeds_out << pnts[2*winner_index] << " " << pnts[2*winner_index+1] << endl; debug_out.close(); } } int Tupdt = Timer::Start(); hipLaunchKernelGGL(( update_clustered_pnts_mask), dim3(grid2D(1)), dim3(tpb), 0, 0, (char *)clustered_pnts_mask, (char *)Ai_mask, max_point_count); hipDeviceSynchronize(); CHECK_CUDA_ERROR(); t_updt += Timer::Stop(Tupdt, "Update Only"); point_count -= max_card; }while( max_card > 1 && point_count ); double t = Timer::Stop(TH, "QT_Clustering"); if( save_clusters ){ seeds_out.close(); } // //////////////////////////////////////////////////////////////////////////////// if( cwrank == 0){ cout << "Cluster count: " << iter << endl; cout.flush(); } resultDB.AddResult(name+"_Synchron.", sizeStr, "s", t_sync); resultDB.AddResult(name+"_Communic.", sizeStr, "s", t_comm); resultDB.AddResult(name+"_Kernel", sizeStr, "s", t_krn); resultDB.AddResult(name+"_Trimming", sizeStr, "s", t_trim); resultDB.AddResult(name+"_Update", sizeStr, "s", t_updt); resultDB.AddResult(name+"_Reduction", sizeStr, "s", t_redc); resultDB.AddResult(name+"_Algorithm", sizeStr, "s", t); resultDB.AddResult(name+"+PCI_Trans.", sizeStr, "s", t+transfer_time); pmsFreeHostBuffer(dist_source); pmsFreeHostBuffer(indr_mtrx_host); if( can_use_texture ){ hipFreeArray(distance_matrix_txt); hipUnbindTexture(texDistance); }else{ freeDeviceBuffer(distance_matrix_gmem); } CHECK_CUDA_ERROR(); freeDeviceBuffer(indr_mtrx); freeDeviceBuffer(Ai_mask); freeDeviceBuffer(cardnl); freeDeviceBuffer(result); pmsFreeHostBuffer(output); return; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// int qtcDevice = -1; void init(OptionParser& op) { if (qtcDevice == -1) { if (op.getOptionVecInt("device").size() > 0) { qtcDevice = op.getOptionVecInt("device")[0]; } else { qtcDevice = 0; } hipSetDevice(qtcDevice); hipGetDevice(&qtcDevice); } } void allocDeviceBuffer(void** bufferp, unsigned long bytes) { hipMalloc(bufferp, bytes); CHECK_CUDA_ERROR(); } void freeDeviceBuffer(void* buffer) { hipFree(buffer); } void copyToDevice(void* to_device, void* from_host, unsigned long bytes) { hipMemcpy(to_device, from_host, bytes, hipMemcpyHostToDevice); CHECK_CUDA_ERROR(); } void copyFromDevice(void* to_host, void* from_device, unsigned long bytes) { hipMemcpy(to_host, from_device, bytes, hipMemcpyDeviceToHost); 
CHECK_CUDA_ERROR(); }
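The clustering loop in the file above reduces the per-thread-block cardinalities with reduce_card_device before copying two integers (the maximum cardinality and the winning seed index) back to the host. That kernel lives in kernels_common.h and is not part of this file, so the following is only a hedged sketch of what such a single-thread reduction can look like, written in plain CUDA for brevity; the (cardinality, index) pair layout of cardnl is an assumption inferred from the host-side copy of 2*sizeof(int).

#include <cstdio>
#include <cuda_runtime.h>

// Hedged sketch only: the benchmark's real reduce_card_device is not shown in
// this file. Per-thread-block results are assumed to be stored as
// (cardinality, index) pairs in cardnl. With at most ~128 pairs, a single
// thread is enough, as the comment in the loop above notes.
__global__ void reduce_card_sketch(int *cardnl, int thread_block_count)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        int best_card = cardnl[0];
        int best_idx  = cardnl[1];
        for (int i = 1; i < thread_block_count; ++i) {
            if (cardnl[2 * i] > best_card) {
                best_card = cardnl[2 * i];
                best_idx  = cardnl[2 * i + 1];
            }
        }
        cardnl[0] = best_card;   // host then copies these two ints back
        cardnl[1] = best_idx;
    }
}

int main()
{
    const int blocks = 8;
    int host[2 * blocks];
    for (int i = 0; i < blocks; ++i) {
        host[2 * i]     = (i * 3) % 7;   // fake cardinalities
        host[2 * i + 1] = 100 + i;       // fake seed indices
    }

    int *dev;
    cudaMalloc(&dev, sizeof(host));
    cudaMemcpy(dev, host, sizeof(host), cudaMemcpyHostToDevice);
    reduce_card_sketch<<<1, 1>>>(dev, blocks);
    cudaMemcpy(host, dev, 2 * sizeof(int), cudaMemcpyDeviceToHost);
    printf("max card = %d at index %d\n", host[0], host[1]);
    cudaFree(dev);
    return 0;
}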
efc9d3e95dea8333851a9395e3dd9e59b5782210.cu
#include <math.h> #include <sstream> #include <stdlib.h> #include <string.h> #include <iostream> #include <fstream> #include <assert.h> #include "tuningParameters.h" #include "qtclib.h" #include "OptionParser.h" #include "ResultDatabase.h" #include "Timer.h" #include "support.h" #include "libdata.h" #include "cudacommon.h" #define _USE_MATH_DEFINES #include <float.h> #include <cuda_runtime.h> #include "PMSMemMgmt.h" #include "comm.h" texture<float, 2, cudaReadModeElementType> texDistance; using namespace std; #include "kernels_common.h" #include "kernels_full_storage.h" #include "kernels_compact_storage.h" // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing. The user is allowed to specify // the size of the input data in megabytes if they are not using a // predefined size (i.e. the -s option). // // Arguments: // op: the options parser / parameter database // // Programmer: Anthony Danalis // Creation: February 04, 2011 // Returns: nothing // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op){ op.addOption("PointCount", OPT_INT, "4096", "point count"); op.addOption("DataFile", OPT_STRING, "///", "BLAST data input file name"); op.addOption("Threshold", OPT_FLOAT, "1", "cluster diameter threshold"); op.addOption("SaveOutput", OPT_BOOL, "", "BLAST data input file name"); op.addOption("Verbose", OPT_BOOL, "", "Print cluster cardinalities"); op.addOption("TextureMem", OPT_BOOL, "0", "Use Texture memory for distance matrix"); op.addOption("CompactStorage", OPT_BOOL, "0", "Use compact storage distance matrix regardless of problem size"); } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Calls single precision and, if viable, double precision QT-Clustering // benchmark. // // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, ResultDatabase &resultDB, OptionParser& op); void RunBenchmark(ResultDatabase &resultDB, OptionParser &op){ // Test to see if this device supports double precision cudaGetDevice(&qtcDevice); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, qtcDevice); runTest("QTC", resultDB, op); } // **************************************************************************** // Function: calculate_participants // // Purpose: // This function decides how many GPUs (up to the maximum requested by the user) // and threadblocks per GPU will be used. It also returns the total number of // thread-blocks across all GPUs and the number of thread-blocks that are in nodes // before the current one. // In the future, the behavior of this function should be decided based on // auto-tuning instead of arbitrary decisions. 
// // Arguments: // The number of nodes requested by the user and the four // variables that the function computes (passed by reference) // // // Returns: nothing // // Programmer: Anthony Danalis // Creation: May 25, 2011 // // **************************************************************************** void calculate_participants(int point_count, int node_count, int cwrank, int *thread_block_count, int *total_thread_block_count, int *active_node_count){ int ac_nd_cnt, thr_blc_cnt, total_thr_blc_cnt; ac_nd_cnt = node_count; if( point_count <= (node_count-1) * SM_COUNT * GPU_MIN_SATURATION_FACTOR ){ int K = SM_COUNT * GPU_MIN_SATURATION_FACTOR; ac_nd_cnt = (point_count+K-1) / K; } if( point_count >= ac_nd_cnt * SM_COUNT * OVR_SBSCR_FACTOR ){ thr_blc_cnt = SM_COUNT * OVR_SBSCR_FACTOR; total_thr_blc_cnt = thr_blc_cnt * ac_nd_cnt; }else{ thr_blc_cnt = point_count/ac_nd_cnt; if( cwrank < point_count%ac_nd_cnt ){ thr_blc_cnt++; } total_thr_blc_cnt = point_count; } *active_node_count = ac_nd_cnt; *thread_block_count = thr_blc_cnt; *total_thread_block_count = total_thr_blc_cnt; return; } unsigned long int estimate_memory_for_full_storage(unsigned long int pnt_cnt, float d){ unsigned long total, thread_block_count, max_degree; float density; thread_block_count = (unsigned long int)SM_COUNT * OVR_SBSCR_FACTOR; // The density calculations assume that we are dealing with generated Euclidean points // (as opposed to externally provided scientific data) that are constraint in a 20x20 2D square. density = 3.14159*(d*d)/(20.0*20.0); if(density > 1.0 ) density = 1.0; max_degree = (unsigned long int)((float)pnt_cnt*density); // average number of points in a cirlce with radius d. max_degree *= 10; // The distribution of points is not uniform, so throw in a factor of 10 for max/average. if( max_degree > pnt_cnt ) max_degree = pnt_cnt; // Due to the point generation algorithm, a cluster can have up to N/30 elements in an arbitratiry small radius. if( max_degree < pnt_cnt/30 ) max_degree = pnt_cnt/30; total = 0; total += pnt_cnt*pnt_cnt*sizeof(float); // Sparse distance matrix total += pnt_cnt*max_degree*sizeof(int); // Indirection matrix total += pnt_cnt*thread_block_count*sizeof(char); // Current candidate cluster mask total += pnt_cnt*sizeof(int); // Ungrouped elements indirection vector total += pnt_cnt*sizeof(int); // Degrees vector total += pnt_cnt*sizeof(int); // Result return total; } void findMemCharacteristics(unsigned long int *gmem, unsigned long int *text){ int device; cudaDeviceProp deviceProp; cudaGetDevice(&device); CHECK_CUDA_ERROR(); cudaGetDeviceProperties(&deviceProp, device); CHECK_CUDA_ERROR(); *gmem = (unsigned long int)(0.75*(float)deviceProp.totalGlobalMem); *text = (unsigned long int)deviceProp.maxTexture2D[1]; return; } // **************************************************************************** // Function: runTest // // Purpose: // This benchmark measures the performance of applying QT-clustering on // single precision data. 
// // Arguments: // resultDB: the benchmark stores its results in this ResultDatabase // op: the options parser / parameter database // // Returns: nothing // // Programmer: Anthony Danalis // Creation: February 04, 2011 // // **************************************************************************** void runTest(const string& name, ResultDatabase &resultDB, OptionParser& op) { unsigned long int point_count, max_avail_memory, max_texture_dimension, needed_mem; int def_size = -1, matrix_type = 0x0; float threshold; bool use_texture = true, use_compact_storage = false; def_size = op.getOptionInt("size"); point_count = op.getOptionInt("PointCount"); threshold = op.getOptionFloat("Threshold"); use_texture = op.getOptionBool("TextureMem"); use_compact_storage = op.getOptionBool("CompactStorage"); if( use_compact_storage ){ use_texture = false; } switch( def_size ){ case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. // (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; use_texture = false; use_compact_storage = false; break; case 2: point_count = 8*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 3: point_count = 16*1024; threshold = 1; use_texture = true; use_compact_storage = false; break; case 4: point_count = 16*1024; threshold = 4; use_texture = true; use_compact_storage = false; break; case 5: point_count = 26*1024; threshold = 1; use_texture = false; use_compact_storage = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } if( 0 == comm_get_rank() ){ // Make a reasonable estimate of the actual memory I can allocate // as well as the max texture size. findMemCharacteristics(&max_avail_memory, &max_texture_dimension); needed_mem = estimate_memory_for_full_storage(point_count, threshold); // see if we can fit the distance matrix in texture memory if( (point_count >= max_texture_dimension) || !use_texture ){ printf("Using global memory for distance matrix\n"); matrix_type |= GLOBAL_MEMORY; }else{ printf("Using texture memory for distance matrix\n"); matrix_type |= TEXTUR_MEMORY; } // find out what type of distance matrix we will be using. 
if( (max_avail_memory > needed_mem) && !use_compact_storage ){ printf("Using full storage distance matrix algorithm\n"); matrix_type |= FULL_STORAGE_MATRIX; }else{ printf("Using compact storage distance matrix algorithm\n"); matrix_type |= COMPACT_STORAGE_MATRIX; } } comm_broadcast ( &matrix_type, 1, COMM_TYPE_INT, 0); QTC(name, resultDB, op, matrix_type); } //////////////////////////////////////////////////////////////////////////////// // void QTC(const string& name, ResultDatabase &resultDB, OptionParser& op, int matrix_type){ ofstream debug_out, seeds_out; void *Ai_mask, *cardnl, *ungrpd_pnts_indr, *clustered_pnts_mask, *result, *dist_to_clust; void *indr_mtrx, *degrees; int *indr_mtrx_host, *ungrpd_pnts_indr_host, *cardinalities, *output; bool save_clusters, be_verbose, can_use_texture, synthetic_data; cudaArray *distance_matrix_txt; void *distance_matrix_gmem, *distance_matrix; float *dist_source, *pnts; float threshold; int i, max_degree, thread_block_count, total_thread_block_count, active_node_count; int cwrank=0, node_count=1, tpb, max_card, iter=0; double t_krn=0, t_comm=0, t_trim=0, t_updt=0, t_redc=0, t_sync=0; unsigned long int dst_matrix_elems, point_count, max_point_count; string fname; point_count = op.getOptionInt("PointCount"); threshold = op.getOptionFloat("Threshold"); save_clusters = op.getOptionBool("SaveOutput"); be_verbose = op.getOptionBool("Verbose"); fname = op.getOptionString("DataFile"); if( fname.compare("///") == 0 ){ synthetic_data = true; }else{ synthetic_data = false; save_clusters = false; } can_use_texture = !!(matrix_type & TEXTUR_MEMORY); // TODO - only deal with this size-switch once int def_size = op.getOptionInt("size"); switch( def_size ) { case 1: // size == 1 should match default values of PointCount, // Threshold, TextureMem, and CompactStorage parameters. // (i.e., -s 1 is the default) point_count = 4*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 2: point_count = 8*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 3: point_count = 16*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 4: point_count = 16*1024; threshold = 4; save_clusters = false; be_verbose = false; synthetic_data = true; break; case 5: point_count = 26*1024; threshold = 1; save_clusters = false; be_verbose = false; synthetic_data = true; break; default: fprintf( stderr, "unsupported size %d given; terminating\n", def_size ); return; } cwrank = comm_get_rank(); node_count = comm_get_size(); if( cwrank == 0 ){ if( synthetic_data ) pnts = generate_synthetic_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, point_count, matrix_type); else (void)read_BLAST_data(&dist_source, &indr_mtrx_host, &max_degree, threshold, fname.c_str(), point_count, matrix_type); } comm_broadcast ( &point_count, 1, COMM_TYPE_INT, 0); comm_broadcast ( &max_degree, 1, COMM_TYPE_INT, 0); if( matrix_type & FULL_STORAGE_MATRIX ){ dst_matrix_elems = point_count*point_count; }else{ dst_matrix_elems = point_count*max_degree; } if( cwrank != 0 ){ // For all nodes except zero, in a distributed run. dist_source = pmsAllocHostBuffer<float>( dst_matrix_elems ); indr_mtrx_host = pmsAllocHostBuffer<int>( point_count*max_degree ); } // If we need to print the actual clusters later on, we'll need to have all points in all nodes. 
if( save_clusters ){ if( cwrank != 0 ){ pnts = (float *)malloc( 2*point_count*sizeof(float) ); } comm_broadcast ( pnts, 2*point_count, COMM_TYPE_FLOAT, 0); } comm_broadcast ( dist_source, dst_matrix_elems, COMM_TYPE_FLOAT, 0); comm_broadcast ( indr_mtrx_host, point_count*max_degree, COMM_TYPE_INT, 0); assert( max_degree > 0 ); init(op); calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); ungrpd_pnts_indr_host = pmsAllocHostBuffer<int>( point_count ); for(int i=0; i<point_count; i++){ ungrpd_pnts_indr_host[i] = i; } cardinalities = pmsAllocHostBuffer<int>(2); output = pmsAllocHostBuffer<int>(max_degree); if( can_use_texture ){ texDistance.addressMode[0] = cudaAddressModeClamp; texDistance.addressMode[1] = cudaAddressModeClamp; texDistance.filterMode = cudaFilterModePoint; texDistance.normalized = false; // do not normalize coordinates // This is the actual distance matrix (dst_matrix_elems should be "point_count^2, or point_count*max_degree) printf("Allocating: %luMB (%lux%lux%lu) bytes in texture memory\n", dst_matrix_elems*sizeof(float)/(1024*1024), dst_matrix_elems/point_count, point_count, (long unsigned int)sizeof(float)); cudaMallocArray(&distance_matrix_txt, &texDistance.channelDesc, dst_matrix_elems/point_count, point_count); }else{ allocDeviceBuffer(&distance_matrix_gmem, dst_matrix_elems*sizeof(float)); } CHECK_CUDA_ERROR(); // This is the N*Delta indirection matrix allocDeviceBuffer(&indr_mtrx, point_count*max_degree*sizeof(int)); allocDeviceBuffer(&degrees, point_count*sizeof(int)); allocDeviceBuffer(&ungrpd_pnts_indr, point_count*sizeof(int)); allocDeviceBuffer(&Ai_mask, thread_block_count*point_count*sizeof(char)); allocDeviceBuffer(&dist_to_clust, thread_block_count*max_degree*sizeof(float)); allocDeviceBuffer(&clustered_pnts_mask, point_count*sizeof(char)); allocDeviceBuffer(&cardnl, thread_block_count*2*sizeof(int)); allocDeviceBuffer(&result, point_count*sizeof(int)); // Copy to device, and record transfer time int pcie_TH = Timer::Start(); if( can_use_texture ){ cudaMemcpyToArray(distance_matrix_txt, 0, 0, dist_source, dst_matrix_elems*sizeof(float), cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(); cudaBindTextureToArray(texDistance, distance_matrix_txt); }else{ copyToDevice(distance_matrix_gmem, dist_source, dst_matrix_elems*sizeof(float)); } copyToDevice(indr_mtrx, indr_mtrx_host, point_count*max_degree*sizeof(int)); copyToDevice(ungrpd_pnts_indr, ungrpd_pnts_indr_host, point_count*sizeof(int)); cudaMemset(clustered_pnts_mask, 0, point_count*sizeof(char)); cudaMemset(dist_to_clust, 0, max_degree*thread_block_count*sizeof(float)); double transfer_time = Timer::Stop(pcie_TH, "PCIe Transfer Time"); tpb = ( point_count > THREADSPERBLOCK )? 
THREADSPERBLOCK : point_count; compute_degrees<<<grid2D(thread_block_count), tpb>>>((int *)indr_mtrx, (int *)degrees, point_count, max_degree); cudaThreadSynchronize(); CHECK_CUDA_ERROR(); const char *sizeStr; stringstream ss; ss << "PointCount=" << (long)point_count; sizeStr = strdup(ss.str().c_str()); if( 0 == cwrank ){ if( save_clusters ){ debug_out.open("p"); for(i=0; i<point_count; i++){ debug_out << pnts[2*i] << " " << pnts[2*i+1] << endl; } debug_out.close(); seeds_out.open("p_seeds"); } cout << "\nInitial ThreadBlockCount: " << thread_block_count; cout << " PointCount: " << point_count; cout << " Max degree: " << max_degree << "\n" << endl; cout.flush(); } max_point_count = point_count; tpb = THREADSPERBLOCK; if( can_use_texture ){ distance_matrix = distance_matrix_txt; }else{ distance_matrix = distance_matrix_gmem; } ////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////// // // Kernel execution int TH = Timer::Start(); do{ stringstream ss; int winner_node=-1; int winner_index=-1; bool this_node_participates = true; ++iter; calculate_participants(point_count, node_count, cwrank, &thread_block_count, &total_thread_block_count, &active_node_count); // If there are only a few elements left to cluster, reduce the number of participating nodes (GPUs). if( cwrank >= active_node_count ){ this_node_participates = false; } comm_update_communicator(cwrank, active_node_count); if( !this_node_participates ) break; cwrank = comm_get_rank(); dim3 grid = grid2D(thread_block_count); int Tkernel = Timer::Start(); //////////////////////////////////////////////////////////////////////////////////////////////// ///////// ----------------- Main kernel ----------------- ///////// QTC_device<<<grid, tpb>>>((float*)distance_matrix, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (int *)ungrpd_pnts_indr, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, cwrank, active_node_count, total_thread_block_count, matrix_type, can_use_texture); ///////// ----------------- Main kernel ----------------- ///////// //////////////////////////////////////////////////////////////////////////////////////////////// cudaThreadSynchronize(); CHECK_CUDA_ERROR(); t_krn += Timer::Stop(Tkernel, "Kernel Only"); int Tredc = Timer::Start(); if( thread_block_count > 1 ){ // We are reducing 128 numbers or less, so one thread should be sufficient. reduce_card_device<<<grid2D(1), 1>>>((int *)cardnl, thread_block_count); cudaThreadSynchronize(); CHECK_CUDA_ERROR(); } copyFromDevice( cardinalities, cardnl, 2*sizeof(int) ); max_card = cardinalities[0]; winner_index = cardinalities[1]; t_redc += Timer::Stop(Tredc, "Reduce Only"); int Tsync = Timer::Start(); comm_barrier(); t_sync += Timer::Stop(Tsync, "Sync Only"); int Tcomm = Timer::Start(); comm_find_winner(&max_card, &winner_node, &winner_index, cwrank, max_point_count+1); t_comm += Timer::Stop(Tcomm, "Comm Only"); if( be_verbose && cwrank == winner_node){ // for non-parallel cases, both "cwrank" and "winner_node" should be zero. 
cout << "[" << cwrank << "] Cluster Cardinality: " << max_card << " (Node: " << cwrank << ", index: " << winner_index << ")" << endl; } int Ttrim = Timer::Start(); trim_ungrouped_pnts_indr_array<<<grid2D(1), tpb>>>(winner_index, (int*)ungrpd_pnts_indr, (float*)distance_matrix, (int *)result, (char *)Ai_mask, (char *)clustered_pnts_mask, (int *)indr_mtrx, (int *)cardnl, (float *)dist_to_clust, (int *)degrees, point_count, max_point_count, max_degree, threshold, matrix_type, can_use_texture ); cudaThreadSynchronize(); CHECK_CUDA_ERROR(); t_trim += Timer::Stop(Ttrim, "Trim Only"); if( cwrank == winner_node){ // for non-parallel cases, these should both be zero. if( save_clusters ){ ss << "p." << iter; debug_out.open(ss.str().c_str()); } copyFromDevice(output, (void *)result, max_card*sizeof(int) ); if( save_clusters ){ for(int i=0; i<max_card; i++){ debug_out << pnts[2*output[i]] << " " << pnts[2*output[i]+1] << endl; } seeds_out << pnts[2*winner_index] << " " << pnts[2*winner_index+1] << endl; debug_out.close(); } } int Tupdt = Timer::Start(); update_clustered_pnts_mask<<<grid2D(1), tpb>>>((char *)clustered_pnts_mask, (char *)Ai_mask, max_point_count); cudaThreadSynchronize(); CHECK_CUDA_ERROR(); t_updt += Timer::Stop(Tupdt, "Update Only"); point_count -= max_card; }while( max_card > 1 && point_count ); double t = Timer::Stop(TH, "QT_Clustering"); if( save_clusters ){ seeds_out.close(); } // //////////////////////////////////////////////////////////////////////////////// if( cwrank == 0){ cout << "Cluster count: " << iter << endl; cout.flush(); } resultDB.AddResult(name+"_Synchron.", sizeStr, "s", t_sync); resultDB.AddResult(name+"_Communic.", sizeStr, "s", t_comm); resultDB.AddResult(name+"_Kernel", sizeStr, "s", t_krn); resultDB.AddResult(name+"_Trimming", sizeStr, "s", t_trim); resultDB.AddResult(name+"_Update", sizeStr, "s", t_updt); resultDB.AddResult(name+"_Reduction", sizeStr, "s", t_redc); resultDB.AddResult(name+"_Algorithm", sizeStr, "s", t); resultDB.AddResult(name+"+PCI_Trans.", sizeStr, "s", t+transfer_time); pmsFreeHostBuffer(dist_source); pmsFreeHostBuffer(indr_mtrx_host); if( can_use_texture ){ cudaFreeArray(distance_matrix_txt); cudaUnbindTexture(texDistance); }else{ freeDeviceBuffer(distance_matrix_gmem); } CHECK_CUDA_ERROR(); freeDeviceBuffer(indr_mtrx); freeDeviceBuffer(Ai_mask); freeDeviceBuffer(cardnl); freeDeviceBuffer(result); pmsFreeHostBuffer(output); return; } //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// int qtcDevice = -1; void init(OptionParser& op) { if (qtcDevice == -1) { if (op.getOptionVecInt("device").size() > 0) { qtcDevice = op.getOptionVecInt("device")[0]; } else { qtcDevice = 0; } cudaSetDevice(qtcDevice); cudaGetDevice(&qtcDevice); } } void allocDeviceBuffer(void** bufferp, unsigned long bytes) { cudaMalloc(bufferp, bytes); CHECK_CUDA_ERROR(); } void freeDeviceBuffer(void* buffer) { cudaFree(buffer); } void copyToDevice(void* to_device, void* from_host, unsigned long bytes) { cudaMemcpy(to_device, from_host, bytes, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(); } void copyFromDevice(void* to_host, void* from_device, unsigned long bytes) { cudaMemcpy(to_host, from_device, bytes, cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(); }
6a065a8d1403b8dee749f93126b5a0b15236c9c1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/relayout_format/relayout_format.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #include "src/cuda/query_blocksize.cuh" #include "src/cuda/relayout_format/relayout_format.cuh" using namespace megdnn; using namespace cuda; namespace { template <typename SrcType, typename DstType, bool same_scale> struct CudaPostProcess; template <> struct CudaPostProcess<dtype::Uint8, dtype::QuantizedS8, true> { CudaPostProcess(float, uint8_t, float, uint8_t){}; inline __device__ int8_t operator()(uint8_t val) { return val - 128; } }; template <> struct CudaPostProcess<dtype::Uint8, dtype::QuantizedS8, false> { CudaDTypeParamImpl<dt_qint8> m_dst_type_cvt; CudaPostProcess(float, uint8_t, float dst_scale, uint8_t) { m_dst_type_cvt = CudaDTypeParamImpl<dt_qint8>(dst_scale); }; inline __device__ int8_t operator()(uint8_t val) { return m_dst_type_cvt.quantize((float)val - 128.f).as_int8(); } }; template <> struct CudaPostProcess<dtype::Quantized8Asymm, dtype::QuantizedS8, false> { CudaDTypeParamImpl<dt_qint8> m_dst_type_cvt; CudaDTypeParamImpl<dt_quint8> m_src_type_cvt; CudaPostProcess(float src_scale, uint8_t src_zero_point, float dst_scale, uint8_t) { m_dst_type_cvt = CudaDTypeParamImpl<dt_qint8>(dst_scale); m_src_type_cvt = CudaDTypeParamImpl<dt_quint8>(src_scale, src_zero_point); }; inline __device__ int8_t operator()(uint8_t val) { float med_var = m_src_type_cvt.dequantize(dt_quint8(val)); return m_dst_type_cvt.quantize(med_var).as_int8(); } }; template <> struct CudaPostProcess<dtype::Quantized8Asymm, dtype::QuantizedS8, true> { uint8_t m_src_zero_point = 0; CudaPostProcess(float, uint8_t src_zero_point, float, uint8_t) { m_src_zero_point = src_zero_point; }; inline __device__ int8_t operator()(uint8_t val) { return val - m_src_zero_point; } }; template <> struct CudaPostProcess<dtype::QuantizedS8, dtype::QuantizedS8, false> { CudaDTypeParamImpl<dt_qint8> m_dst_type_cvt; CudaDTypeParamImpl<dt_qint8> m_src_type_cvt; CudaPostProcess(float src_scale, uint8_t, float dst_scale, uint8_t) { m_dst_type_cvt = CudaDTypeParamImpl<dt_qint8>(dst_scale); m_src_type_cvt = CudaDTypeParamImpl<dt_qint8>(src_scale); }; inline __device__ int8_t operator()(int8_t val) { float med_var = m_src_type_cvt.dequantize(dt_qint8(val)); return m_dst_type_cvt.quantize(med_var).as_int8(); } }; template <> struct CudaPostProcess<dtype::QuantizedS8, dtype::QuantizedS8, true> { CudaPostProcess(float, uint8_t, float, uint8_t){}; inline __device__ int8_t operator()(int8_t val) { return val; } }; template <typename SrcType, int pack_w> struct DTypeRWHelper; template <> struct DTypeRWHelper<char, 1> { using InnerDtype = char; using DstDtype = char4; }; template <> struct DTypeRWHelper<char, 4> { using InnerDtype = char4; using DstDtype = char4; }; template <int pack_w, int pack_c, typename SrcType, typename DnnSrcType, typename DnnDstType, bool same_scale> struct Translayout { using InnerDtype = typename DTypeRWHelper<SrcType, pack_w>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, pack_w>::DstDtype; static inline __device__ void trans(DstDtype (&dst_width)[pack_w], InnerDtype (&read_channel)[pack_c], const char 
zero_point); }; template <typename SrcType, typename DnnSrcType, typename DnnDstType, bool same_scale> struct Translayout<1, 4, SrcType, DnnSrcType, DnnDstType, same_scale> { using InnerDtype = typename DTypeRWHelper<SrcType, 1>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, 1>::DstDtype; static inline __device__ void trans( DstDtype (&dst_width)[1], InnerDtype (&read_channel)[4], CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process, const char zero_point) { dst_width[0].x = post_process(read_channel[0]); dst_width[0].y = post_process(read_channel[1]); dst_width[0].z = post_process(read_channel[2]); dst_width[0].w = post_process(read_channel[3]); } }; template <typename SrcType, typename DnnSrcType, typename DnnDstType, bool same_scale> struct Translayout<4, 4, SrcType, DnnSrcType, DnnDstType, same_scale> { using InnerDtype = typename DTypeRWHelper<SrcType, 4>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, 4>::DstDtype; static inline __device__ void trans( DstDtype (&dst_width)[4], InnerDtype (&read_channel)[4], CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process, const char zero_point) { dst_width[0].x = post_process(read_channel[0].x); dst_width[0].y = post_process(read_channel[1].x); dst_width[0].z = post_process(read_channel[2].x); dst_width[0].w = post_process(read_channel[3].x); dst_width[1].x = post_process(read_channel[0].y); dst_width[1].y = post_process(read_channel[1].y); dst_width[1].z = post_process(read_channel[2].y); dst_width[1].w = post_process(read_channel[3].y); dst_width[2].x = post_process(read_channel[0].z); dst_width[2].y = post_process(read_channel[1].z); dst_width[2].z = post_process(read_channel[2].z); dst_width[2].w = post_process(read_channel[3].z); dst_width[3].x = post_process(read_channel[0].w); dst_width[3].y = post_process(read_channel[1].w); dst_width[3].z = post_process(read_channel[2].w); dst_width[3].w = post_process(read_channel[3].w); } }; template <typename DstType> inline __device__ DstType make_zero_pad(const char zero_point) { return zero_point; } template <> inline __device__ char4 make_zero_pad<char4>(const char zero_point) { return {zero_point, zero_point, zero_point, zero_point}; } template <typename DstDtype> inline __device__ void write_helper(DstDtype* ptr, DstDtype val) { *ptr = val; } template <> inline __device__ void write_helper<char4>(char4* ptr, char4 val) { int32_t* rel_ptr = (int32_t*)ptr; *rel_ptr = *(int32_t*)(&val); } template <bool with_pad, int pack_w, int pack_c, bool same_scale, typename SrcType, typename DstType, typename DnnSrcType, typename DnnDstType> struct RelayoutKern { using InnerDtype = typename DTypeRWHelper<SrcType, pack_w>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, pack_w>::DstDtype; static inline __device__ void write(DstType* dst_ptr, char4 (&dst_width)[pack_w]) { DstDtype* dst_inner_ptr = (DstDtype*)dst_ptr; #pragma unroll for (int iw_idx = 0; iw_idx < pack_w; ++iw_idx) { write_helper(dst_inner_ptr + iw_idx, dst_width[iw_idx]); } } static inline __device__ void read(const SrcType* src_ptr, InnerDtype (&read_channel)[pack_c], const int ic_stride) { #pragma unroll for (int ic_idx = 0; ic_idx < pack_c; ++ic_idx) { read_channel[ic_idx] = *(InnerDtype*)(src_ptr + ic_idx * ic_stride); } } static inline __device__ void read_with_pad( const SrcType* src_ptr, InnerDtype (&read_channel)[pack_c], const int ic_stride, const int remain_ic, const InnerDtype zero_point) { #pragma unroll for (int ic_idx = 0; ic_idx < pack_c; ++ic_idx) { 
read_channel[ic_idx] = ic_idx < remain_ic ? *(InnerDtype*)(src_ptr + ic_idx * ic_stride) : zero_point; } } static inline __device__ void core_relayout_kern( const SrcType* src, DstType* dst, const int src_offset_base, const int dst_offset_base, const int ic_offset, const int ic_stride, const int remain_ic, CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process, const char zero_point) { InnerDtype read_channel[pack_c]; if (with_pad) { const InnerDtype zero_pad = make_zero_pad<InnerDtype>(zero_point); read_with_pad(src + ic_offset + src_offset_base, read_channel, ic_stride, remain_ic, zero_pad); } else { read(src + ic_offset + src_offset_base, read_channel, ic_stride); } DstDtype dst_width[pack_w]; Translayout<pack_w, pack_c, SrcType, DnnSrcType, DnnDstType, same_scale>::trans(dst_width, read_channel, post_process, zero_point); write(dst + ic_offset + dst_offset_base, dst_width); } }; template <int pack_w, bool same_scale, typename SrcType, typename DstType, typename DnnSrcType, typename DnnDstType> __global__ void kern_nchw_nchw4( const SrcType* src, DstType* dst, int ic, int ihw, int n_stride_src, int ic_stride, int n_stride_dst, CudaPostProcess<DnnSrcType, DnnDstType, same_scale> post_process, const char zero_point) { constexpr int pack_c = 4; const int n_idx = blockIdx.y; const int ihw_block_idx = blockIdx.x * blockDim.x + threadIdx.x; const int ihw_offset = ihw_block_idx * pack_w; if (ihw_offset < ihw) { const int ic_block = ic / pack_c; const int remain_ic = ic % pack_c; const int src_offset_base = n_idx * n_stride_src + ihw_offset; const int dst_offset_base = n_idx * n_stride_dst + ihw_offset * pack_c; for (int ic_blk_idx = 0; ic_blk_idx < ic_block; ++ic_blk_idx) { const int ic_offset = ic_blk_idx * pack_c * ic_stride; RelayoutKern<false, pack_w, pack_c, same_scale, SrcType, DstType, DnnSrcType, DnnDstType>::core_relayout_kern(src, dst, src_offset_base, dst_offset_base, ic_offset, ic_stride, remain_ic, post_process, zero_point); } if (remain_ic > 0) { const int ic_offset = ic_block * pack_c * ic_stride; RelayoutKern<true, pack_w, pack_c, same_scale, SrcType, DstType, DnnSrcType, DnnDstType>::core_relayout_kern(src, dst, src_offset_base, dst_offset_base, ic_offset, ic_stride, remain_ic, post_process, zero_point); } } } } // namespace template <int pack_w = 1> void relayout_format::relayout_format_cuda_exec( const TensorND& src, const TensorND& dst, const hipStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point) { constexpr int pack_oc = 4; const int n = src.layout[0]; const int c = src.layout[1]; const int h = src.layout[2]; const int w = src.layout[3]; const int hw = h * w; const int oc_block = DIVUP(c, pack_oc); const int n_stride_src = c * hw; const int ic_stride = hw; const int n_stride_dst = oc_block * pack_oc * h * w; auto& src_layout = src.layout; auto& dst_layout = dst.layout; bool same_scale = src_scale == dst_scale; #define RUN_KERNEL(same_scale, SRC_TYPE, DST_TYPE, SRC_C_TYPE, DST_C_TYPE) \ if (same_scale) { \ int nr_threads = query_blocksize_for_kernel( \ kern_nchw_nchw4<pack_w, true, SRC_C_TYPE, DST_C_TYPE, \ SRC_TYPE, DST_TYPE>); \ const dim3 block_dim(DIVUP(hw, nr_threads* pack_w), n); \ const dim3 thread_dim(nr_threads); \ hipLaunchKernelGGL(( kern_nchw_nchw4<pack_w, true>), dim3(block_dim), dim3(thread_dim), 0, stream, \ (SRC_C_TYPE*)src.raw_ptr, (DST_C_TYPE*)dst.raw_ptr, c, hw, \ n_stride_src, ic_stride, n_stride_dst, \ CudaPostProcess<SRC_TYPE, DST_TYPE, true>( \ src_scale, 
src_zero_point, dst_scale, dst_zero_point), \ src_zero_point); \ } else { \ int nr_threads = query_blocksize_for_kernel( \ kern_nchw_nchw4<pack_w, false, SRC_C_TYPE, DST_C_TYPE, \ SRC_TYPE, DST_TYPE>); \ const dim3 block_dim(DIVUP(hw, nr_threads* pack_w), n); \ const dim3 thread_dim(nr_threads); \ hipLaunchKernelGGL(( kern_nchw_nchw4<pack_w, false>), dim3(block_dim), dim3(thread_dim), 0, stream, \ (SRC_C_TYPE*)src.raw_ptr, (DST_C_TYPE*)dst.raw_ptr, c, hw, \ n_stride_src, ic_stride, n_stride_dst, \ CudaPostProcess<SRC_TYPE, DST_TYPE, false>( \ src_scale, src_zero_point, dst_scale, dst_zero_point), \ src_zero_point); \ } if (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Uint8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) { RUN_KERNEL(same_scale, dtype::Uint8, dtype::QuantizedS8, char, char); } else if (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Quantized8Asymm && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) { RUN_KERNEL(same_scale, dtype::Quantized8Asymm, dtype::QuantizedS8, char, char); } else if (src_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) { RUN_KERNEL(same_scale, dtype::QuantizedS8, dtype::QuantizedS8, char, char); } else { megdnn_assert(0, "not support dtype %s %s", src_layout.dtype.name(), dst_layout.dtype.name()); } } bool relayout_format::relayout_format_cuda_usable( const TensorLayout& src_layout, const TensorLayout& dst_layout) { bool is_all_continue = src_layout.is_contiguous() && dst_layout.is_contiguous(); bool is_all_int8 = (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Uint8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) || (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Quantized8Asymm && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) || (src_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8); return is_all_continue && is_all_int8; } template void relayout_format::relayout_format_cuda_exec<1>( const TensorND& src, const TensorND& dst, const hipStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point); template void relayout_format::relayout_format_cuda_exec<4>( const TensorND& src, const TensorND& dst, const hipStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point);
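The CudaPostProcess specializations in the file above boil down to a dequantize with the source parameters followed by a requantize with the destination scale, with the same_scale cases collapsing to a plain zero-point shift. The sketch below restates that arithmetic as a host-side stand-in, without megdnn's CudaDTypeParamImpl types; the rounding and saturation details are assumptions for illustration, not megdnn's exact implementation.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Hedged stand-in for the Quantized8Asymm -> QuantizedS8 post-process:
// dequantize with the source scale and zero point, requantize with the
// destination scale, round, and saturate to int8.
static int8_t requantize_u8_to_s8(uint8_t val, float src_scale,
                                  uint8_t src_zero_point, float dst_scale)
{
    float real = src_scale * (static_cast<float>(val) - src_zero_point); // dequantize
    float q    = std::round(real / dst_scale);                           // requantize
    q = std::min(127.f, std::max(-128.f, q));                            // saturate
    return static_cast<int8_t>(q);
}

int main()
{
    // When the scales match, the conversion degenerates to a zero-point shift,
    // which is what the same_scale=true specializations above implement.
    printf("%d\n", requantize_u8_to_s8(200, 0.5f, 128, 0.5f)); // 72
    printf("%d\n", requantize_u8_to_s8(200, 0.5f, 128, 1.0f)); // 36
    return 0;
}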
6a065a8d1403b8dee749f93126b5a0b15236c9c1.cu
/** * \file dnn/src/cuda/relayout_format/relayout_format.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or * implied. */ #include "src/cuda/query_blocksize.cuh" #include "src/cuda/relayout_format/relayout_format.cuh" using namespace megdnn; using namespace cuda; namespace { template <typename SrcType, typename DstType, bool same_scale> struct CudaPostProcess; template <> struct CudaPostProcess<dtype::Uint8, dtype::QuantizedS8, true> { CudaPostProcess(float, uint8_t, float, uint8_t){}; inline __device__ int8_t operator()(uint8_t val) { return val - 128; } }; template <> struct CudaPostProcess<dtype::Uint8, dtype::QuantizedS8, false> { CudaDTypeParamImpl<dt_qint8> m_dst_type_cvt; CudaPostProcess(float, uint8_t, float dst_scale, uint8_t) { m_dst_type_cvt = CudaDTypeParamImpl<dt_qint8>(dst_scale); }; inline __device__ int8_t operator()(uint8_t val) { return m_dst_type_cvt.quantize((float)val - 128.f).as_int8(); } }; template <> struct CudaPostProcess<dtype::Quantized8Asymm, dtype::QuantizedS8, false> { CudaDTypeParamImpl<dt_qint8> m_dst_type_cvt; CudaDTypeParamImpl<dt_quint8> m_src_type_cvt; CudaPostProcess(float src_scale, uint8_t src_zero_point, float dst_scale, uint8_t) { m_dst_type_cvt = CudaDTypeParamImpl<dt_qint8>(dst_scale); m_src_type_cvt = CudaDTypeParamImpl<dt_quint8>(src_scale, src_zero_point); }; inline __device__ int8_t operator()(uint8_t val) { float med_var = m_src_type_cvt.dequantize(dt_quint8(val)); return m_dst_type_cvt.quantize(med_var).as_int8(); } }; template <> struct CudaPostProcess<dtype::Quantized8Asymm, dtype::QuantizedS8, true> { uint8_t m_src_zero_point = 0; CudaPostProcess(float, uint8_t src_zero_point, float, uint8_t) { m_src_zero_point = src_zero_point; }; inline __device__ int8_t operator()(uint8_t val) { return val - m_src_zero_point; } }; template <> struct CudaPostProcess<dtype::QuantizedS8, dtype::QuantizedS8, false> { CudaDTypeParamImpl<dt_qint8> m_dst_type_cvt; CudaDTypeParamImpl<dt_qint8> m_src_type_cvt; CudaPostProcess(float src_scale, uint8_t, float dst_scale, uint8_t) { m_dst_type_cvt = CudaDTypeParamImpl<dt_qint8>(dst_scale); m_src_type_cvt = CudaDTypeParamImpl<dt_qint8>(src_scale); }; inline __device__ int8_t operator()(int8_t val) { float med_var = m_src_type_cvt.dequantize(dt_qint8(val)); return m_dst_type_cvt.quantize(med_var).as_int8(); } }; template <> struct CudaPostProcess<dtype::QuantizedS8, dtype::QuantizedS8, true> { CudaPostProcess(float, uint8_t, float, uint8_t){}; inline __device__ int8_t operator()(int8_t val) { return val; } }; template <typename SrcType, int pack_w> struct DTypeRWHelper; template <> struct DTypeRWHelper<char, 1> { using InnerDtype = char; using DstDtype = char4; }; template <> struct DTypeRWHelper<char, 4> { using InnerDtype = char4; using DstDtype = char4; }; template <int pack_w, int pack_c, typename SrcType, typename DnnSrcType, typename DnnDstType, bool same_scale> struct Translayout { using InnerDtype = typename DTypeRWHelper<SrcType, pack_w>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, pack_w>::DstDtype; static inline __device__ void trans(DstDtype (&dst_width)[pack_w], InnerDtype (&read_channel)[pack_c], const char zero_point); }; template <typename SrcType, typename DnnSrcType, typename DnnDstType, 
bool same_scale> struct Translayout<1, 4, SrcType, DnnSrcType, DnnDstType, same_scale> { using InnerDtype = typename DTypeRWHelper<SrcType, 1>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, 1>::DstDtype; static inline __device__ void trans( DstDtype (&dst_width)[1], InnerDtype (&read_channel)[4], CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process, const char zero_point) { dst_width[0].x = post_process(read_channel[0]); dst_width[0].y = post_process(read_channel[1]); dst_width[0].z = post_process(read_channel[2]); dst_width[0].w = post_process(read_channel[3]); } }; template <typename SrcType, typename DnnSrcType, typename DnnDstType, bool same_scale> struct Translayout<4, 4, SrcType, DnnSrcType, DnnDstType, same_scale> { using InnerDtype = typename DTypeRWHelper<SrcType, 4>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, 4>::DstDtype; static inline __device__ void trans( DstDtype (&dst_width)[4], InnerDtype (&read_channel)[4], CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process, const char zero_point) { dst_width[0].x = post_process(read_channel[0].x); dst_width[0].y = post_process(read_channel[1].x); dst_width[0].z = post_process(read_channel[2].x); dst_width[0].w = post_process(read_channel[3].x); dst_width[1].x = post_process(read_channel[0].y); dst_width[1].y = post_process(read_channel[1].y); dst_width[1].z = post_process(read_channel[2].y); dst_width[1].w = post_process(read_channel[3].y); dst_width[2].x = post_process(read_channel[0].z); dst_width[2].y = post_process(read_channel[1].z); dst_width[2].z = post_process(read_channel[2].z); dst_width[2].w = post_process(read_channel[3].z); dst_width[3].x = post_process(read_channel[0].w); dst_width[3].y = post_process(read_channel[1].w); dst_width[3].z = post_process(read_channel[2].w); dst_width[3].w = post_process(read_channel[3].w); } }; template <typename DstType> inline __device__ DstType make_zero_pad(const char zero_point) { return zero_point; } template <> inline __device__ char4 make_zero_pad<char4>(const char zero_point) { return {zero_point, zero_point, zero_point, zero_point}; } template <typename DstDtype> inline __device__ void write_helper(DstDtype* ptr, DstDtype val) { *ptr = val; } template <> inline __device__ void write_helper<char4>(char4* ptr, char4 val) { int32_t* rel_ptr = (int32_t*)ptr; *rel_ptr = *(int32_t*)(&val); } template <bool with_pad, int pack_w, int pack_c, bool same_scale, typename SrcType, typename DstType, typename DnnSrcType, typename DnnDstType> struct RelayoutKern { using InnerDtype = typename DTypeRWHelper<SrcType, pack_w>::InnerDtype; using DstDtype = typename DTypeRWHelper<SrcType, pack_w>::DstDtype; static inline __device__ void write(DstType* dst_ptr, char4 (&dst_width)[pack_w]) { DstDtype* dst_inner_ptr = (DstDtype*)dst_ptr; #pragma unroll for (int iw_idx = 0; iw_idx < pack_w; ++iw_idx) { write_helper(dst_inner_ptr + iw_idx, dst_width[iw_idx]); } } static inline __device__ void read(const SrcType* src_ptr, InnerDtype (&read_channel)[pack_c], const int ic_stride) { #pragma unroll for (int ic_idx = 0; ic_idx < pack_c; ++ic_idx) { read_channel[ic_idx] = *(InnerDtype*)(src_ptr + ic_idx * ic_stride); } } static inline __device__ void read_with_pad( const SrcType* src_ptr, InnerDtype (&read_channel)[pack_c], const int ic_stride, const int remain_ic, const InnerDtype zero_point) { #pragma unroll for (int ic_idx = 0; ic_idx < pack_c; ++ic_idx) { read_channel[ic_idx] = ic_idx < remain_ic ? 
*(InnerDtype*)(src_ptr + ic_idx * ic_stride) : zero_point; } } static inline __device__ void core_relayout_kern( const SrcType* src, DstType* dst, const int src_offset_base, const int dst_offset_base, const int ic_offset, const int ic_stride, const int remain_ic, CudaPostProcess<DnnSrcType, DnnDstType, same_scale>& post_process, const char zero_point) { InnerDtype read_channel[pack_c]; if (with_pad) { const InnerDtype zero_pad = make_zero_pad<InnerDtype>(zero_point); read_with_pad(src + ic_offset + src_offset_base, read_channel, ic_stride, remain_ic, zero_pad); } else { read(src + ic_offset + src_offset_base, read_channel, ic_stride); } DstDtype dst_width[pack_w]; Translayout<pack_w, pack_c, SrcType, DnnSrcType, DnnDstType, same_scale>::trans(dst_width, read_channel, post_process, zero_point); write(dst + ic_offset + dst_offset_base, dst_width); } }; template <int pack_w, bool same_scale, typename SrcType, typename DstType, typename DnnSrcType, typename DnnDstType> __global__ void kern_nchw_nchw4( const SrcType* src, DstType* dst, int ic, int ihw, int n_stride_src, int ic_stride, int n_stride_dst, CudaPostProcess<DnnSrcType, DnnDstType, same_scale> post_process, const char zero_point) { constexpr int pack_c = 4; const int n_idx = blockIdx.y; const int ihw_block_idx = blockIdx.x * blockDim.x + threadIdx.x; const int ihw_offset = ihw_block_idx * pack_w; if (ihw_offset < ihw) { const int ic_block = ic / pack_c; const int remain_ic = ic % pack_c; const int src_offset_base = n_idx * n_stride_src + ihw_offset; const int dst_offset_base = n_idx * n_stride_dst + ihw_offset * pack_c; for (int ic_blk_idx = 0; ic_blk_idx < ic_block; ++ic_blk_idx) { const int ic_offset = ic_blk_idx * pack_c * ic_stride; RelayoutKern<false, pack_w, pack_c, same_scale, SrcType, DstType, DnnSrcType, DnnDstType>::core_relayout_kern(src, dst, src_offset_base, dst_offset_base, ic_offset, ic_stride, remain_ic, post_process, zero_point); } if (remain_ic > 0) { const int ic_offset = ic_block * pack_c * ic_stride; RelayoutKern<true, pack_w, pack_c, same_scale, SrcType, DstType, DnnSrcType, DnnDstType>::core_relayout_kern(src, dst, src_offset_base, dst_offset_base, ic_offset, ic_stride, remain_ic, post_process, zero_point); } } } } // namespace template <int pack_w = 1> void relayout_format::relayout_format_cuda_exec( const TensorND& src, const TensorND& dst, const cudaStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point) { constexpr int pack_oc = 4; const int n = src.layout[0]; const int c = src.layout[1]; const int h = src.layout[2]; const int w = src.layout[3]; const int hw = h * w; const int oc_block = DIVUP(c, pack_oc); const int n_stride_src = c * hw; const int ic_stride = hw; const int n_stride_dst = oc_block * pack_oc * h * w; auto& src_layout = src.layout; auto& dst_layout = dst.layout; bool same_scale = src_scale == dst_scale; #define RUN_KERNEL(same_scale, SRC_TYPE, DST_TYPE, SRC_C_TYPE, DST_C_TYPE) \ if (same_scale) { \ int nr_threads = query_blocksize_for_kernel( \ kern_nchw_nchw4<pack_w, true, SRC_C_TYPE, DST_C_TYPE, \ SRC_TYPE, DST_TYPE>); \ const dim3 block_dim(DIVUP(hw, nr_threads* pack_w), n); \ const dim3 thread_dim(nr_threads); \ kern_nchw_nchw4<pack_w, true><<<block_dim, thread_dim, 0, stream>>>( \ (SRC_C_TYPE*)src.raw_ptr, (DST_C_TYPE*)dst.raw_ptr, c, hw, \ n_stride_src, ic_stride, n_stride_dst, \ CudaPostProcess<SRC_TYPE, DST_TYPE, true>( \ src_scale, src_zero_point, dst_scale, dst_zero_point), \ src_zero_point); \ } else { \ int 
nr_threads = query_blocksize_for_kernel( \ kern_nchw_nchw4<pack_w, false, SRC_C_TYPE, DST_C_TYPE, \ SRC_TYPE, DST_TYPE>); \ const dim3 block_dim(DIVUP(hw, nr_threads* pack_w), n); \ const dim3 thread_dim(nr_threads); \ kern_nchw_nchw4<pack_w, false><<<block_dim, thread_dim, 0, stream>>>( \ (SRC_C_TYPE*)src.raw_ptr, (DST_C_TYPE*)dst.raw_ptr, c, hw, \ n_stride_src, ic_stride, n_stride_dst, \ CudaPostProcess<SRC_TYPE, DST_TYPE, false>( \ src_scale, src_zero_point, dst_scale, dst_zero_point), \ src_zero_point); \ } if (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Uint8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) { RUN_KERNEL(same_scale, dtype::Uint8, dtype::QuantizedS8, char, char); } else if (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Quantized8Asymm && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) { RUN_KERNEL(same_scale, dtype::Quantized8Asymm, dtype::QuantizedS8, char, char); } else if (src_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) { RUN_KERNEL(same_scale, dtype::QuantizedS8, dtype::QuantizedS8, char, char); } else { megdnn_assert(0, "not support dtype %s %s", src_layout.dtype.name(), dst_layout.dtype.name()); } } bool relayout_format::relayout_format_cuda_usable( const TensorLayout& src_layout, const TensorLayout& dst_layout) { bool is_all_continue = src_layout.is_contiguous() && dst_layout.is_contiguous(); bool is_all_int8 = (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Uint8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) || (src_layout.dtype.enumv().ev == DTypeEnum::Ev::Quantized8Asymm && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8) || (src_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8 && dst_layout.dtype.enumv().ev == DTypeEnum::Ev::QuantizedS8); return is_all_continue && is_all_int8; } template void relayout_format::relayout_format_cuda_exec<1>( const TensorND& src, const TensorND& dst, const cudaStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point); template void relayout_format::relayout_format_cuda_exec<4>( const TensorND& src, const TensorND& dst, const cudaStream_t& stream, const float src_scale, const float dst_scale, const uint8_t src_zero_point, const uint8_t dst_zero_point);
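// Hedged sketch (not part of the MegDNN sources above): a self-contained toy kernel
// illustrating the NCHW -> NCHW4 index mapping that kern_nchw_nchw4 implements with
// pack_c = 4, minus the quantization post-processing and the vectorized char4 stores.
// All names below (toy_nchw_to_nchw4, launch_toy_relayout) are hypothetical.
#include <cuda_runtime.h>

__global__ void toy_nchw_to_nchw4(const signed char* src, signed char* dst,
                                  int n, int c, int hw)
{
    // One thread per (batch, spatial) position; channels are gathered in groups of 4.
    int hw_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int n_idx  = blockIdx.y;
    if (hw_idx >= hw || n_idx >= n)
        return;

    int c4 = (c + 3) / 4;                        // number of 4-channel groups, i.e. DIVUP(c, 4)
    const signed char* src_n = src + (size_t)n_idx * c * hw;
    signed char* dst_n       = dst + (size_t)n_idx * c4 * 4 * hw;

    for (int cb = 0; cb < c4; ++cb) {
        for (int k = 0; k < 4; ++k) {
            int ic = cb * 4 + k;
            // Out-of-range channels are zero-padded, mirroring make_zero_pad above.
            signed char v = (ic < c) ? src_n[(size_t)ic * hw + hw_idx] : 0;
            // NCHW4 destination layout: dst[n][cb][h][w][k]
            dst_n[((size_t)cb * hw + hw_idx) * 4 + k] = v;
        }
    }
}

// Possible launch configuration, analogous to block_dim/thread_dim in RUN_KERNEL.
static void launch_toy_relayout(const signed char* d_src, signed char* d_dst,
                                int n, int c, int h, int w, cudaStream_t stream)
{
    int hw = h * w;
    dim3 threads(256);
    dim3 blocks((hw + threads.x - 1) / threads.x, n);
    toy_nchw_to_nchw4<<<blocks, threads, 0, stream>>>(d_src, d_dst, n, c, hw);
}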
16570124a40c421ae433a778b5fe5c728fb926fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <opencv2/core/cuda/common.hpp> #include <opencv2/core/cuda/vec_traits.hpp> #include <opencv2/core/cuda/vec_math.hpp> #include <opencv2/core/cuda/emulation.hpp> #include <iostream> #include <stdio.h> namespace cv { namespace cuda { namespace device { namespace ccl { enum { WARP_SIZE = 32, WARP_LOG = 5, CTA_SIZE_X = 32, CTA_SIZE_Y = 8, STA_SIZE_MERGE_Y = 4, STA_SIZE_MERGE_X = 32, TPB_X = 1, TPB_Y = 4, TILE_COLS = CTA_SIZE_X * TPB_X, TILE_ROWS = CTA_SIZE_Y * TPB_Y }; template<typename T> struct IntervalsTraits { typedef T elem_type; }; template<> struct IntervalsTraits<unsigned char> { typedef int dist_type; enum {ch = 1}; }; template<> struct IntervalsTraits<uchar3> { typedef int3 dist_type; enum {ch = 3}; }; template<> struct IntervalsTraits<uchar4> { typedef int4 dist_type; enum {ch = 4}; }; template<> struct IntervalsTraits<unsigned short> { typedef int dist_type; enum {ch = 1}; }; template<> struct IntervalsTraits<ushort3> { typedef int3 dist_type; enum {ch = 3}; }; template<> struct IntervalsTraits<ushort4> { typedef int4 dist_type; enum {ch = 4}; }; template<> struct IntervalsTraits<float> { typedef float dist_type; enum {ch = 1}; }; template<> struct IntervalsTraits<int> { typedef int dist_type; enum {ch = 1}; }; typedef unsigned char component; enum Edges { UP = 1, DOWN = 2, LEFT = 4, RIGHT = 8, EMPTY = 0xF0 }; template<typename T, int CH> struct InInterval {}; template<typename T> struct InInterval<T, 1> { typedef typename VecTraits<T>::elem_type E; __host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo((E)(-_lo.x)), hi((E)_hi.x) {}; T lo, hi; template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const { I d = a - b; return lo <= d && d <= hi; } }; template<typename T> struct InInterval<T, 3> { typedef typename VecTraits<T>::elem_type E; __host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo (VecTraits<T>::make((E)(-_lo.x), (E)(-_lo.y), (E)(-_lo.z))), hi (VecTraits<T>::make((E)_hi.x, (E)_hi.y, (E)_hi.z)){}; T lo, hi; template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const { I d = saturate_cast<I>(a - b); return lo.x <= d.x && d.x <= hi.x && lo.y <= d.y && d.y <= hi.y && lo.z <= d.z && d.z <= hi.z; } }; template<typename T> struct InInterval<T, 4> { typedef typename VecTraits<T>::elem_type E; __host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo (VecTraits<T>::make((E)(-_lo.x), (E)(-_lo.y), (E)(-_lo.z), (E)(-_lo.w))), hi (VecTraits<T>::make((E)_hi.x, (E)_hi.y, (E)_hi.z, (E)_hi.w)){}; T lo, hi; template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const { I d = saturate_cast<I>(a - b); return lo.x <= d.x && d.x <= hi.x && lo.y <= d.y && d.y <= hi.y && lo.z <= d.z && d.z <= hi.z && lo.w <= d.w && d.w <= hi.w; } }; template<typename T, typename F> __global__ void computeConnectivity(const PtrStepSz<T> image, PtrStepSzb components, F connected) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= image.cols || y >= image.rows) return; T intensity = image(y, x); component c = 0; if ( x > 0 && connected(intensity, image(y, x - 1))) c |= LEFT; if ( y > 0 && connected(intensity, image(y - 1, x))) c |= UP; if ( x + 1 < image.cols && connected(intensity, image(y, x + 1))) c |= RIGHT; if ( y + 1 < image.rows && connected(intensity, image(y + 1, x))) c |= DOWN; components(y, x) = c; } 
template< typename T> void computeEdges(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream) { dim3 block(CTA_SIZE_X, CTA_SIZE_Y); dim3 grid(divUp(image.cols, block.x), divUp(image.rows, block.y)); typedef InInterval<typename IntervalsTraits<T>::dist_type, IntervalsTraits<T>::ch> Int_t; Int_t inInt(lo, hi); hipLaunchKernelGGL(( computeConnectivity<T, Int_t>), dim3(grid), dim3(block), 0, stream, static_cast<const PtrStepSz<T> >(image), edges, inInt); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } template void computeEdges<uchar> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); template void computeEdges<uchar3> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); template void computeEdges<uchar4> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); template void computeEdges<ushort> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); template void computeEdges<ushort3>(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); template void computeEdges<ushort4>(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); template void computeEdges<int> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); template void computeEdges<float> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, hipStream_t stream); __global__ void lableTiles(const PtrStepSzb edges, PtrStepSzi comps) { int x = threadIdx.x + blockIdx.x * TILE_COLS; int y = threadIdx.y + blockIdx.y * TILE_ROWS; if (x >= edges.cols || y >= edges.rows) return; //currently x is 1 int bounds = ((y + TPB_Y) < edges.rows); __shared__ int labelsTile[TILE_ROWS][TILE_COLS]; __shared__ int edgesTile[TILE_ROWS][TILE_COLS]; int new_labels[TPB_Y][TPB_X]; int old_labels[TPB_Y][TPB_X]; #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int yloc = threadIdx.y + CTA_SIZE_Y * i; int xloc = threadIdx.x + CTA_SIZE_X * j; component c = edges(bounds * (y + CTA_SIZE_Y * i), x + CTA_SIZE_X * j); if (!xloc) c &= ~LEFT; if (!yloc) c &= ~UP; if (xloc == TILE_COLS -1) c &= ~RIGHT; if (yloc == TILE_ROWS -1) c &= ~DOWN; new_labels[i][j] = yloc * TILE_COLS + xloc; edgesTile[yloc][xloc] = c; } for (int k = 0; ;++k) { //1. backup #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int yloc = threadIdx.y + CTA_SIZE_Y * i; int xloc = threadIdx.x + CTA_SIZE_X * j; old_labels[i][j] = new_labels[i][j]; labelsTile[yloc][xloc] = new_labels[i][j]; } __syncthreads(); //2. compare local arrays #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int yloc = threadIdx.y + CTA_SIZE_Y * i; int xloc = threadIdx.x + CTA_SIZE_X * j; component c = edgesTile[yloc][xloc]; int label = new_labels[i][j]; if (c & UP) label = ::min(label, labelsTile[yloc - 1][xloc]); if (c & DOWN) label = ::min(label, labelsTile[yloc + 1][xloc]); if (c & LEFT) label = ::min(label, labelsTile[yloc][xloc - 1]); if (c & RIGHT) label = ::min(label, labelsTile[yloc][xloc + 1]); new_labels[i][j] = label; } __syncthreads(); //3. determine: Is any value changed? 
int changed = 0; #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { if (new_labels[i][j] < old_labels[i][j]) { changed = 1; Emulation::smem::atomicMin(&labelsTile[0][0] + old_labels[i][j], new_labels[i][j]); } } changed = Emulation::syncthreadsOr(changed); if (!changed) break; //4. Compact paths const int *labels = &labelsTile[0][0]; #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int label = new_labels[i][j]; while( labels[label] < label ) label = labels[label]; new_labels[i][j] = label; } __syncthreads(); } #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int label = new_labels[i][j]; int yloc = label / TILE_COLS; int xloc = label - yloc * TILE_COLS; xloc += blockIdx.x * TILE_COLS; yloc += blockIdx.y * TILE_ROWS; label = yloc * edges.cols + xloc; // do it for x too. if (y + CTA_SIZE_Y * i < comps.rows) comps(y + CTA_SIZE_Y * i, x + CTA_SIZE_X * j) = label; } } __device__ __forceinline__ int root(const PtrStepSzi& comps, int label) { while(1) { int y = label / comps.cols; int x = label - y * comps.cols; int parent = comps(y, x); if (label == parent) break; label = parent; } return label; } __device__ __forceinline__ void isConnected(PtrStepSzi& comps, int l1, int l2, bool& changed) { int r1 = root(comps, l1); int r2 = root(comps, l2); if (r1 == r2) return; int mi = ::min(r1, r2); int ma = ::max(r1, r2); int y = ma / comps.cols; int x = ma - y * comps.cols; atomicMin(&comps.ptr(y)[x], mi); changed = true; } __global__ void crossMerge(const int tilesNumY, const int tilesNumX, int tileSizeY, int tileSizeX, const PtrStepSzb edges, PtrStepSzi comps, const int yIncomplete, int xIncomplete) { int tid = threadIdx.y * blockDim.x + threadIdx.x; int stride = blockDim.y * blockDim.x; int ybegin = blockIdx.y * (tilesNumY * tileSizeY); int yend = ybegin + tilesNumY * tileSizeY; if (blockIdx.y == gridDim.y - 1) { yend -= yIncomplete * tileSizeY; yend -= tileSizeY; tileSizeY = (edges.rows % tileSizeY); yend += tileSizeY; } int xbegin = blockIdx.x * tilesNumX * tileSizeX; int xend = xbegin + tilesNumX * tileSizeX; if (blockIdx.x == gridDim.x - 1) { if (xIncomplete) yend = ybegin; xend -= xIncomplete * tileSizeX; xend -= tileSizeX; tileSizeX = (edges.cols % tileSizeX); xend += tileSizeX; } if (blockIdx.y == (gridDim.y - 1) && yIncomplete) { xend = xbegin; } int tasksV = (tilesNumX - 1) * (yend - ybegin); int tasksH = (tilesNumY - 1) * (xend - xbegin); int total = tasksH + tasksV; bool changed; do { changed = false; for (int taskIdx = tid; taskIdx < total; taskIdx += stride) { if (taskIdx < tasksH) { int indexH = taskIdx; int row = indexH / (xend - xbegin); int col = indexH - row * (xend - xbegin); int y = ybegin + (row + 1) * tileSizeY; int x = xbegin + col; component e = edges( x, y); if (e & UP) { int lc = comps(y,x); int lu = comps(y - 1, x); isConnected(comps, lc, lu, changed); } } else { int indexV = taskIdx - tasksH; int col = indexV / (yend - ybegin); int row = indexV - col * (yend - ybegin); int x = xbegin + (col + 1) * tileSizeX; int y = ybegin + row; component e = edges(x, y); if (e & LEFT) { int lc = comps(y, x); int ll = comps(y, x - 1); isConnected(comps, lc, ll, changed); } } } } while (Emulation::syncthreadsOr(changed)); } __global__ void flatten(const PtrStepSzb edges, PtrStepSzi comps) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if( x < comps.cols && y < comps.rows) comps(y, x) = root(comps, comps(y, 
x)); } enum {CC_NO_COMPACT = 0, CC_COMPACT_LABELS = 1}; void labelComponents(const PtrStepSzb& edges, PtrStepSzi comps, int flags, hipStream_t stream) { (void) flags; dim3 block(CTA_SIZE_X, CTA_SIZE_Y); dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS)); hipLaunchKernelGGL(( lableTiles), dim3(grid), dim3(block), 0, stream, edges, comps); cudaSafeCall( hipGetLastError() ); int tileSizeX = TILE_COLS, tileSizeY = TILE_ROWS; while (grid.x > 1 || grid.y > 1) { dim3 mergeGrid((int)ceilf(grid.x / 2.f), (int)ceilf(grid.y / 2.f)); dim3 mergeBlock(STA_SIZE_MERGE_X, STA_SIZE_MERGE_Y); // debug log // std::cout << "merging: " << grid.y << " x " << grid.x << " ---> " << mergeGrid.y << " x " << mergeGrid.x << " for tiles: " << tileSizeY << " x " << tileSizeX << std::endl; hipLaunchKernelGGL(( crossMerge), dim3(mergeGrid), dim3(mergeBlock), 0, stream, 2, 2, tileSizeY, tileSizeX, edges, comps, (int)ceilf(grid.y / 2.f) - grid.y / 2, (int)ceilf(grid.x / 2.f) - grid.x / 2); tileSizeX <<= 1; tileSizeY <<= 1; grid = mergeGrid; cudaSafeCall( hipGetLastError() ); } grid.x = divUp(edges.cols, block.x); grid.y = divUp(edges.rows, block.y); hipLaunchKernelGGL(( flatten), dim3(grid), dim3(block), 0, stream, edges, comps); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } } } } } #endif /* CUDA_DISABLER */
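// Hedged note (not part of the generated file above): the CUDA original of this file
// follows as the next record entry; diffing the two shows the mechanical rewrites that
// hipify applies here. The triple-chevron launch
//     lableTiles<<<grid, block, 0, stream>>>(edges, comps);
// becomes
//     hipLaunchKernelGGL(( lableTiles), dim3(grid), dim3(block), 0, stream, edges, comps);
// and the runtime API calls are renamed one-for-one: cudaStream_t -> hipStream_t,
// cudaGetLastError -> hipGetLastError, cudaDeviceSynchronize -> hipDeviceSynchronize.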
16570124a40c421ae433a778b5fe5c728fb926fe.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <opencv2/core/cuda/common.hpp> #include <opencv2/core/cuda/vec_traits.hpp> #include <opencv2/core/cuda/vec_math.hpp> #include <opencv2/core/cuda/emulation.hpp> #include <iostream> #include <stdio.h> namespace cv { namespace cuda { namespace device { namespace ccl { enum { WARP_SIZE = 32, WARP_LOG = 5, CTA_SIZE_X = 32, CTA_SIZE_Y = 8, STA_SIZE_MERGE_Y = 4, STA_SIZE_MERGE_X = 32, TPB_X = 1, TPB_Y = 4, TILE_COLS = CTA_SIZE_X * TPB_X, TILE_ROWS = CTA_SIZE_Y * TPB_Y }; template<typename T> struct IntervalsTraits { typedef T elem_type; }; template<> struct IntervalsTraits<unsigned char> { typedef int dist_type; enum {ch = 1}; }; template<> struct IntervalsTraits<uchar3> { typedef int3 dist_type; enum {ch = 3}; }; template<> struct IntervalsTraits<uchar4> { typedef int4 dist_type; enum {ch = 4}; }; template<> struct IntervalsTraits<unsigned short> { typedef int dist_type; enum {ch = 1}; }; template<> struct IntervalsTraits<ushort3> { typedef int3 dist_type; enum {ch = 3}; }; template<> struct IntervalsTraits<ushort4> { typedef int4 dist_type; enum {ch = 4}; }; template<> struct IntervalsTraits<float> { typedef float dist_type; enum {ch = 1}; }; template<> struct IntervalsTraits<int> { typedef int dist_type; enum {ch = 1}; }; typedef unsigned char component; enum Edges { UP = 1, DOWN = 2, LEFT = 4, RIGHT = 8, EMPTY = 0xF0 }; template<typename T, int CH> struct InInterval {}; template<typename T> struct InInterval<T, 1> { typedef typename VecTraits<T>::elem_type E; __host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo((E)(-_lo.x)), hi((E)_hi.x) {}; T lo, hi; template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const { I d = a - b; return lo <= d && d <= hi; } }; template<typename T> struct InInterval<T, 3> { typedef typename VecTraits<T>::elem_type E; __host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo (VecTraits<T>::make((E)(-_lo.x), (E)(-_lo.y), (E)(-_lo.z))), hi (VecTraits<T>::make((E)_hi.x, (E)_hi.y, (E)_hi.z)){}; T lo, hi; template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const { I d = saturate_cast<I>(a - b); return lo.x <= d.x && d.x <= hi.x && lo.y <= d.y && d.y <= hi.y && lo.z <= d.z && d.z <= hi.z; } }; template<typename T> struct InInterval<T, 4> { typedef typename VecTraits<T>::elem_type E; __host__ __device__ __forceinline__ InInterval(const float4& _lo, const float4& _hi) : lo (VecTraits<T>::make((E)(-_lo.x), (E)(-_lo.y), (E)(-_lo.z), (E)(-_lo.w))), hi (VecTraits<T>::make((E)_hi.x, (E)_hi.y, (E)_hi.z, (E)_hi.w)){}; T lo, hi; template<typename I> __device__ __forceinline__ bool operator() (const I& a, const I& b) const { I d = saturate_cast<I>(a - b); return lo.x <= d.x && d.x <= hi.x && lo.y <= d.y && d.y <= hi.y && lo.z <= d.z && d.z <= hi.z && lo.w <= d.w && d.w <= hi.w; } }; template<typename T, typename F> __global__ void computeConnectivity(const PtrStepSz<T> image, PtrStepSzb components, F connected) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= image.cols || y >= image.rows) return; T intensity = image(y, x); component c = 0; if ( x > 0 && connected(intensity, image(y, x - 1))) c |= LEFT; if ( y > 0 && connected(intensity, image(y - 1, x))) c |= UP; if ( x + 1 < image.cols && connected(intensity, image(y, x + 1))) c |= RIGHT; if ( y + 1 < image.rows && connected(intensity, image(y + 1, x))) c |= DOWN; components(y, x) = c; } 
template< typename T> void computeEdges(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream) { dim3 block(CTA_SIZE_X, CTA_SIZE_Y); dim3 grid(divUp(image.cols, block.x), divUp(image.rows, block.y)); typedef InInterval<typename IntervalsTraits<T>::dist_type, IntervalsTraits<T>::ch> Int_t; Int_t inInt(lo, hi); computeConnectivity<T, Int_t><<<grid, block, 0, stream>>>(static_cast<const PtrStepSz<T> >(image), edges, inInt); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } template void computeEdges<uchar> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); template void computeEdges<uchar3> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); template void computeEdges<uchar4> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); template void computeEdges<ushort> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); template void computeEdges<ushort3>(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); template void computeEdges<ushort4>(const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); template void computeEdges<int> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); template void computeEdges<float> (const PtrStepSzb& image, PtrStepSzb edges, const float4& lo, const float4& hi, cudaStream_t stream); __global__ void lableTiles(const PtrStepSzb edges, PtrStepSzi comps) { int x = threadIdx.x + blockIdx.x * TILE_COLS; int y = threadIdx.y + blockIdx.y * TILE_ROWS; if (x >= edges.cols || y >= edges.rows) return; //currently x is 1 int bounds = ((y + TPB_Y) < edges.rows); __shared__ int labelsTile[TILE_ROWS][TILE_COLS]; __shared__ int edgesTile[TILE_ROWS][TILE_COLS]; int new_labels[TPB_Y][TPB_X]; int old_labels[TPB_Y][TPB_X]; #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int yloc = threadIdx.y + CTA_SIZE_Y * i; int xloc = threadIdx.x + CTA_SIZE_X * j; component c = edges(bounds * (y + CTA_SIZE_Y * i), x + CTA_SIZE_X * j); if (!xloc) c &= ~LEFT; if (!yloc) c &= ~UP; if (xloc == TILE_COLS -1) c &= ~RIGHT; if (yloc == TILE_ROWS -1) c &= ~DOWN; new_labels[i][j] = yloc * TILE_COLS + xloc; edgesTile[yloc][xloc] = c; } for (int k = 0; ;++k) { //1. backup #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int yloc = threadIdx.y + CTA_SIZE_Y * i; int xloc = threadIdx.x + CTA_SIZE_X * j; old_labels[i][j] = new_labels[i][j]; labelsTile[yloc][xloc] = new_labels[i][j]; } __syncthreads(); //2. compare local arrays #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int yloc = threadIdx.y + CTA_SIZE_Y * i; int xloc = threadIdx.x + CTA_SIZE_X * j; component c = edgesTile[yloc][xloc]; int label = new_labels[i][j]; if (c & UP) label = ::min(label, labelsTile[yloc - 1][xloc]); if (c & DOWN) label = ::min(label, labelsTile[yloc + 1][xloc]); if (c & LEFT) label = ::min(label, labelsTile[yloc][xloc - 1]); if (c & RIGHT) label = ::min(label, labelsTile[yloc][xloc + 1]); new_labels[i][j] = label; } __syncthreads(); //3. determine: Is any value changed? 
int changed = 0; #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { if (new_labels[i][j] < old_labels[i][j]) { changed = 1; Emulation::smem::atomicMin(&labelsTile[0][0] + old_labels[i][j], new_labels[i][j]); } } changed = Emulation::syncthreadsOr(changed); if (!changed) break; //4. Compact paths const int *labels = &labelsTile[0][0]; #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int label = new_labels[i][j]; while( labels[label] < label ) label = labels[label]; new_labels[i][j] = label; } __syncthreads(); } #pragma unroll for (int i = 0; i < TPB_Y; ++i) #pragma unroll for (int j = 0; j < TPB_X; ++j) { int label = new_labels[i][j]; int yloc = label / TILE_COLS; int xloc = label - yloc * TILE_COLS; xloc += blockIdx.x * TILE_COLS; yloc += blockIdx.y * TILE_ROWS; label = yloc * edges.cols + xloc; // do it for x too. if (y + CTA_SIZE_Y * i < comps.rows) comps(y + CTA_SIZE_Y * i, x + CTA_SIZE_X * j) = label; } } __device__ __forceinline__ int root(const PtrStepSzi& comps, int label) { while(1) { int y = label / comps.cols; int x = label - y * comps.cols; int parent = comps(y, x); if (label == parent) break; label = parent; } return label; } __device__ __forceinline__ void isConnected(PtrStepSzi& comps, int l1, int l2, bool& changed) { int r1 = root(comps, l1); int r2 = root(comps, l2); if (r1 == r2) return; int mi = ::min(r1, r2); int ma = ::max(r1, r2); int y = ma / comps.cols; int x = ma - y * comps.cols; atomicMin(&comps.ptr(y)[x], mi); changed = true; } __global__ void crossMerge(const int tilesNumY, const int tilesNumX, int tileSizeY, int tileSizeX, const PtrStepSzb edges, PtrStepSzi comps, const int yIncomplete, int xIncomplete) { int tid = threadIdx.y * blockDim.x + threadIdx.x; int stride = blockDim.y * blockDim.x; int ybegin = blockIdx.y * (tilesNumY * tileSizeY); int yend = ybegin + tilesNumY * tileSizeY; if (blockIdx.y == gridDim.y - 1) { yend -= yIncomplete * tileSizeY; yend -= tileSizeY; tileSizeY = (edges.rows % tileSizeY); yend += tileSizeY; } int xbegin = blockIdx.x * tilesNumX * tileSizeX; int xend = xbegin + tilesNumX * tileSizeX; if (blockIdx.x == gridDim.x - 1) { if (xIncomplete) yend = ybegin; xend -= xIncomplete * tileSizeX; xend -= tileSizeX; tileSizeX = (edges.cols % tileSizeX); xend += tileSizeX; } if (blockIdx.y == (gridDim.y - 1) && yIncomplete) { xend = xbegin; } int tasksV = (tilesNumX - 1) * (yend - ybegin); int tasksH = (tilesNumY - 1) * (xend - xbegin); int total = tasksH + tasksV; bool changed; do { changed = false; for (int taskIdx = tid; taskIdx < total; taskIdx += stride) { if (taskIdx < tasksH) { int indexH = taskIdx; int row = indexH / (xend - xbegin); int col = indexH - row * (xend - xbegin); int y = ybegin + (row + 1) * tileSizeY; int x = xbegin + col; component e = edges( x, y); if (e & UP) { int lc = comps(y,x); int lu = comps(y - 1, x); isConnected(comps, lc, lu, changed); } } else { int indexV = taskIdx - tasksH; int col = indexV / (yend - ybegin); int row = indexV - col * (yend - ybegin); int x = xbegin + (col + 1) * tileSizeX; int y = ybegin + row; component e = edges(x, y); if (e & LEFT) { int lc = comps(y, x); int ll = comps(y, x - 1); isConnected(comps, lc, ll, changed); } } } } while (Emulation::syncthreadsOr(changed)); } __global__ void flatten(const PtrStepSzb edges, PtrStepSzi comps) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if( x < comps.cols && y < comps.rows) comps(y, x) = root(comps, comps(y, 
x)); } enum {CC_NO_COMPACT = 0, CC_COMPACT_LABELS = 1}; void labelComponents(const PtrStepSzb& edges, PtrStepSzi comps, int flags, cudaStream_t stream) { (void) flags; dim3 block(CTA_SIZE_X, CTA_SIZE_Y); dim3 grid(divUp(edges.cols, TILE_COLS), divUp(edges.rows, TILE_ROWS)); lableTiles<<<grid, block, 0, stream>>>(edges, comps); cudaSafeCall( cudaGetLastError() ); int tileSizeX = TILE_COLS, tileSizeY = TILE_ROWS; while (grid.x > 1 || grid.y > 1) { dim3 mergeGrid((int)ceilf(grid.x / 2.f), (int)ceilf(grid.y / 2.f)); dim3 mergeBlock(STA_SIZE_MERGE_X, STA_SIZE_MERGE_Y); // debug log // std::cout << "merging: " << grid.y << " x " << grid.x << " ---> " << mergeGrid.y << " x " << mergeGrid.x << " for tiles: " << tileSizeY << " x " << tileSizeX << std::endl; crossMerge<<<mergeGrid, mergeBlock, 0, stream>>>(2, 2, tileSizeY, tileSizeX, edges, comps, (int)ceilf(grid.y / 2.f) - grid.y / 2, (int)ceilf(grid.x / 2.f) - grid.x / 2); tileSizeX <<= 1; tileSizeY <<= 1; grid = mergeGrid; cudaSafeCall( cudaGetLastError() ); } grid.x = divUp(edges.cols, block.x); grid.y = divUp(edges.rows, block.y); flatten<<<grid, block, 0, stream>>>(edges, comps); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } } } } #endif /* CUDA_DISABLER */
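// Hedged sketch (not part of the OpenCV sources above): a minimal, self-contained
// illustration of the pointer-jumping scheme that root()/isConnected()/flatten()
// apply to the label image, written against a plain 1D parent array instead of a
// PtrStepSzi. Names (toy_root, toy_merge, toy_flatten) are hypothetical, and a full
// implementation repeats merge passes until no label changes, as crossMerge does
// with its do/while around Emulation::syncthreadsOr.
#include <cuda_runtime.h>

__device__ int toy_root(const int* parent, int label)
{
    // Follow parent links until a label points at itself (the component representative).
    while (parent[label] != label)
        label = parent[label];
    return label;
}

__global__ void toy_merge(int* parent, const int* edges_a, const int* edges_b, int num_edges)
{
    // Each edge (a, b) asserts the two labels belong to the same component:
    // hang the larger root under the smaller one, as isConnected() does with atomicMin.
    int e = blockIdx.x * blockDim.x + threadIdx.x;
    if (e >= num_edges)
        return;
    int ra = toy_root(parent, edges_a[e]);
    int rb = toy_root(parent, edges_b[e]);
    if (ra == rb)
        return;
    int mi = ::min(ra, rb);
    int ma = ::max(ra, rb);
    atomicMin(&parent[ma], mi);
}

__global__ void toy_flatten(int* parent, int num_labels)
{
    // Final pass, analogous to flatten(): point every label directly at its root.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < num_labels)
        parent[i] = toy_root(parent, parent[i]);
}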
c8f90b64d1e263aed6cf76ba0827f8fbbce147da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2007-2012 Computational Electromagnetic Group (CEM), Dept. ECE, UC San Diego. All rights reserved. * Author: Shaojing Li, March 2012 */ /* * nufft_static_vector_gpu_kernel.cu: kernels used by NufftStaticVectorGpu */ #include "interp_kernel.h" #include "nufft_static_vector_gpu_kernel.h" //#define BEN_DEBUG //#define BEN_DEBUG_MULTI //#define BEN_NEW_METHOD //#define BEN_DEBUG_FFT namespace NBODYFAST_NS{ /* * most kernels have a cubic verison and a linear version, corresponding to two different interpolation scheme */ __global__ void nufft_project_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; // thread index within a block unsigned int _bdim = blockDim.x; // number of threads per block unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; // block index (the grid might be 2D) unsigned int _local_box_idx; // local box index (index of the box among all boxes on the same device) unsigned int _box_idx; // global box index (index of the box as in the entire computational domain) unsigned int _box_idx_dim[3]; // 3D index of boxes (used to determine the location of box) unsigned int _box_sub_idx = 0; // sub index of a box (used when a box is treated by multiple blocks) unsigned int _obs_idx; // observer index (this is the index of grid points in this kernel) unsigned int _obs_idx_dim[3]; // 3D index of observers (used to determin the coordinates of each grid points) __shared__ FP_TYPE s_src_coord[BLOCK_SIZE_PROJ_INTERP * 3]; // shared memory array to store the source coordinates __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_PROJ_INTERP * FIELD_DIM]; //shared memory array to store the source amplitudes // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; int const src_size_dev = nufft_const->src_size_dev; // number of sources on this device (so the length of source arrays) int * const num_boxes = nufft_const->num_boxes; // number of total boxes (across the entire domain) FP_TYPE * const box_size = nufft_const->box_size; // as it is shown...the box size FP_TYPE * const src_domain_range = nufft_const->src_domain_range; // computational domain boundaries and sizes FP_TYPE _P[FIELD_DIM]; // used to store final fields. declared as an array as it might have more than 1 components for vector fields for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; int _global_src_idx_start2 = 0; // the offset of source coord/amp in the global source arrays int _num_src2 = 0; // number of sources in the current box FP_TYPE _box_origin[3]; // the front-top-left corner of the current box FP_TYPE _interp_coeff; // interpolation coefficients FP_TYPE _r_norm[3]; // normalized coordinates of a source within a box. 
used to do the Lagrange interpolation int _shared_start = 0; // a temporary variable for serializing tasks while number of threads is less than number of sources int _shared_offset; // indicate the range of shared memory for current box (when multiple boxes are handled by the same block) if (param->num_blk_per_box > 0) // number of block per box is greater than or equal to 1 { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; _shared_offset = 0; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) // number of boxes per block is greater than 1 { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_array->d_src_box_list_inv[0]) return; // if _local_box_idx is greater than total number of boxes the current device should process, then just terminate the current thread _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; // otherwise, get the global box idx from d_src_box_list if (_box_idx >= nufft_const->total_num_boxes) return; // I think this is an unnecessary check idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); // get 3D observer index from 1D observer index // get 3D box index from 1D box index _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; // calculate the position of the box } // The starting index of sources in the current box _global_src_idx_start2 = nufft_array->d_src_box_map[_box_idx]; // Number of sources in the current box _num_src2 = nufft_array->d_src_box_map[_box_idx + nufft_const->total_num_boxes * 2]; while (_shared_start < _num_src2) { // Load/Calculate the normalized coordinates and amplitudes of sources to shared memory if (_shared_start + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // s_src_coord stores the coordinates that have been normalized to [0, 1]; s_src_coord[_tidx + m * _bdim] = (nufft_array->d_src_coord[_global_src_idx_start2 + (_shared_start + _obs_idx) + src_size_dev * m] - _box_origin[m]) / box_size[m]; } for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx + j * BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_src_amp[_global_src_idx_start2 + _shared_start + _obs_idx + j * src_size_dev]; } __syncthreads(); // Loop around current piece of source (not more than number of threads per block) // From source to source grid for (int i = 0; (i < interp_nodes_per_box) && (i + _shared_start < _num_src2); i++) { for (int m = 0; m < 3; m++) { _r_norm[m] = s_src_coord[i + _shared_offset + m * _bdim]; } lagrange_project_cubic(_interp_coeff, _r_norm, _obs_idx_dim); for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += _interp_coeff * s_src_amp[i + _shared_offset + j * BLOCK_SIZE_PROJ_INTERP]; } // i __syncthreads(); _shared_start += interp_nodes_per_box; } // _shared_start for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_src_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j * nufft_const->total_num_boxes_dev * interp_nodes_per_box] = _P[j]; #ifdef _GPU_D_TEST _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] = 0.00f; #endif #ifdef 
_GPU_D_TEST _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] += _P[0]; // * _bdim;//_interp_coeff; #endif } __global__ void nufft_project_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _local_box_idx; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; __shared__ FP_TYPE s_src_coord[BLOCK_SIZE_PROJ_INTERP * 3]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_PROJ_INTERP * FIELD_DIM]; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; int const src_size_dev = nufft_const->src_size_dev; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; FP_TYPE _P[FIELD_DIM]; for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; int _global_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _box_origin[3]; FP_TYPE _interp_coeff; FP_TYPE _r_norm[3]; int _shared_start = 0; int _shared_offset; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; _shared_offset = 0; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_array->d_src_box_list_inv[0]) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; if (_box_idx >= nufft_const->total_num_boxes) return; idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } // The starting index of sources in the current box _global_src_idx_start2 = nufft_array->d_src_box_map[_box_idx]; // Number of sources in the current box _num_src2 = nufft_array->d_src_box_map[_box_idx + nufft_const->total_num_boxes * 2]; while (_shared_start < _num_src2) { // Load/Calculate the normalized coordinates and amplitudes of sources to shared memory if (_shared_start + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // s_src_coord stores the coordinates that have been normalized to [0, 1]; s_src_coord[_tidx + m * _bdim] = (nufft_array->d_src_coord[_global_src_idx_start2 + (_shared_start + _obs_idx) + src_size_dev * m] - _box_origin[m]) / box_size[m]; } for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx + j * BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_src_amp[_global_src_idx_start2 + _shared_start + _obs_idx + j * src_size_dev]; } __syncthreads(); // Loop around current piece of source (not more than number of threads per block) // From source to source grid for (int i = 0; (i < interp_nodes_per_box) && (i + _shared_start < _num_src2); i++) { for (int m = 0; m < 3; m++) { _r_norm[m] = s_src_coord[i + _shared_offset + m * _bdim]; } 
lagrange_project_linear(_interp_coeff, _r_norm, _obs_idx_dim); for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += _interp_coeff * s_src_amp[i + _shared_offset + j * BLOCK_SIZE_PROJ_INTERP]; } // i __syncthreads(); _shared_start += interp_nodes_per_box; } // _shared_start for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_src_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j * nufft_const->total_num_boxes_dev * interp_nodes_per_box] = _P[j]; #ifdef _GPU_D_TEST _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] = 9.99f; _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] = _P[0]; #endif } __global__ void nufft_fft_prep_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { const unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; // global thread index const unsigned int interp_nodes_per_box = 64; FP_TYPE _P[FIELD_DIM]; for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; if (_gid >= nufft_const->total_num_grid_pts) return; // terminate extra threads launched int _grid_idx_dim[3]; // 3D global grid point index int _local_idx_dim[6]; // 3D local grid point index (for each grid point, there could be two different boxes contributing to the field along each dimension) int _box_idx_dim[6]; // for each grid point, there could be two boxes contributing to it along each dimension int _box_idx[9]; // _box_idx[8] stores number of boxes contributing to the current grid points. _box_idx[0~7] stores the contributing box index int _local_idx[9]; // _box_idx[0~7] stores the contributing index of the grid points of the contributing box for (int i = 0; i < 9; i++) { _box_idx[i] = -1; _local_idx[i] = -1; } // 3D index of current grid points in the entire projection grid _grid_idx_dim[2] = _gid / (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1]); _grid_idx_dim[1] = (_gid - _grid_idx_dim[2] * (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1])) / nufft_const->num_grid_pts[0]; _grid_idx_dim[0] = _gid % nufft_const->num_grid_pts[0]; // the position of current grid points in the FFT grid (FFT grid is padded so is larger than the projection grid) int _cufft_in_addr = _grid_idx_dim[2] * nufft_const->fft_size[0] * nufft_const->fft_size[1] + _grid_idx_dim[1] * nufft_const->fft_size[0] + _grid_idx_dim[0]; int _u_src_grid_addr = 0; grid_idx_to_local_idx_cubic(_grid_idx_dim, _box_idx_dim, _local_idx_dim, nufft_const->num_boxes); // get local index int _dim_idx[3]; int _cnt1 = 0; int _box_idx_temp; // there will be 8 possible boxes that overlaps at the current grid points // however, there might be less if the current grid points is not at the corner of a box, or the box is at the corner, on the edge or surface of the entire computational domain. 
// the following triple loops judges how many boxes are valid and really contributing to the current grid point for (_dim_idx[0] = 0; _dim_idx[0] < 2; _dim_idx[0]++) for (_dim_idx[1] = 0; _dim_idx[1] < 2; _dim_idx[1]++) for (_dim_idx[2] = 0; _dim_idx[2] < 2; _dim_idx[2]++) { if (_box_idx_dim[_dim_idx[0]] >= 0 && _box_idx_dim[2 + _dim_idx[1]] >= 0 && _box_idx_dim[4 + _dim_idx[2]] >= 0) { _box_idx_temp = _box_idx_dim[_dim_idx[0]] + _box_idx_dim[2 + _dim_idx[1]] * nufft_const->num_boxes[0] + _box_idx_dim[4 + _dim_idx[2]] * nufft_const->num_boxes[0] * nufft_const->num_boxes[1]; _box_idx[_cnt1] = _box_idx_temp; int _local_idx_dim_temp[3]; _local_idx_dim_temp[0] = _local_idx_dim[_dim_idx[0]]; _local_idx_dim_temp[1] = _local_idx_dim[2 + _dim_idx[1]]; _local_idx_dim_temp[2] = _local_idx_dim[4 + _dim_idx[2]]; local_idx_dim_to_local_idx_cubic(&_local_idx[_cnt1], _local_idx_dim_temp); _cnt1++; } } _box_idx[8] = _cnt1; #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif // add the projected amplitudes of all overlapping grid points together for (int i = 0; i < _box_idx[8]; i++) { _u_src_grid_addr = _box_idx[i] * interp_nodes_per_box + _local_idx[i]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += nufft_array->d_u_src_grid[_u_src_grid_addr + j * nufft_const->total_num_boxes * interp_nodes_per_box]; #ifdef _GPU_D_TEST _d_test[_gid] = size_t(_u_src_grid_addr); #endif } for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_fft_inplace_r2c_FP[_cufft_in_addr + j*nufft_const->total_num_fft_pts] = _P[j]; } ///FFT CHANGE __global__ void nufft_fft_prep_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { const unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; const unsigned int interp_nodes_per_box = 8; FP_TYPE _P[FIELD_DIM]; for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; if (_gid >= nufft_const->total_num_grid_pts) return; int _grid_idx_dim[3]; int _local_idx_dim[6]; int _box_idx_dim[6]; int _box_idx[9]; int _local_idx[9]; for (int i = 0; i < 9; i++) { _box_idx[i] = -1; _local_idx[i] = -1; } _grid_idx_dim[2] = _gid / (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1]); _grid_idx_dim[1] = (_gid - _grid_idx_dim[2] * (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1])) / nufft_const->num_grid_pts[0]; _grid_idx_dim[0] = _gid % nufft_const->num_grid_pts[0]; int _cufft_in_addr = _grid_idx_dim[2] * nufft_const->fft_size[0] * nufft_const->fft_size[1] + _grid_idx_dim[1] * nufft_const->fft_size[0] + _grid_idx_dim[0]; int _u_src_grid_addr = 0; grid_idx_to_local_idx_linear(_grid_idx_dim, _box_idx_dim, _local_idx_dim, nufft_const->num_boxes); int _dim_idx[3]; int _cnt1 = 0; int _box_idx_temp; for (_dim_idx[0] = 0; _dim_idx[0] < 2; _dim_idx[0]++) for (_dim_idx[1] = 0; _dim_idx[1] < 2; _dim_idx[1]++) for (_dim_idx[2] = 0; _dim_idx[2] < 2; _dim_idx[2]++) { if (_box_idx_dim[_dim_idx[0]] >= 0 && _box_idx_dim[2 + _dim_idx[1]] >= 0 && _box_idx_dim[4 + _dim_idx[2]] >= 0) { _box_idx_temp = _box_idx_dim[_dim_idx[0]] + _box_idx_dim[2 + _dim_idx[1]] * nufft_const->num_boxes[0] + _box_idx_dim[4 + _dim_idx[2]] * nufft_const->num_boxes[0] * nufft_const->num_boxes[1]; _box_idx[_cnt1] = _box_idx_temp; int _local_idx_dim_temp[3]; _local_idx_dim_temp[0] = _local_idx_dim[_dim_idx[0]]; _local_idx_dim_temp[1] = _local_idx_dim[2 + _dim_idx[1]]; _local_idx_dim_temp[2] = _local_idx_dim[4 + _dim_idx[2]]; local_idx_dim_to_local_idx_linear(&_local_idx[_cnt1], _local_idx_dim_temp); _cnt1++; } } 
_box_idx[8] = _cnt1; for (int i = 0; i < _box_idx[8]; i++) { _u_src_grid_addr = _box_idx[i] * interp_nodes_per_box + _local_idx[i]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += nufft_array->d_u_src_grid[_u_src_grid_addr + j * nufft_const->total_num_boxes * interp_nodes_per_box]; } for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_fft_inplace_r2c_FP[_cufft_in_addr + j*nufft_const->total_num_fft_pts] = _P[j]; #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; // _d_test[_gid] = nufft_array->d_u_src_grid[_gid]; #endif } //BEN ELIMINATED THE d_fft_inplace_b __global__ void nufft_convolution_static_vector(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // convolution is simple. just multiply the transformed matrix and impedance matrix, entry-by-entry const unsigned int _tid = threadIdx.x; const unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; //const unsigned int S_GREEN_SIZE = __shared__ CUFFT_COMPLEX_TYPE s_u_src_grid_k[FIELD_DIM * BLOCK_SIZE_CONV]; __shared__ FP_TYPE s_g_grid_k[ FIELD_DIM*(FIELD_DIM+1)/2 * BLOCK_SIZE_CONV];//IMP MAT CHANGE!!!SHOULD BE LESS!!!!IF THIS SUBROUTINE IS LIMITED BY SHARED MEMORY, THEN //WE SHOULD SIMPLY NOT USE SHARED MEMORY HERE if (_gid >= (nufft_const->total_num_fft_r2c_pts)) return; int i = _gid % nufft_const->fft_r2c_size[0]; int k = _gid / (nufft_const->fft_r2c_size[0]*nufft_const->fft_r2c_size[1]); int j = (_gid - k*nufft_const->fft_r2c_size[0]*nufft_const->fft_r2c_size[1])/nufft_const->fft_r2c_size[0]; int flag_y = 1; int flag_z = 1; if( j >= nufft_const->green_size[1] ) { j = nufft_const->fft_size[1] - j; flag_y = -1;} if( k >= nufft_const->green_size[2]) { k = nufft_const->fft_size[2] - k; flag_z = -1;} int _green_id = i + j*nufft_const->green_size[0] + k*nufft_const->green_size[0]*nufft_const->green_size[1]; for(unsigned int j = 0; j < FIELD_DIM; j++) s_u_src_grid_k[_tid+j*BLOCK_SIZE_CONV] = nufft_array->d_fft_inplace_r2c[_gid+j*nufft_const->total_num_fft_r2c_pts]; //the Green's func assignment in shared memory is not compatible to FIELD_DIM != 3 s_g_grid_k[_tid ] = nufft_array->d_k_imp_mat_data_gpu[_green_id]; //xx s_g_grid_k[_tid+ BLOCK_SIZE_CONV] = flag_y * nufft_array->d_k_imp_mat_data_gpu[_green_id + nufft_const->total_num_green_pts]; //xy s_g_grid_k[_tid+2*BLOCK_SIZE_CONV] = flag_z * nufft_array->d_k_imp_mat_data_gpu[_green_id + 2*nufft_const->total_num_green_pts]; //xz s_g_grid_k[_tid+3*BLOCK_SIZE_CONV] = nufft_array->d_k_imp_mat_data_gpu[_green_id + 3*nufft_const->total_num_green_pts]; //yy s_g_grid_k[_tid+4*BLOCK_SIZE_CONV] = flag_y*flag_z * nufft_array->d_k_imp_mat_data_gpu[_green_id + 4*nufft_const->total_num_green_pts]; //yz s_g_grid_k[_tid+5*BLOCK_SIZE_CONV] = nufft_array->d_k_imp_mat_data_gpu[_green_id + 5*nufft_const->total_num_green_pts]; //zz FP_TYPE real0 = s_u_src_grid_k[_tid].x*s_g_grid_k[_tid] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV]; FP_TYPE img0 = s_u_src_grid_k[_tid].y*s_g_grid_k[_tid] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV]; FP_TYPE real1 = s_u_src_grid_k[_tid].x*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+3*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV]; FP_TYPE img1 = 
s_u_src_grid_k[_tid].y*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+3*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV]; FP_TYPE real2 = s_u_src_grid_k[_tid].x*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+5*BLOCK_SIZE_CONV]; FP_TYPE img2 = s_u_src_grid_k[_tid].y*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+5*BLOCK_SIZE_CONV]; //the division here can be put into the Greens'func preprocessing nufft_array->d_fft_inplace_r2c[_gid].x = real0/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid].y = img0/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+nufft_const->total_num_fft_r2c_pts].x = real1/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+nufft_const->total_num_fft_r2c_pts].y = img1/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+2*nufft_const->total_num_fft_r2c_pts].x = real2/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+2*nufft_const->total_num_fft_r2c_pts].y = img2/FP_TYPE(nufft_const->total_num_fft_pts); #ifdef BEN_DEBUG_FFT _d_test[_gid] = nufft_const->green_size[0]*1000+nufft_const->fft_size[0];//s_g_grid_k[_tid]; #endif #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = nufft_array->d_fft_inplace_b[_gid].x; #endif } //BEN ELIMINATED THE d_fft_inplace_b __global__ void nufft_fft_postp_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // post processing does opposite thing as the pre processing unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; const unsigned int interp_nodes_per_box = 64; if (param->num_blk_per_box > 0) { _box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) { _box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_box_idx >= nufft_const->total_num_boxes) return; // Get the Index number of the observer box _box_idx_dim[2] = _box_idx / (nufft_const->num_boxes[0] * nufft_const->num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (nufft_const->num_boxes[0] * nufft_const->num_boxes[1])) / nufft_const->num_boxes[0]; _box_idx_dim[0] = _box_idx % nufft_const->num_boxes[0]; // Get the global index number of the grid point unsigned int obs_idx_glb; idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); obs_idx_dim_to_obs_idx_glb_cubic(obs_idx_glb, _box_idx_dim, _obs_idx_dim, nufft_const->fft_size); /*nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_b[obs_idx_glb].x;*/ //nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_f[obs_idx_glb].x; for(unsigned int j = 0; j < FIELD_DIM; j++) 
nufft_array->d_u_obs_grid[_gid+j*nufft_const->total_num_boxes*interp_nodes_per_box] = nufft_array->d_fft_inplace_r2c_FP[obs_idx_glb+j*nufft_const->total_num_fft_pts]; #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = nufft_array->d_u_obs_grid[_gid]; #endif } //BEN ELIMINATED THE d_fft_inplace_b __global__ void nufft_fft_postp_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; const unsigned int interp_nodes_per_box = 8; if (param->num_blk_per_box > 0) { _box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) { _box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_box_idx >= nufft_const->total_num_boxes) return; // Get the Index number of the observer box _box_idx_dim[2] = _box_idx / (nufft_const->num_boxes[0] * nufft_const->num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (nufft_const->num_boxes[0] * nufft_const->num_boxes[1])) / nufft_const->num_boxes[0]; _box_idx_dim[0] = _box_idx % nufft_const->num_boxes[0]; // Get the global index number of the grid point unsigned int obs_idx_glb; idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); obs_idx_dim_to_obs_idx_glb_linear(obs_idx_glb, _box_idx_dim, _obs_idx_dim, nufft_const->fft_size); //nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_b[obs_idx_glb].x; //nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_f[obs_idx_glb].x; for(unsigned int j = 0; j < FIELD_DIM; j++) nufft_array->d_u_obs_grid[_gid+j*nufft_const->total_num_boxes*interp_nodes_per_box] = nufft_array->d_fft_inplace_r2c_FP[obs_idx_glb+j*nufft_const->total_num_fft_pts]; #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = nufft_array->d_u_obs_grid[_gid]; #endif } //!!!!!!!!!!!!!NEAR CHANGE!!!!!!!!!!!!! 
#ifndef BEN_NEW_METHOD __global__ void nufft_correct_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // nufft_correct_static_vector_cubic calculate the inaccurate field generated by near field sources again and subtract them from the total field // we don't have to project the amplitudes from source to source grid again since we already have them stored in the array d_u_src_grid_dev unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_CORRECT*FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int i = 0; i < FIELD_DIM; i++) _Q1[i] = 0.0f; FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif // Ben new bound method //for (_near_box_idx_dim[2] = nufft_array->d_near_bound_list[6*_box_idx+2]; _near_box_idx_dim[2] <= nufft_array->d_near_bound_list[6*_box_idx+5]; _near_box_idx_dim[2]++){ // for(_near_box_idx_dim[1] = nufft_array->d_near_bound_list[6*_box_idx+1]; _near_box_idx_dim[1] <= nufft_array->d_near_bound_list[6*_box_idx+4]; _near_box_idx_dim[1]++){ // for(_near_box_idx_dim[0] = nufft_array->d_near_bound_list[6*_box_idx+0]; _near_box_idx_dim[0] <= nufft_array->d_near_bound_list[6*_box_idx+3]; _near_box_idx_dim[0]++){ // // For each box, d_NearBoxListThread has 
28 entries (maybe empty), while the first entry is // // the total number of near boxes. // _near_box_idx = _near_box_idx_dim[0] + _near_box_idx_dim[1]*num_boxes[0] + _near_box_idx_dim[2]*num_boxes[0]*num_boxes[1];//near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); // // Current near box origins // for (int m = 0; m < 3; m++) // _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; // __syncthreads(); // _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; // s_src_amp[_tidx] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx]; // __syncthreads(); // /////////////////////////////////////////////////////////////////////////////////////// // //// Get the inaccurate near field. // //// Source: near-list box grid points // //// Observer: actual box grid points // //// Interaction: direct Green's function // ///////////////////////////////////////////////////////////////////////////////////////// // for (int m = 0; m < 3; m++) // { // // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. // _r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; // } // // cubic : 28% of computational time of this subroutine goes here // direct_grid_interact_cubic_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon); // //__syncthreads(); // } // } //}// _near_box_counter // Loop around all near boxes, presumbly 27 !!!!CHANGED BY BEN for (int _near_box_counter = 0; _near_box_counter < total_num_near_box_per_box; _near_box_counter++) { // For each box, d_NearBoxListThread has 28 entries (maybe empty), while the first entry is // the total number of near boxes. _near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 || nufft_array->d_src_box_map[_near_box_idx + nufft_const->total_num_boxes * 2] == 0) continue; // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } __syncthreads(); _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// //// Get the inaccurate near field. //// Source: near-list box grid points //// Observer: actual box grid points //// Interaction: direct Green's function ///////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. 
_r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // cubic : 28% of computational time of this subroutine goes here direct_grid_interact_cubic_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon, BLOCK_SIZE_CORRECT); //__syncthreads(); #ifdef _GPU_D_TEST _d_test[_gid] += _Q1[0]; #endif } // _near_box_counter __syncthreads(); // Field amplitudes on the observer grid // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb] - _Q1[0]; // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; #ifdef BEN_DEBUG_MULTI //_d_test[_gid] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx]; #endif } #else __global__ void nufft_correct_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // nufft_correct_static_vector_cubic calculate the inaccurate field generated by near field sources again and subtract them from the total field // we don't have to project the amplitudes from source to source grid again since we already have them stored in the array d_u_src_grid_dev unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; _local_box_idx = _bid; _obs_idx = _tidx % interp_nodes_per_box; //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
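	// In this BEN_NEW_METHOD variant every block owns exactly one local box (_local_box_idx == _bid):
	// the whole thread block stages the near-box list in shared memory, and only the first
	// interp_nodes_per_box threads write their accumulated correction back to the box grid.
	// Note (assumption): s_src_amp is declared with 64 entries below but is indexed with a
	// BLOCK_SIZE_CORRECT stride per field component further down; since BEN_NEW_METHOD is left
	// undefined at the top of this file the branch is normally not compiled, but it would seem to
	// need the BLOCK_SIZE_CORRECT*FIELD_DIM sizing of the default branch before being enabled.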
if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[64]; __shared__ int s_near_box_list[BLOCK_SIZE_CORRECT]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = FP_TYPE(0.0f); FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif int near_box_left = total_num_near_box_per_box; int s_near_list_start = 0; #ifdef BEN_DEBUG int tmp_cnt = 0; #endif while( near_box_left > 0 ){ if( _tidx < near_box_left ){ s_near_box_list[_tidx] = near_to_glb_idx(s_near_list_start + _tidx, _bid, near_correct_layer, num_boxes);//_bid can be replaced by _box_idx_dim, because it should be only computed once } else{ // IS IT NECESSARY??? s_near_box_list[_tidx] = -1; } __syncthreads(); // Loop around all near boxes, presumbly 27 !!!!CHANGED BY BEN for (int _near_box_counter = 0; _near_box_counter < near_box_left && _near_box_counter < BLOCK_SIZE_CORRECT; _near_box_counter++) { // For each box, d_NearBoxListThread has 28 entries (maybe empty), while the first entry is // the total number of near boxes. _near_box_idx = s_near_box_list[_near_box_counter]; //_near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 ) continue; #ifdef BEN_DEBUG tmp_cnt++; #endif // Current near box index number _near_box_idx_dim[2] = _near_box_idx / (num_boxes[0] * num_boxes[1]); _near_box_idx_dim[1] = (_near_box_idx - _near_box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _near_box_idx_dim[0] = _near_box_idx % num_boxes[0]; // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// //// Get the inaccurate near field. //// Source: near-list box grid points //// Observer: actual box grid points //// Interaction: direct Green's function ///////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. 
_r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // cubic : 28% of computational time of this subroutine goes here direct_grid_interact_cubic_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon, BLOCK_SIZE_CORRECT); __syncthreads(); #ifdef _GPU_D_TEST _d_test[_gid] += _Q1[0]; #endif } // _near_box_counter near_box_left -= BLOCK_SIZE_CORRECT; s_near_list_start += BLOCK_SIZE_CORRECT; }//while(near_box_left > 0) __syncthreads(); #ifdef BEN_DEBUG //if( s_near_list_start != 0 ) _d_test[_gid] = tmp_cnt;//s_near_box_list[_tidx];//_Q1[0];// #endif if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; } } //!!!!!!!!!!!!!NEAR CHANGE!!!!!!!!!!!!! #endif #ifndef BEN_NEW_METHOD __global__ void nufft_correct_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_const->total_num_boxes_dev) //nufft_const->total_num_boxes is the number of boxes in current device return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_CORRECT*FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = FP_TYPE(0.0f); FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } 
idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); // Loop around all near boxes, presumbly 27 /*for (int _near_box_counter = 0; _near_box_counter < nufft_array->d_near_box_list[_local_box_idx * total_num_near_box_per_box_p1]; _near_box_counter++) {*/ for( int _near_box_counter = 0; _near_box_counter < total_num_near_box_per_box; _near_box_counter++){ // For each box, d_NearBoxListThread has 28 entries (maybe empty), while the first entry is // the total number of near boxes. //_near_box_idx = nufft_array->d_near_box_list[_near_box_counter + 1 + _local_box_idx * total_num_near_box_per_box_p1]; _near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 || nufft_array->d_src_box_map[_near_box_idx + nufft_const->total_num_boxes * 2] == 0) continue; // Current near box index number /*_near_box_idx_dim[2] = _near_box_idx / (num_boxes[0] * num_boxes[1]); _near_box_idx_dim[1] = (_near_box_idx - _near_box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _near_box_idx_dim[0] = _near_box_idx % num_boxes[0];*/ // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } // Field amplitudes on the source grid _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1];//d_src_box_list_inv has a length of total_num_boxes+1 for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// // Get the inaccurate near field. // Source: near-list box grid points // Observer: actual box grid points // Interaction: direct Green's function /////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. 
_r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // linear: 0% of computational time of this subroutine goes here direct_grid_interact_linear_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon, BLOCK_SIZE_CORRECT); } // _near_box_counter __syncthreads(); // Field amplitudes on the observer grid // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb] - _Q1[0]; // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; #ifdef BEN_DEBUG_MULTI //_d_test[_gid] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx]; #endif #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = _local_box_idx * interp_nodes_per_box + _obs_idx; #endif } #else __global__ void nufft_correct_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; _local_box_idx = _bid; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[8]; __shared__ int s_near_box_list[BLOCK_SIZE_CORRECT]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = FP_TYPE(0.0f); FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); int near_box_left = total_num_near_box_per_box; int s_near_list_start = 0; #ifdef BEN_DEBUG int tmp_cnt = 0; #endif while( near_box_left > 0 ){ if( _tidx < near_box_left ){ s_near_box_list[_tidx] = near_to_glb_idx(s_near_list_start + _tidx, _bid, near_correct_layer, num_boxes);//_bid can be replaced by _box_idx_dim, because it should be only computed once } else{ // IS IT NECESSARY??? 
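			// Padding the tail with -1 appears to be defensive only: the inner loop below is bounded
			// by near_box_left, so these entries should never be read, but the sentinel prevents any
			// stale values from a previous pass from being treated as valid box indices.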
s_near_box_list[_tidx] = -1; } __syncthreads(); // Loop around all possible near boxes for( int _near_box_counter = 0; _near_box_counter < near_box_left && _near_box_counter < BLOCK_SIZE_CORRECT; _near_box_counter++){ _near_box_idx = s_near_box_list[_near_box_counter]; //_near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 ) continue; //tmp_cnt ++; // Current near box index number _near_box_idx_dim[2] = _near_box_idx / (num_boxes[0] * num_boxes[1]); _near_box_idx_dim[1] = (_near_box_idx - _near_box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _near_box_idx_dim[0] = _near_box_idx % num_boxes[0]; // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } // Field amplitudes on the source grid _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// // Get the inaccurate near field. // Source: near-list box grid points // Observer: actual box grid points // Interaction: direct Green's function /////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. _r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // linear: 0% of computational time of this subroutine goes here //if( _tidx < interp_nodes_per_box) direct_grid_interact_linear_static_vector(_r1, _Q1, s_src_amp, cell_size, epsilon, BLOCK_SIZE_CORRECT); __syncthreads(); } // _near_box_counter near_box_left -= BLOCK_SIZE_CORRECT; s_near_list_start += BLOCK_SIZE_CORRECT; } //while( near_box_left > 0 ) __syncthreads(); #ifdef BEN_DEBUG //if( s_near_list_start != 0 ) _d_test[_gid] = tmp_cnt;//s_near_box_list[_tidx];//_Q1[0];// #endif // Field amplitudes on the observer grid // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb] - _Q1[0]; // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb]; if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; } #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = _local_box_idx * interp_nodes_per_box + _obs_idx; #endif } #endif __global__ void nufft_interp_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; unsigned int const total_num_boxes = 
nufft_const->total_num_boxes; unsigned int const total_num_boxes_dev = nufft_const->total_num_boxes_dev; int const obs_size_dev = nufft_const->obs_size_dev; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const obs_domain_range = nufft_const->obs_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_obs_coord[BLOCK_SIZE_PROJ_INTERP * 3]; __shared__ FP_TYPE s_obs_amp[BLOCK_SIZE_PROJ_INTERP*FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = 0.0f; FP_TYPE _r_norm[3]; int _glb_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _box_origin[3]; int _shared_start2; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + obs_domain_range[m]; } idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); // Field amplitudes on the observer grid, with inaccurate near field subtracted for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_obs_amp[_tidx+j*BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); // Number of observers in the current box _num_src2 = nufft_array->d_obs_box_map[_box_idx + 2 * total_num_boxes]; _glb_src_idx_start2 = nufft_array->d_obs_box_map[_box_idx]; _shared_start2 = 0; while (_shared_start2 < _num_src2) // loop around all sources { /////////////////////////////////////////////////////////////////////////////////////// // Doing interpolations from grid to actual observers /////////////////////////////////////////////////////////////////////////////////////// // // Calculating far field, interpolation from the grid points to the observers for(unsigned int j = 0; j < FIELD_DIM; j++ ) _Q1[j] = 0.0f; if (_shared_start2 + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // Unnormalized coordinates of observers, used by near field calculation s_obs_coord[_tidx + m * _bdim] = nufft_array->d_obs_coord[_glb_src_idx_start2 + _shared_start2 + _obs_idx + obs_size_dev * m]; // Normalized coordinates of observers, used by far field interpolation _r_norm[m] = (s_obs_coord[_tidx + m * _bdim] - _box_origin[m]) / box_size[m]; } // m } // lagrange_interp_cubic_vector(_Q1, _r_norm, s_obs_amp + _shared_offset, BLOCK_SIZE_PROJ_INTERP); if (_shared_start2 + _obs_idx < _num_src2) { for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_field_amp[_glb_src_idx_start2 + _shared_start2 + _obs_idx + j*obs_size_dev] = _Q1[j]; } __syncthreads(); _shared_start2 += interp_nodes_per_box; } // shared_start2 #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; _d_test[_gid] 
=s_obs_amp[_tidx]; #endif } __global__ void nufft_interp_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; unsigned int const total_num_boxes = nufft_const->total_num_boxes; unsigned int const total_num_boxes_dev = nufft_const->total_num_boxes_dev; // int const problem_size = nufft_const->problem_size; // int const src_size_dev = nufft_const->src_size_dev; int const obs_size_dev = nufft_const->obs_size_dev; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const obs_domain_range = nufft_const->obs_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_obs_coord[BLOCK_SIZE_PROJ_INTERP * 3]; __shared__ FP_TYPE s_obs_amp[BLOCK_SIZE_PROJ_INTERP * FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++ ) _Q1[j] = 0.0f; FP_TYPE _r_norm[3]; int _glb_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _box_origin[3]; int _shared_start2; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + obs_domain_range[m]; } idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); // Field amplitudes on the observer grid for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_obs_amp[_tidx+j*BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// // The following is a combined loop to get far field // Sources: far-field grid // Observers: actual observers // Operations: Lagrange interpolation /////////////////////////////////////////////////////////////////////////////////////// // Number of observers in the current box _num_src2 = nufft_array->d_obs_box_map[_box_idx + 2 * total_num_boxes]; _glb_src_idx_start2 = nufft_array->d_obs_box_map[_box_idx]; _shared_start2 = 0; while (_shared_start2 < _num_src2) { 
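		// Observers of this box are processed in tiles of interp_nodes_per_box: each pass loads up to
		// one tile of observer coordinates and interpolates the grid amplitudes held in s_obs_amp onto them.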
/////////////////////////////////////////////////////////////////////////////////////// // Doing interpolations from grid to actual observers /////////////////////////////////////////////////////////////////////////////////////// // Calculating far field, interpolation from the grid points to the observers for(unsigned int j = 0; j < FIELD_DIM; j++ ) _Q1[j] = 0.0f; if (_shared_start2 + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // Unnormalized coordinates of observers, used by near field calculation s_obs_coord[_tidx + m * _bdim] = nufft_array->d_obs_coord[_glb_src_idx_start2 + _shared_start2 + _obs_idx + obs_size_dev * m]; // Normalized coordinates of observers, used by far field interpolation _r_norm[m] = (s_obs_coord[_tidx + m * _bdim] - _box_origin[m]) / box_size[m]; } } lagrange_interp_linear_vector(_Q1, _r_norm, s_obs_amp + _shared_offset, BLOCK_SIZE_PROJ_INTERP); if (_shared_start2 + _obs_idx < _num_src2) { for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_field_amp[_glb_src_idx_start2 + _shared_start2 + _obs_idx + j*obs_size_dev] = _Q1[j]; } __syncthreads(); _shared_start2 += interp_nodes_per_box; } // shared_start2 #ifdef BEN_DEBUG_MULTI //_d_test[_gid] = _Q1[0]; #endif #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = _Q1[0]; #endif } //BEN ELIMINATED THE D_NEAR_BOX_LIST IN THIS SUBROUTINE //THE MULTIPROCESSOR OCCUPANCY LIMITATION IS REGISTER=40 -> 75% OCCUPANCY. REGISTER = 32 WILL LEAD TO 100% OCCUPANCY __global__ void nufft_exact_near_field(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _box_idx; int const _block_size = BLOCK_SIZE_NEAR; int const src_size_dev = nufft_const->src_size_dev; int const obs_size_dev = src_size_dev; // int const obs_size_dev = nufft_const->obs_size_dev; FP_TYPE const epsilon = nufft_const->epsilon; _local_box_idx = _bid; if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_coord[BLOCK_SIZE_NEAR * 3]; __shared__ FP_TYPE s_obs_coord[BLOCK_SIZE_NEAR * 3]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_NEAR * FIELD_DIM]; __shared__ int s_near_box_list[BLOCK_SIZE_NEAR]; //Stores near field on observer FP_TYPE _Q2[FIELD_DIM]; for(unsigned int j = 0; j < FIELD_DIM; j++) _Q2[j] = 0.0f; FP_TYPE _r1[3]; int _glb_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _magn[FIELD_DIM]; int _shared_start2 = 0; //BEN ADDED THESE TWO PARAMETERS FOR NEAR BOX LIST unsigned int const near_correct_layer = nufft_const->near_correct_layer; int _total_num_near_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); int _near_box_idx = 0; //int _total_near_box = nufft_array->d_near_box_list[_local_box_idx * total_num_near_box_per_box_p1]; int _near_box_counter = 0; int _g_addr_start = 0; int _s_addr_start = 0; int _s_addr_end = 0; int _near_src = 0; bool _fetch; ///////////////////////////////////////////////////////////////////////////////////// // The following is a loop to get accurate near field on observers // Sources: sources in near-field boxes // Observers: actual observers // Pay attention that the sources in either source box or observer box or both // can be greater than number of thread per box 
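	// Both lists are therefore streamed through shared memory in tiles of BLOCK_SIZE_NEAR: the
	// outer loop tiles the observers of the current box, while the inner loops stage the near-box
	// list and fetch batches of sources from those boxes.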
/////////////////////////////////////////////////////////////////////////////////////// // Number of observers in the current box _num_src2 = nufft_array->d_obs_box_map[_box_idx + 2 * nufft_const->total_num_boxes]; _glb_src_idx_start2 = nufft_array->d_obs_box_map[_box_idx]; while (_shared_start2 < _num_src2)//if shared start is smaller than the num of srcs inside current box //empty src box will stop working here { #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif for(unsigned int j = 0; j < FIELD_DIM; j++) _Q2[j] = 0.0f; if (_shared_start2 + _tidx < _num_src2) { for (int m = 0; m < 3; m++) { // Unnormalized coordinates of observers, used by near field calculation s_obs_coord[_tidx + m * _bdim] = nufft_array->d_obs_coord[_glb_src_idx_start2 + _shared_start2 + _tidx + obs_size_dev * m]; } // m } // int s_near_list_start = 0; /////////////////////////////////////////////////////////////////////////////////// // Doing direct summation to get accurate near field ///////////////////////////////////////////////////////////////////////////////////// while( s_near_list_start < _total_num_near_box ){ if( s_near_list_start + _tidx < _total_num_near_box ){ _near_box_idx = near_to_glb_idx(s_near_list_start + _tidx, _box_idx, near_correct_layer, nufft_const->num_boxes);//if out of computational domain, return -1 if( _near_box_idx < 0 ) s_near_box_list[_tidx] = -1; else{ if( nufft_array->d_obs_box_map[_near_box_idx + 2 * nufft_const->total_num_boxes] > 0)//this function has not been tested s_near_box_list[_tidx] = _near_box_idx; else s_near_box_list[_tidx] = -1; } } else{ // because this is checked inside the inner while loop s_near_box_list[_tidx] = -1; } __syncthreads(); _near_box_counter = -1; _g_addr_start = 0; _s_addr_start = 0; _s_addr_end = 0; _fetch = false; // The grand loop while (_s_addr_end >= 0)//fetch all near srcs { __syncthreads(); if (_tidx < _s_addr_end) { for (int m = 0; m < 3; m++) { s_src_coord[_tidx + m * _block_size] = nufft_array->d_obs_coord[_g_addr_start + _tidx + src_size_dev * m]; } // m for(unsigned int j = 0; j < FIELD_DIM; j++) s_src_amp[_tidx+j*BLOCK_SIZE_NEAR] = nufft_array->d_src_amp[_g_addr_start + _tidx + j*src_size_dev]; } #ifdef _GPU_D_TEST for (int i = _s_addr_start; i < _block_size && i < _s_addr_end; i++) { _d_test[_gid] += s_src_amp[i]; } #endif // ready to move on to next near box? //NEED MORE CONSIDERATION IF _near_box_counter needs the last two conditions!!!//////////////////////s_near_list_start + _tidx < _total_num_near_box if (_s_addr_end - _block_size < 0 && _near_box_counter + 1 < _block_size && _near_box_counter+1 <= _total_num_near_box-s_near_list_start)//_near_box_counter + 1 < _total_num_near_box && { _fetch = true; } while (_fetch)// until shared mem is full or run out of near box { // fetch new boxes until the shared memory is full // update status variables _near_box_counter++;//move on to next near box //_near_box_idx = nufft_array->d_near_box_list[_near_box_counter + 1 + _local_box_idx * total_num_near_box_per_box_p1]; _near_box_idx = s_near_box_list[_near_box_counter]; if( _near_box_counter + 1 >= _block_size || _near_box_counter + 1 == _total_num_near_box-s_near_list_start)//_near_box_counter+1 >= _total_num_near_box || SHOULD CHECK IT IS LARGER THAN BLOCK SIZE?? 
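					// stop fetching once the per-pass box counter reaches the block size or the last
					// near box remaining in the staged list has been consumed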
_fetch = false; if(_near_box_idx < 0) continue; //_near_src: total num of srcs in the near box _near_src = nufft_array->d_obs_box_map[_near_box_idx + nufft_const->total_num_boxes * 2]; //_g_addr_start: start index of srcs in the near box _g_addr_start = nufft_array->d_obs_box_map[_near_box_idx]; //record the start and end index in the context of shared memory _s_addr_start = _s_addr_end; _s_addr_end = _s_addr_end + _near_src; // Shared memory is full if (_s_addr_end >= _block_size)// || _near_box_counter + 1 >= _total_num_near_box { _fetch = false; } if (_tidx >= _s_addr_start && _tidx < _s_addr_end) { for (int m = 0; m < 3; m++) { s_src_coord[_tidx + m * _block_size] = nufft_array->d_obs_coord[_g_addr_start + _tidx - _s_addr_start + src_size_dev * m]; } // m for(unsigned int j = 0; j < FIELD_DIM; j++) s_src_amp[_tidx+j*BLOCK_SIZE_NEAR] = nufft_array->d_src_amp[_g_addr_start - _s_addr_start + _tidx + j*src_size_dev]; } } // fetch __syncthreads(); // do calculations for (int i = 0; i < _block_size && i < _s_addr_end; i++) { for (int m = 0; m < 3; m++) { _r1[m] = s_obs_coord[_tidx + m * _block_size] - s_src_coord[i + m * _block_size]; } // m for(unsigned int j = 0; j < FIELD_DIM; j++) _magn[j] = s_src_amp[i+j*BLOCK_SIZE_NEAR]; get_field_static_vector(_r1, _magn, _Q2, epsilon); } // i _s_addr_end -= _block_size; _g_addr_start += _near_src - _s_addr_end > _block_size ? _block_size : _near_src - _s_addr_end; } // _loop to fetch more srcs into shared memory //#ifdef BEN_DEBUG // _d_test[_gid] = _total_num_near_box-s_near_list_start; //#endif s_near_list_start += BLOCK_SIZE_NEAR; }//while loop to fetch more near boxes if (_shared_start2 + _tidx < _num_src2) { for(unsigned int j = 0; j < FIELD_DIM; j++) nufft_array->d_field_amp[_glb_src_idx_start2 + _shared_start2 + _tidx + j*obs_size_dev] += _Q2[j]; } // _loop to fetch more obs into shared memory __syncthreads(); _shared_start2 += _block_size; } // shared_start2 } }
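/*
 * Illustrative launch sketch (not part of the original host code): the kernels above index
 * blocks as blockIdx.x + blockIdx.y * gridDim.x and expect one block per local box, so a
 * minimal host-side call might look like the following. The names total_num_boxes_dev,
 * d_test, d_array, d_const and d_param are assumptions used only to show the mapping; the
 * real grid shaping and argument setup live in the NufftStaticVectorGpu host code.
 *
 *   dim3 block(BLOCK_SIZE_NEAR);        // one thread per staged source/observer slot
 *   dim3 grid(total_num_boxes_dev, 1);  // one block per local box (may be split over y)
 *   nufft_exact_near_field<<<grid, block>>>(d_test, d_array, d_const, d_param);
 */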
c8f90b64d1e263aed6cf76ba0827f8fbbce147da.cu
/* * Copyright 2007-2012 Computational Electromagnetic Group (CEM), Dept. ECE, UC San Diego. All rights reserved. * Author: Shaojing Li, March 2012 */ /* * nufft_static_vector_gpu_kernel.cu: kernels used by NufftStaticVectorGpu */ #include "interp_kernel.h" #include "nufft_static_vector_gpu_kernel.h" //#define BEN_DEBUG //#define BEN_DEBUG_MULTI //#define BEN_NEW_METHOD //#define BEN_DEBUG_FFT namespace NBODYFAST_NS{ /* * most kernels have a cubic verison and a linear version, corresponding to two different interpolation scheme */ __global__ void nufft_project_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; // thread index within a block unsigned int _bdim = blockDim.x; // number of threads per block unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; // block index (the grid might be 2D) unsigned int _local_box_idx; // local box index (index of the box among all boxes on the same device) unsigned int _box_idx; // global box index (index of the box as in the entire computational domain) unsigned int _box_idx_dim[3]; // 3D index of boxes (used to determine the location of box) unsigned int _box_sub_idx = 0; // sub index of a box (used when a box is treated by multiple blocks) unsigned int _obs_idx; // observer index (this is the index of grid points in this kernel) unsigned int _obs_idx_dim[3]; // 3D index of observers (used to determin the coordinates of each grid points) __shared__ FP_TYPE s_src_coord[BLOCK_SIZE_PROJ_INTERP * 3]; // shared memory array to store the source coordinates __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_PROJ_INTERP * FIELD_DIM]; //shared memory array to store the source amplitudes // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; int const src_size_dev = nufft_const->src_size_dev; // number of sources on this device (so the length of source arrays) int * const num_boxes = nufft_const->num_boxes; // number of total boxes (across the entire domain) FP_TYPE * const box_size = nufft_const->box_size; // as it is shown...the box size FP_TYPE * const src_domain_range = nufft_const->src_domain_range; // computational domain boundaries and sizes FP_TYPE _P[FIELD_DIM]; // used to store final fields. declared as an array as it might have more than 1 components for vector fields for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; int _global_src_idx_start2 = 0; // the offset of source coord/amp in the global source arrays int _num_src2 = 0; // number of sources in the current box FP_TYPE _box_origin[3]; // the front-top-left corner of the current box FP_TYPE _interp_coeff; // interpolation coefficients FP_TYPE _r_norm[3]; // normalized coordinates of a source within a box. 
used to do the Lagrange interpolation int _shared_start = 0; // a temporary variable for serializing tasks while number of threads is less than number of sources int _shared_offset; // indicate the range of shared memory for current box (when multiple boxes are handled by the same block) if (param->num_blk_per_box > 0) // number of block per box is greater than or equal to 1 { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; _shared_offset = 0; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) // number of boxes per block is greater than 1 { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_array->d_src_box_list_inv[0]) return; // if _local_box_idx is greater than total number of boxes the current device should process, then just terminate the current thread _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; // otherwise, get the global box idx from d_src_box_list if (_box_idx >= nufft_const->total_num_boxes) return; // I think this is an unnecessary check idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); // get 3D observer index from 1D observer index // get 3D box index from 1D box index _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; // calculate the position of the box } // The starting index of sources in the current box _global_src_idx_start2 = nufft_array->d_src_box_map[_box_idx]; // Number of sources in the current box _num_src2 = nufft_array->d_src_box_map[_box_idx + nufft_const->total_num_boxes * 2]; while (_shared_start < _num_src2) { // Load/Calculate the normalized coordinates and amplitudes of sources to shared memory if (_shared_start + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // s_src_coord stores the coordinates that have been normalized to [0, 1]; s_src_coord[_tidx + m * _bdim] = (nufft_array->d_src_coord[_global_src_idx_start2 + (_shared_start + _obs_idx) + src_size_dev * m] - _box_origin[m]) / box_size[m]; } for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx + j * BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_src_amp[_global_src_idx_start2 + _shared_start + _obs_idx + j * src_size_dev]; } __syncthreads(); // Loop around current piece of source (not more than number of threads per block) // From source to source grid for (int i = 0; (i < interp_nodes_per_box) && (i + _shared_start < _num_src2); i++) { for (int m = 0; m < 3; m++) { _r_norm[m] = s_src_coord[i + _shared_offset + m * _bdim]; } lagrange_project_cubic(_interp_coeff, _r_norm, _obs_idx_dim); for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += _interp_coeff * s_src_amp[i + _shared_offset + j * BLOCK_SIZE_PROJ_INTERP]; } // i __syncthreads(); _shared_start += interp_nodes_per_box; } // _shared_start for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_src_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j * nufft_const->total_num_boxes_dev * interp_nodes_per_box] = _P[j]; #ifdef _GPU_D_TEST _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] = 0.00f; #endif #ifdef 
_GPU_D_TEST _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] += _P[0]; // * _bdim;//_interp_coeff; #endif } __global__ void nufft_project_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _local_box_idx; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; __shared__ FP_TYPE s_src_coord[BLOCK_SIZE_PROJ_INTERP * 3]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_PROJ_INTERP * FIELD_DIM]; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; int const src_size_dev = nufft_const->src_size_dev; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; FP_TYPE _P[FIELD_DIM]; for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; int _global_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _box_origin[3]; FP_TYPE _interp_coeff; FP_TYPE _r_norm[3]; int _shared_start = 0; int _shared_offset; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; _shared_offset = 0; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_array->d_src_box_list_inv[0]) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; if (_box_idx >= nufft_const->total_num_boxes) return; idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } // The starting index of sources in the current box _global_src_idx_start2 = nufft_array->d_src_box_map[_box_idx]; // Number of sources in the current box _num_src2 = nufft_array->d_src_box_map[_box_idx + nufft_const->total_num_boxes * 2]; while (_shared_start < _num_src2) { // Load/Calculate the normalized coordinates and amplitudes of sources to shared memory if (_shared_start + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // s_src_coord stores the coordinates that have been normalized to [0, 1]; s_src_coord[_tidx + m * _bdim] = (nufft_array->d_src_coord[_global_src_idx_start2 + (_shared_start + _obs_idx) + src_size_dev * m] - _box_origin[m]) / box_size[m]; } for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx + j * BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_src_amp[_global_src_idx_start2 + _shared_start + _obs_idx + j * src_size_dev]; } __syncthreads(); // Loop around current piece of source (not more than number of threads per block) // From source to source grid for (int i = 0; (i < interp_nodes_per_box) && (i + _shared_start < _num_src2); i++) { for (int m = 0; m < 3; m++) { _r_norm[m] = s_src_coord[i + _shared_offset + m * _bdim]; } 
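			// lagrange_project_linear evaluates the linear interpolation weight of grid node _obs_idx_dim
			// at the normalized source position _r_norm and returns it in _interp_coeff; the loop that
			// follows then accumulates each field component of the source onto this node with that weight.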
lagrange_project_linear(_interp_coeff, _r_norm, _obs_idx_dim); for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += _interp_coeff * s_src_amp[i + _shared_offset + j * BLOCK_SIZE_PROJ_INTERP]; } // i __syncthreads(); _shared_start += interp_nodes_per_box; } // _shared_start for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_src_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j * nufft_const->total_num_boxes_dev * interp_nodes_per_box] = _P[j]; #ifdef _GPU_D_TEST _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] = 9.99f; _d_test[_bid * BLOCK_SIZE_PROJ_INTERP + _tidx] = _P[0]; #endif } __global__ void nufft_fft_prep_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { const unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; // global thread index const unsigned int interp_nodes_per_box = 64; FP_TYPE _P[FIELD_DIM]; for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; if (_gid >= nufft_const->total_num_grid_pts) return; // terminate extra threads launched int _grid_idx_dim[3]; // 3D global grid point index int _local_idx_dim[6]; // 3D local grid point index (for each grid point, there could be two different boxes contributing to the field along each dimension) int _box_idx_dim[6]; // for each grid point, there could be two boxes contributing to it along each dimension int _box_idx[9]; // _box_idx[8] stores number of boxes contributing to the current grid points. _box_idx[0~7] stores the contributing box index int _local_idx[9]; // _box_idx[0~7] stores the contributing index of the grid points of the contributing box for (int i = 0; i < 9; i++) { _box_idx[i] = -1; _local_idx[i] = -1; } // 3D index of current grid points in the entire projection grid _grid_idx_dim[2] = _gid / (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1]); _grid_idx_dim[1] = (_gid - _grid_idx_dim[2] * (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1])) / nufft_const->num_grid_pts[0]; _grid_idx_dim[0] = _gid % nufft_const->num_grid_pts[0]; // the position of current grid points in the FFT grid (FFT grid is padded so is larger than the projection grid) int _cufft_in_addr = _grid_idx_dim[2] * nufft_const->fft_size[0] * nufft_const->fft_size[1] + _grid_idx_dim[1] * nufft_const->fft_size[0] + _grid_idx_dim[0]; int _u_src_grid_addr = 0; grid_idx_to_local_idx_cubic(_grid_idx_dim, _box_idx_dim, _local_idx_dim, nufft_const->num_boxes); // get local index int _dim_idx[3]; int _cnt1 = 0; int _box_idx_temp; // there will be 8 possible boxes that overlaps at the current grid points // however, there might be less if the current grid points is not at the corner of a box, or the box is at the corner, on the edge or surface of the entire computational domain. 
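	// (for example, a node interior to a box belongs to a single box, a node on a shared face to
	// two, a node on a shared edge to four, and only box corners away from the domain boundary
	// are shared by all eight)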
// the following triple loops judges how many boxes are valid and really contributing to the current grid point for (_dim_idx[0] = 0; _dim_idx[0] < 2; _dim_idx[0]++) for (_dim_idx[1] = 0; _dim_idx[1] < 2; _dim_idx[1]++) for (_dim_idx[2] = 0; _dim_idx[2] < 2; _dim_idx[2]++) { if (_box_idx_dim[_dim_idx[0]] >= 0 && _box_idx_dim[2 + _dim_idx[1]] >= 0 && _box_idx_dim[4 + _dim_idx[2]] >= 0) { _box_idx_temp = _box_idx_dim[_dim_idx[0]] + _box_idx_dim[2 + _dim_idx[1]] * nufft_const->num_boxes[0] + _box_idx_dim[4 + _dim_idx[2]] * nufft_const->num_boxes[0] * nufft_const->num_boxes[1]; _box_idx[_cnt1] = _box_idx_temp; int _local_idx_dim_temp[3]; _local_idx_dim_temp[0] = _local_idx_dim[_dim_idx[0]]; _local_idx_dim_temp[1] = _local_idx_dim[2 + _dim_idx[1]]; _local_idx_dim_temp[2] = _local_idx_dim[4 + _dim_idx[2]]; local_idx_dim_to_local_idx_cubic(&_local_idx[_cnt1], _local_idx_dim_temp); _cnt1++; } } _box_idx[8] = _cnt1; #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif // add the projected amplitudes of all overlapping grid points together for (int i = 0; i < _box_idx[8]; i++) { _u_src_grid_addr = _box_idx[i] * interp_nodes_per_box + _local_idx[i]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += nufft_array->d_u_src_grid[_u_src_grid_addr + j * nufft_const->total_num_boxes * interp_nodes_per_box]; #ifdef _GPU_D_TEST _d_test[_gid] = size_t(_u_src_grid_addr); #endif } for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_fft_inplace_r2c_FP[_cufft_in_addr + j*nufft_const->total_num_fft_pts] = _P[j]; } ///FFT CHANGE __global__ void nufft_fft_prep_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { const unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; const unsigned int interp_nodes_per_box = 8; FP_TYPE _P[FIELD_DIM]; for(unsigned int i = 0; i < FIELD_DIM; i++ ) _P[i] = 0.0f; if (_gid >= nufft_const->total_num_grid_pts) return; int _grid_idx_dim[3]; int _local_idx_dim[6]; int _box_idx_dim[6]; int _box_idx[9]; int _local_idx[9]; for (int i = 0; i < 9; i++) { _box_idx[i] = -1; _local_idx[i] = -1; } _grid_idx_dim[2] = _gid / (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1]); _grid_idx_dim[1] = (_gid - _grid_idx_dim[2] * (nufft_const->num_grid_pts[0] * nufft_const->num_grid_pts[1])) / nufft_const->num_grid_pts[0]; _grid_idx_dim[0] = _gid % nufft_const->num_grid_pts[0]; int _cufft_in_addr = _grid_idx_dim[2] * nufft_const->fft_size[0] * nufft_const->fft_size[1] + _grid_idx_dim[1] * nufft_const->fft_size[0] + _grid_idx_dim[0]; int _u_src_grid_addr = 0; grid_idx_to_local_idx_linear(_grid_idx_dim, _box_idx_dim, _local_idx_dim, nufft_const->num_boxes); int _dim_idx[3]; int _cnt1 = 0; int _box_idx_temp; for (_dim_idx[0] = 0; _dim_idx[0] < 2; _dim_idx[0]++) for (_dim_idx[1] = 0; _dim_idx[1] < 2; _dim_idx[1]++) for (_dim_idx[2] = 0; _dim_idx[2] < 2; _dim_idx[2]++) { if (_box_idx_dim[_dim_idx[0]] >= 0 && _box_idx_dim[2 + _dim_idx[1]] >= 0 && _box_idx_dim[4 + _dim_idx[2]] >= 0) { _box_idx_temp = _box_idx_dim[_dim_idx[0]] + _box_idx_dim[2 + _dim_idx[1]] * nufft_const->num_boxes[0] + _box_idx_dim[4 + _dim_idx[2]] * nufft_const->num_boxes[0] * nufft_const->num_boxes[1]; _box_idx[_cnt1] = _box_idx_temp; int _local_idx_dim_temp[3]; _local_idx_dim_temp[0] = _local_idx_dim[_dim_idx[0]]; _local_idx_dim_temp[1] = _local_idx_dim[2 + _dim_idx[1]]; _local_idx_dim_temp[2] = _local_idx_dim[4 + _dim_idx[2]]; local_idx_dim_to_local_idx_linear(&_local_idx[_cnt1], _local_idx_dim_temp); _cnt1++; } } 
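	// as in the cubic version, _box_idx[8] records how many of the up-to-eight adjacent boxes
	// actually contribute to this grid point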
_box_idx[8] = _cnt1; for (int i = 0; i < _box_idx[8]; i++) { _u_src_grid_addr = _box_idx[i] * interp_nodes_per_box + _local_idx[i]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) _P[j] += nufft_array->d_u_src_grid[_u_src_grid_addr + j * nufft_const->total_num_boxes * interp_nodes_per_box]; } for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_fft_inplace_r2c_FP[_cufft_in_addr + j*nufft_const->total_num_fft_pts] = _P[j]; #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; // _d_test[_gid] = nufft_array->d_u_src_grid[_gid]; #endif } //BEN ELIMINATED THE d_fft_inplace_b __global__ void nufft_convolution_static_vector(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // convolution is simple. just multiply the transformed matrix and impedance matrix, entry-by-entry const unsigned int _tid = threadIdx.x; const unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; //const unsigned int S_GREEN_SIZE = __shared__ CUFFT_COMPLEX_TYPE s_u_src_grid_k[FIELD_DIM * BLOCK_SIZE_CONV]; __shared__ FP_TYPE s_g_grid_k[ FIELD_DIM*(FIELD_DIM+1)/2 * BLOCK_SIZE_CONV];//IMP MAT CHANGE!!!SHOULD BE LESS!!!!IF THIS SUBROUTINE IS LIMITED BY SHARED MEMORY, THEN //WE SHOULD SIMPLY NOT USE SHARED MEMORY HERE if (_gid >= (nufft_const->total_num_fft_r2c_pts)) return; int i = _gid % nufft_const->fft_r2c_size[0]; int k = _gid / (nufft_const->fft_r2c_size[0]*nufft_const->fft_r2c_size[1]); int j = (_gid - k*nufft_const->fft_r2c_size[0]*nufft_const->fft_r2c_size[1])/nufft_const->fft_r2c_size[0]; int flag_y = 1; int flag_z = 1; if( j >= nufft_const->green_size[1] ) { j = nufft_const->fft_size[1] - j; flag_y = -1;} if( k >= nufft_const->green_size[2]) { k = nufft_const->fft_size[2] - k; flag_z = -1;} int _green_id = i + j*nufft_const->green_size[0] + k*nufft_const->green_size[0]*nufft_const->green_size[1]; for(unsigned int j = 0; j < FIELD_DIM; j++) s_u_src_grid_k[_tid+j*BLOCK_SIZE_CONV] = nufft_array->d_fft_inplace_r2c[_gid+j*nufft_const->total_num_fft_r2c_pts]; //the Green's func assignment in shared memory is not compatible to FIELD_DIM != 3 s_g_grid_k[_tid ] = nufft_array->d_k_imp_mat_data_gpu[_green_id]; //xx s_g_grid_k[_tid+ BLOCK_SIZE_CONV] = flag_y * nufft_array->d_k_imp_mat_data_gpu[_green_id + nufft_const->total_num_green_pts]; //xy s_g_grid_k[_tid+2*BLOCK_SIZE_CONV] = flag_z * nufft_array->d_k_imp_mat_data_gpu[_green_id + 2*nufft_const->total_num_green_pts]; //xz s_g_grid_k[_tid+3*BLOCK_SIZE_CONV] = nufft_array->d_k_imp_mat_data_gpu[_green_id + 3*nufft_const->total_num_green_pts]; //yy s_g_grid_k[_tid+4*BLOCK_SIZE_CONV] = flag_y*flag_z * nufft_array->d_k_imp_mat_data_gpu[_green_id + 4*nufft_const->total_num_green_pts]; //yz s_g_grid_k[_tid+5*BLOCK_SIZE_CONV] = nufft_array->d_k_imp_mat_data_gpu[_green_id + 5*nufft_const->total_num_green_pts]; //zz FP_TYPE real0 = s_u_src_grid_k[_tid].x*s_g_grid_k[_tid] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV]; FP_TYPE img0 = s_u_src_grid_k[_tid].y*s_g_grid_k[_tid] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV]; FP_TYPE real1 = s_u_src_grid_k[_tid].x*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+3*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV]; FP_TYPE img1 = 
s_u_src_grid_k[_tid].y*s_g_grid_k[_tid+BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+3*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV]; FP_TYPE real2 = s_u_src_grid_k[_tid].x*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].x*s_g_grid_k[_tid+5*BLOCK_SIZE_CONV]; FP_TYPE img2 = s_u_src_grid_k[_tid].y*s_g_grid_k[_tid+2*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+4*BLOCK_SIZE_CONV] + s_u_src_grid_k[_tid+2*BLOCK_SIZE_CONV].y*s_g_grid_k[_tid+5*BLOCK_SIZE_CONV]; //the division here can be put into the Greens'func preprocessing nufft_array->d_fft_inplace_r2c[_gid].x = real0/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid].y = img0/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+nufft_const->total_num_fft_r2c_pts].x = real1/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+nufft_const->total_num_fft_r2c_pts].y = img1/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+2*nufft_const->total_num_fft_r2c_pts].x = real2/FP_TYPE(nufft_const->total_num_fft_pts); nufft_array->d_fft_inplace_r2c[_gid+2*nufft_const->total_num_fft_r2c_pts].y = img2/FP_TYPE(nufft_const->total_num_fft_pts); #ifdef BEN_DEBUG_FFT _d_test[_gid] = nufft_const->green_size[0]*1000+nufft_const->fft_size[0];//s_g_grid_k[_tid]; #endif #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = nufft_array->d_fft_inplace_b[_gid].x; #endif } //BEN ELIMINATED THE d_fft_inplace_b __global__ void nufft_fft_postp_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // post processing does opposite thing as the pre processing unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; const unsigned int interp_nodes_per_box = 64; if (param->num_blk_per_box > 0) { _box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) { _box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_box_idx >= nufft_const->total_num_boxes) return; // Get the Index number of the observer box _box_idx_dim[2] = _box_idx / (nufft_const->num_boxes[0] * nufft_const->num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (nufft_const->num_boxes[0] * nufft_const->num_boxes[1])) / nufft_const->num_boxes[0]; _box_idx_dim[0] = _box_idx % nufft_const->num_boxes[0]; // Get the global index number of the grid point unsigned int obs_idx_glb; idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); obs_idx_dim_to_obs_idx_glb_cubic(obs_idx_glb, _box_idx_dim, _obs_idx_dim, nufft_const->fft_size); /*nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_b[obs_idx_glb].x;*/ //nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_f[obs_idx_glb].x; for(unsigned int j = 0; j < FIELD_DIM; j++) 
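		// copy each of the FIELD_DIM field components from the padded in-place FFT grid back into
		// the box-ordered observer-grid array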
nufft_array->d_u_obs_grid[_gid+j*nufft_const->total_num_boxes*interp_nodes_per_box] = nufft_array->d_fft_inplace_r2c_FP[obs_idx_glb+j*nufft_const->total_num_fft_pts]; #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = nufft_array->d_u_obs_grid[_gid]; #endif } //BEN ELIMINATED THE d_fft_inplace_b __global__ void nufft_fft_postp_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; const unsigned int interp_nodes_per_box = 8; if (param->num_blk_per_box > 0) { _box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; } if (param->num_box_per_blk > 0) { _box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_box_idx >= nufft_const->total_num_boxes) return; // Get the Index number of the observer box _box_idx_dim[2] = _box_idx / (nufft_const->num_boxes[0] * nufft_const->num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (nufft_const->num_boxes[0] * nufft_const->num_boxes[1])) / nufft_const->num_boxes[0]; _box_idx_dim[0] = _box_idx % nufft_const->num_boxes[0]; // Get the global index number of the grid point unsigned int obs_idx_glb; idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); obs_idx_dim_to_obs_idx_glb_linear(obs_idx_glb, _box_idx_dim, _obs_idx_dim, nufft_const->fft_size); //nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_b[obs_idx_glb].x; //nufft_array->d_u_obs_grid[_gid] = nufft_array->d_fft_inplace_f[obs_idx_glb].x; for(unsigned int j = 0; j < FIELD_DIM; j++) nufft_array->d_u_obs_grid[_gid+j*nufft_const->total_num_boxes*interp_nodes_per_box] = nufft_array->d_fft_inplace_r2c_FP[obs_idx_glb+j*nufft_const->total_num_fft_pts]; #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = nufft_array->d_u_obs_grid[_gid]; #endif } //!!!!!!!!!!!!!NEAR CHANGE!!!!!!!!!!!!! 
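// ---------------------------------------------------------------------------
// Reference sketch (added for clarity, not part of the original NUFFT kernels):
// nufft_convolution_static_vector above multiplies each complex source vector
// u(k) by the real, symmetric Green's tensor G(k), stored as its 6 unique
// entries (xx, xy, xz, yy, yz, zz), and folds the 1/total_num_fft_pts
// inverse-FFT normalization into the product. The helper below reproduces that
// arithmetic for a single k-point so the kernel output can be spot-checked;
// the helper name is illustrative and it assumes FIELD_DIM == 3.
// ---------------------------------------------------------------------------
__host__ __device__ inline void convolve_one_k_point_ref(const CUFFT_COMPLEX_TYPE u[3],  // u(k) for the x, y, z field components
                                                         const FP_TYPE g[6],             // G(k): xx, xy, xz, yy, yz, zz (octant sign flags already applied)
                                                         FP_TYPE inv_num_fft_pts,        // 1 / total_num_fft_pts
                                                         CUFFT_COMPLEX_TYPE out[3])
{
    // out = (G * u) / total_num_fft_pts, applied separately to real and imaginary parts
    out[0].x = (u[0].x*g[0] + u[1].x*g[1] + u[2].x*g[2]) * inv_num_fft_pts;
    out[0].y = (u[0].y*g[0] + u[1].y*g[1] + u[2].y*g[2]) * inv_num_fft_pts;
    out[1].x = (u[0].x*g[1] + u[1].x*g[3] + u[2].x*g[4]) * inv_num_fft_pts;
    out[1].y = (u[0].y*g[1] + u[1].y*g[3] + u[2].y*g[4]) * inv_num_fft_pts;
    out[2].x = (u[0].x*g[2] + u[1].x*g[4] + u[2].x*g[5]) * inv_num_fft_pts;
    out[2].y = (u[0].y*g[2] + u[1].y*g[4] + u[2].y*g[5]) * inv_num_fft_pts;
}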
#ifndef BEN_NEW_METHOD __global__ void nufft_correct_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // nufft_correct_static_vector_cubic calculate the inaccurate field generated by near field sources again and subtract them from the total field // we don't have to project the amplitudes from source to source grid again since we already have them stored in the array d_u_src_grid_dev unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_CORRECT*FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int i = 0; i < FIELD_DIM; i++) _Q1[i] = 0.0f; FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif // Ben new bound method //for (_near_box_idx_dim[2] = nufft_array->d_near_bound_list[6*_box_idx+2]; _near_box_idx_dim[2] <= nufft_array->d_near_bound_list[6*_box_idx+5]; _near_box_idx_dim[2]++){ // for(_near_box_idx_dim[1] = nufft_array->d_near_bound_list[6*_box_idx+1]; _near_box_idx_dim[1] <= nufft_array->d_near_bound_list[6*_box_idx+4]; _near_box_idx_dim[1]++){ // for(_near_box_idx_dim[0] = nufft_array->d_near_bound_list[6*_box_idx+0]; _near_box_idx_dim[0] <= nufft_array->d_near_bound_list[6*_box_idx+3]; _near_box_idx_dim[0]++){ // // For each box, d_NearBoxListThread has 
28 entries (maybe empty), while the first entry is // // the total number of near boxes. // _near_box_idx = _near_box_idx_dim[0] + _near_box_idx_dim[1]*num_boxes[0] + _near_box_idx_dim[2]*num_boxes[0]*num_boxes[1];//near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); // // Current near box origins // for (int m = 0; m < 3; m++) // _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; // __syncthreads(); // _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; // s_src_amp[_tidx] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx]; // __syncthreads(); // /////////////////////////////////////////////////////////////////////////////////////// // //// Get the inaccurate near field. // //// Source: near-list box grid points // //// Observer: actual box grid points // //// Interaction: direct Green's function // ///////////////////////////////////////////////////////////////////////////////////////// // for (int m = 0; m < 3; m++) // { // // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. // _r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; // } // // cubic : 28% of computational time of this subroutine goes here // direct_grid_interact_cubic_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon); // //__syncthreads(); // } // } //}// _near_box_counter // Loop around all near boxes, presumbly 27 !!!!CHANGED BY BEN for (int _near_box_counter = 0; _near_box_counter < total_num_near_box_per_box; _near_box_counter++) { // For each box, d_NearBoxListThread has 28 entries (maybe empty), while the first entry is // the total number of near boxes. _near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 || nufft_array->d_src_box_map[_near_box_idx + nufft_const->total_num_boxes * 2] == 0) continue; // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } __syncthreads(); _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// //// Get the inaccurate near field. //// Source: near-list box grid points //// Observer: actual box grid points //// Interaction: direct Green's function ///////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. 
_r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // cubic : 28% of computational time of this subroutine goes here direct_grid_interact_cubic_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon, BLOCK_SIZE_CORRECT); //__syncthreads(); #ifdef _GPU_D_TEST _d_test[_gid] += _Q1[0]; #endif } // _near_box_counter __syncthreads(); // Field amplitudes on the observer grid // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb] - _Q1[0]; // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; #ifdef BEN_DEBUG_MULTI //_d_test[_gid] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx]; #endif } #else __global__ void nufft_correct_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { // nufft_correct_static_vector_cubic calculate the inaccurate field generated by near field sources again and subtract them from the total field // we don't have to project the amplitudes from source to source grid again since we already have them stored in the array d_u_src_grid_dev unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; _local_box_idx = _bid; _obs_idx = _tidx % interp_nodes_per_box; //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[64]; __shared__ int s_near_box_list[BLOCK_SIZE_CORRECT]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = FP_TYPE(0.0f); FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif int near_box_left = total_num_near_box_per_box; int s_near_list_start = 0; #ifdef BEN_DEBUG int tmp_cnt = 0; #endif while( near_box_left > 0 ){ if( _tidx < near_box_left ){ s_near_box_list[_tidx] = near_to_glb_idx(s_near_list_start + _tidx, _bid, near_correct_layer, num_boxes);//_bid can be replaced by _box_idx_dim, because it should be only computed once } else{ // IS IT NECESSARY??? s_near_box_list[_tidx] = -1; } __syncthreads(); // Loop around all near boxes, presumbly 27 !!!!CHANGED BY BEN for (int _near_box_counter = 0; _near_box_counter < near_box_left && _near_box_counter < BLOCK_SIZE_CORRECT; _near_box_counter++) { // For each box, d_NearBoxListThread has 28 entries (maybe empty), while the first entry is // the total number of near boxes. _near_box_idx = s_near_box_list[_near_box_counter]; //_near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 ) continue; #ifdef BEN_DEBUG tmp_cnt++; #endif // Current near box index number _near_box_idx_dim[2] = _near_box_idx / (num_boxes[0] * num_boxes[1]); _near_box_idx_dim[1] = (_near_box_idx - _near_box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _near_box_idx_dim[0] = _near_box_idx % num_boxes[0]; // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// //// Get the inaccurate near field. //// Source: near-list box grid points //// Observer: actual box grid points //// Interaction: direct Green's function ///////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. 
_r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // cubic : 28% of computational time of this subroutine goes here direct_grid_interact_cubic_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon, BLOCK_SIZE_CORRECT); __syncthreads(); #ifdef _GPU_D_TEST _d_test[_gid] += _Q1[0]; #endif } // _near_box_counter near_box_left -= BLOCK_SIZE_CORRECT; s_near_list_start += BLOCK_SIZE_CORRECT; }//while(near_box_left > 0) __syncthreads(); #ifdef BEN_DEBUG //if( s_near_list_start != 0 ) _d_test[_gid] = tmp_cnt;//s_near_box_list[_tidx];//_Q1[0];// #endif if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; } } //!!!!!!!!!!!!!NEAR CHANGE!!!!!!!!!!!!! #endif #ifndef BEN_NEW_METHOD __global__ void nufft_correct_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= nufft_const->total_num_boxes_dev) //nufft_const->total_num_boxes is the number of boxes in current device return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_CORRECT*FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = FP_TYPE(0.0f); FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } 
idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); // Loop around all near boxes, presumbly 27 /*for (int _near_box_counter = 0; _near_box_counter < nufft_array->d_near_box_list[_local_box_idx * total_num_near_box_per_box_p1]; _near_box_counter++) {*/ for( int _near_box_counter = 0; _near_box_counter < total_num_near_box_per_box; _near_box_counter++){ // For each box, d_NearBoxListThread has 28 entries (maybe empty), while the first entry is // the total number of near boxes. //_near_box_idx = nufft_array->d_near_box_list[_near_box_counter + 1 + _local_box_idx * total_num_near_box_per_box_p1]; _near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 || nufft_array->d_src_box_map[_near_box_idx + nufft_const->total_num_boxes * 2] == 0) continue; // Current near box index number /*_near_box_idx_dim[2] = _near_box_idx / (num_boxes[0] * num_boxes[1]); _near_box_idx_dim[1] = (_near_box_idx - _near_box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _near_box_idx_dim[0] = _near_box_idx % num_boxes[0];*/ // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } // Field amplitudes on the source grid _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1];//d_src_box_list_inv has a length of total_num_boxes+1 for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// // Get the inaccurate near field. // Source: near-list box grid points // Observer: actual box grid points // Interaction: direct Green's function /////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. 
_r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // linear: 0% of computational time of this subroutine goes here direct_grid_interact_linear_static_vector(_r1, _Q1, s_src_amp + _shared_offset, cell_size, epsilon, BLOCK_SIZE_CORRECT); } // _near_box_counter __syncthreads(); // Field amplitudes on the observer grid // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb] - _Q1[0]; // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb]; for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; #ifdef BEN_DEBUG_MULTI //_d_test[_gid] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx]; #endif #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = _local_box_idx * interp_nodes_per_box + _obs_idx; #endif } #else __global__ void nufft_correct_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _local_box_idx2; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; unsigned int _obs_idx_dim[3]; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; int const near_correct_layer = nufft_const->near_correct_layer; int const total_num_near_box_per_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); // int const problem_size = nufft_const->problem_size; FP_TYPE const epsilon = nufft_const->epsilon; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const cell_size = nufft_const->cell_size; FP_TYPE * const src_domain_range = nufft_const->src_domain_range; _local_box_idx = _bid; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_amp[8]; __shared__ int s_near_box_list[BLOCK_SIZE_CORRECT]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = FP_TYPE(0.0f); FP_TYPE _r1[3]; FP_TYPE _box_origin[3]; int _near_box_idx = 0; int _near_box_idx_dim[3]; FP_TYPE _near_box_origin[3]; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + src_domain_range[m]; } idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); int near_box_left = total_num_near_box_per_box; int s_near_list_start = 0; #ifdef BEN_DEBUG int tmp_cnt = 0; #endif while( near_box_left > 0 ){ if( _tidx < near_box_left ){ s_near_box_list[_tidx] = near_to_glb_idx(s_near_list_start + _tidx, _bid, near_correct_layer, num_boxes);//_bid can be replaced by _box_idx_dim, because it should be only computed once } else{ // IS IT NECESSARY??? 
s_near_box_list[_tidx] = -1; } __syncthreads(); // Loop around all possible near boxes for( int _near_box_counter = 0; _near_box_counter < near_box_left && _near_box_counter < BLOCK_SIZE_CORRECT; _near_box_counter++){ _near_box_idx = s_near_box_list[_near_box_counter]; //_near_box_idx = near_cnt_to_dim( _near_box_idx_dim, _near_box_counter, _box_idx_dim, near_correct_layer, num_boxes); if( _near_box_idx < 0 ) continue; //tmp_cnt ++; // Current near box index number _near_box_idx_dim[2] = _near_box_idx / (num_boxes[0] * num_boxes[1]); _near_box_idx_dim[1] = (_near_box_idx - _near_box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _near_box_idx_dim[0] = _near_box_idx % num_boxes[0]; // Current near box origins for (int m = 0; m < 3; m++) { _near_box_origin[m] = _near_box_idx_dim[m] * box_size[m] + src_domain_range[m]; } // Field amplitudes on the source grid _local_box_idx2 = nufft_array->d_src_box_list_inv[_near_box_idx + 1]; if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_src_amp[_tidx+j*BLOCK_SIZE_CORRECT] = nufft_array->d_u_src_grid_dev[_local_box_idx2 * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; } __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// // Get the inaccurate near field. // Source: near-list box grid points // Observer: actual box grid points // Interaction: direct Green's function /////////////////////////////////////////////////////////////////////////////////////// for (int m = 0; m < 3; m++) { // _r1 is the bias between the left-top-front corner of the near-list box currently being handled and the actual box grid point. _r1[m] = _near_box_origin[m] - (_box_origin[m] + _obs_idx_dim[m] * cell_size[m]) ; } // linear: 0% of computational time of this subroutine goes here //if( _tidx < interp_nodes_per_box) direct_grid_interact_linear_static_vector(_r1, _Q1, s_src_amp, cell_size, epsilon, BLOCK_SIZE_CORRECT); __syncthreads(); } // _near_box_counter near_box_left -= BLOCK_SIZE_CORRECT; s_near_list_start += BLOCK_SIZE_CORRECT; } //while( near_box_left > 0 ) __syncthreads(); #ifdef BEN_DEBUG //if( s_near_list_start != 0 ) _d_test[_gid] = tmp_cnt;//s_near_box_list[_tidx];//_Q1[0];// #endif // Field amplitudes on the observer grid // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb] - _Q1[0]; // s_obs_amp[_tidx] = d_u_obs_grid[_obs_idx_glb]; if( _tidx < interp_nodes_per_box){ for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box] -= _Q1[j]; } #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = _local_box_idx * interp_nodes_per_box + _obs_idx; #endif } #endif __global__ void nufft_interp_static_vector_cubic(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 64; unsigned int const total_num_boxes = 
nufft_const->total_num_boxes; unsigned int const total_num_boxes_dev = nufft_const->total_num_boxes_dev; int const obs_size_dev = nufft_const->obs_size_dev; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const obs_domain_range = nufft_const->obs_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_obs_coord[BLOCK_SIZE_PROJ_INTERP * 3]; __shared__ FP_TYPE s_obs_amp[BLOCK_SIZE_PROJ_INTERP*FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++) _Q1[j] = 0.0f; FP_TYPE _r_norm[3]; int _glb_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _box_origin[3]; int _shared_start2; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + obs_domain_range[m]; } idx_to_idx_dim_cubic(_obs_idx_dim, _obs_idx); // Field amplitudes on the observer grid, with inaccurate near field subtracted for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_obs_amp[_tidx+j*BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); // Number of observers in the current box _num_src2 = nufft_array->d_obs_box_map[_box_idx + 2 * total_num_boxes]; _glb_src_idx_start2 = nufft_array->d_obs_box_map[_box_idx]; _shared_start2 = 0; while (_shared_start2 < _num_src2) // loop around all sources { /////////////////////////////////////////////////////////////////////////////////////// // Doing interpolations from grid to actual observers /////////////////////////////////////////////////////////////////////////////////////// // // Calculating far field, interpolation from the grid points to the observers for(unsigned int j = 0; j < FIELD_DIM; j++ ) _Q1[j] = 0.0f; if (_shared_start2 + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // Unnormalized coordinates of observers, used by near field calculation s_obs_coord[_tidx + m * _bdim] = nufft_array->d_obs_coord[_glb_src_idx_start2 + _shared_start2 + _obs_idx + obs_size_dev * m]; // Normalized coordinates of observers, used by far field interpolation _r_norm[m] = (s_obs_coord[_tidx + m * _bdim] - _box_origin[m]) / box_size[m]; } // m } // lagrange_interp_cubic_vector(_Q1, _r_norm, s_obs_amp + _shared_offset, BLOCK_SIZE_PROJ_INTERP); if (_shared_start2 + _obs_idx < _num_src2) { for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_field_amp[_glb_src_idx_start2 + _shared_start2 + _obs_idx + j*obs_size_dev] = _Q1[j]; } __syncthreads(); _shared_start2 += interp_nodes_per_box; } // shared_start2 #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; _d_test[_gid] 
=s_obs_amp[_tidx]; #endif } __global__ void nufft_interp_static_vector_linear(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _box_idx; unsigned int _box_idx_dim[3]; unsigned int _box_sub_idx = 0; unsigned int _obs_idx; // unsigned int _obs_idx_glb; unsigned int _obs_idx_dim[3]; // Used only when a block handles multiple boxes int _shared_offset; // Local copies of global constant variables unsigned int const interp_nodes_per_box = 8; unsigned int const total_num_boxes = nufft_const->total_num_boxes; unsigned int const total_num_boxes_dev = nufft_const->total_num_boxes_dev; // int const problem_size = nufft_const->problem_size; // int const src_size_dev = nufft_const->src_size_dev; int const obs_size_dev = nufft_const->obs_size_dev; int * const num_boxes = nufft_const->num_boxes; FP_TYPE * const box_size = nufft_const->box_size; FP_TYPE * const obs_domain_range = nufft_const->obs_domain_range; if (param->num_blk_per_box > 0) { _local_box_idx = _bid / param->num_blk_per_box; _box_sub_idx = _bid % param->num_blk_per_box; _obs_idx = _tidx + _box_sub_idx * _bdim; if (_obs_idx >= interp_nodes_per_box) return; _shared_offset = 0; } if (param->num_box_per_blk > 0) { _local_box_idx = _bid * param->num_box_per_blk + _tidx / interp_nodes_per_box; _box_sub_idx = 0; _obs_idx = _tidx % interp_nodes_per_box; _shared_offset = _tidx / interp_nodes_per_box * interp_nodes_per_box; if (_tidx >= interp_nodes_per_box * param->num_box_per_blk) return; } if (_local_box_idx >= total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_obs_coord[BLOCK_SIZE_PROJ_INTERP * 3]; __shared__ FP_TYPE s_obs_amp[BLOCK_SIZE_PROJ_INTERP * FIELD_DIM]; FP_TYPE _Q1[FIELD_DIM]; // An array stores far field on observer for(unsigned int j = 0; j < FIELD_DIM; j++ ) _Q1[j] = 0.0f; FP_TYPE _r_norm[3]; int _glb_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _box_origin[3]; int _shared_start2; // Get the index number of the observer box _box_idx_dim[2] = _box_idx / (num_boxes[0] * num_boxes[1]); _box_idx_dim[1] = (_box_idx - _box_idx_dim[2] * (num_boxes[0] * num_boxes[1])) / num_boxes[0]; _box_idx_dim[0] = _box_idx % num_boxes[0]; for (int m = 0; m < 3; m++) { _box_origin[m] = _box_idx_dim[m] * box_size[m] + obs_domain_range[m]; } idx_to_idx_dim_linear(_obs_idx_dim, _obs_idx); // Field amplitudes on the observer grid for(unsigned int j = 0; j < FIELD_DIM; j++ ) s_obs_amp[_tidx+j*BLOCK_SIZE_PROJ_INTERP] = nufft_array->d_u_obs_grid_dev[_local_box_idx * interp_nodes_per_box + _obs_idx + j*nufft_const->total_num_boxes_dev*interp_nodes_per_box]; __syncthreads(); /////////////////////////////////////////////////////////////////////////////////////// // The following is a combined loop to get far field // Sources: far-field grid // Observers: actual observers // Operations: Lagrange interpolation /////////////////////////////////////////////////////////////////////////////////////// // Number of observers in the current box _num_src2 = nufft_array->d_obs_box_map[_box_idx + 2 * total_num_boxes]; _glb_src_idx_start2 = nufft_array->d_obs_box_map[_box_idx]; _shared_start2 = 0; while (_shared_start2 < _num_src2) { 
/////////////////////////////////////////////////////////////////////////////////////// // Doing interpolations from grid to actual observers /////////////////////////////////////////////////////////////////////////////////////// // Calculating far field, interpolation from the grid points to the observers for(unsigned int j = 0; j < FIELD_DIM; j++ ) _Q1[j] = 0.0f; if (_shared_start2 + _obs_idx < _num_src2) { for (int m = 0; m < 3; m++) { // Unnormalized coordinates of observers, used by near field calculation s_obs_coord[_tidx + m * _bdim] = nufft_array->d_obs_coord[_glb_src_idx_start2 + _shared_start2 + _obs_idx + obs_size_dev * m]; // Normalized coordinates of observers, used by far field interpolation _r_norm[m] = (s_obs_coord[_tidx + m * _bdim] - _box_origin[m]) / box_size[m]; } } lagrange_interp_linear_vector(_Q1, _r_norm, s_obs_amp + _shared_offset, BLOCK_SIZE_PROJ_INTERP); if (_shared_start2 + _obs_idx < _num_src2) { for(unsigned int j = 0; j < FIELD_DIM; j++ ) nufft_array->d_field_amp[_glb_src_idx_start2 + _shared_start2 + _obs_idx + j*obs_size_dev] = _Q1[j]; } __syncthreads(); _shared_start2 += interp_nodes_per_box; } // shared_start2 #ifdef BEN_DEBUG_MULTI //_d_test[_gid] = _Q1[0]; #endif #ifdef _GPU_D_TEST _d_test[_gid] = 9.99f; _d_test[_gid] = _Q1[0]; #endif } //BEN ELIMINATED THE D_NEAR_BOX_LIST IN THIS SUBROUTINE //THE MULTIPROCESSOR OCCUPANCY LIMITATION IS REGISTER=40 -> 75% OCCUPANCY. REGISTER = 32 WILL LEAD TO 100% OCCUPANCY __global__ void nufft_exact_near_field(FP_TYPE *_d_test, NufftArrayGpuStaticVector *nufft_array, NufftParamGpu *nufft_const, GpuExecParam *param) { unsigned int _tidx = threadIdx.x; unsigned int _bdim = blockDim.x; unsigned int _bid = blockIdx.x + blockIdx.y * gridDim.x; unsigned int _gid = threadIdx.x + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x; unsigned int _local_box_idx; unsigned int _box_idx; int const _block_size = BLOCK_SIZE_NEAR; int const src_size_dev = nufft_const->src_size_dev; int const obs_size_dev = src_size_dev; // int const obs_size_dev = nufft_const->obs_size_dev; FP_TYPE const epsilon = nufft_const->epsilon; _local_box_idx = _bid; if (_local_box_idx >= nufft_const->total_num_boxes_dev) return; _box_idx = nufft_array->d_src_box_list[_local_box_idx + 1]; __shared__ FP_TYPE s_src_coord[BLOCK_SIZE_NEAR * 3]; __shared__ FP_TYPE s_obs_coord[BLOCK_SIZE_NEAR * 3]; __shared__ FP_TYPE s_src_amp[BLOCK_SIZE_NEAR * FIELD_DIM]; __shared__ int s_near_box_list[BLOCK_SIZE_NEAR]; //Stores near field on observer FP_TYPE _Q2[FIELD_DIM]; for(unsigned int j = 0; j < FIELD_DIM; j++) _Q2[j] = 0.0f; FP_TYPE _r1[3]; int _glb_src_idx_start2 = 0; int _num_src2 = 0; FP_TYPE _magn[FIELD_DIM]; int _shared_start2 = 0; //BEN ADDED THESE TWO PARAMETERS FOR NEAR BOX LIST unsigned int const near_correct_layer = nufft_const->near_correct_layer; int _total_num_near_box = (2*near_correct_layer+1)*(2*near_correct_layer+1)*(2*near_correct_layer+1); int _near_box_idx = 0; //int _total_near_box = nufft_array->d_near_box_list[_local_box_idx * total_num_near_box_per_box_p1]; int _near_box_counter = 0; int _g_addr_start = 0; int _s_addr_start = 0; int _s_addr_end = 0; int _near_src = 0; bool _fetch; ///////////////////////////////////////////////////////////////////////////////////// // The following is a loop to get accurate near field on observers // Sources: sources in near-field boxes // Observers: actual observers // Pay attention that the sources in either source box or observer box or both // can be greater than number of thread per box 
/////////////////////////////////////////////////////////////////////////////////////// // Number of observers in the current box _num_src2 = nufft_array->d_obs_box_map[_box_idx + 2 * nufft_const->total_num_boxes]; _glb_src_idx_start2 = nufft_array->d_obs_box_map[_box_idx]; while (_shared_start2 < _num_src2)//if shared start is smaller than the num of srcs inside current box //empty src box will stop working here { #ifdef _GPU_D_TEST _d_test[_gid] = 0.00f; #endif for(unsigned int j = 0; j < FIELD_DIM; j++) _Q2[j] = 0.0f; if (_shared_start2 + _tidx < _num_src2) { for (int m = 0; m < 3; m++) { // Unnormalized coordinates of observers, used by near field calculation s_obs_coord[_tidx + m * _bdim] = nufft_array->d_obs_coord[_glb_src_idx_start2 + _shared_start2 + _tidx + obs_size_dev * m]; } // m } // int s_near_list_start = 0; /////////////////////////////////////////////////////////////////////////////////// // Doing direct summation to get accurate near field ///////////////////////////////////////////////////////////////////////////////////// while( s_near_list_start < _total_num_near_box ){ if( s_near_list_start + _tidx < _total_num_near_box ){ _near_box_idx = near_to_glb_idx(s_near_list_start + _tidx, _box_idx, near_correct_layer, nufft_const->num_boxes);//if out of computational domain, return -1 if( _near_box_idx < 0 ) s_near_box_list[_tidx] = -1; else{ if( nufft_array->d_obs_box_map[_near_box_idx + 2 * nufft_const->total_num_boxes] > 0)//this function has not been tested s_near_box_list[_tidx] = _near_box_idx; else s_near_box_list[_tidx] = -1; } } else{ // because this is checked inside the inner while loop s_near_box_list[_tidx] = -1; } __syncthreads(); _near_box_counter = -1; _g_addr_start = 0; _s_addr_start = 0; _s_addr_end = 0; _fetch = false; // The grand loop while (_s_addr_end >= 0)//fetch all near srcs { __syncthreads(); if (_tidx < _s_addr_end) { for (int m = 0; m < 3; m++) { s_src_coord[_tidx + m * _block_size] = nufft_array->d_obs_coord[_g_addr_start + _tidx + src_size_dev * m]; } // m for(unsigned int j = 0; j < FIELD_DIM; j++) s_src_amp[_tidx+j*BLOCK_SIZE_NEAR] = nufft_array->d_src_amp[_g_addr_start + _tidx + j*src_size_dev]; } #ifdef _GPU_D_TEST for (int i = _s_addr_start; i < _block_size && i < _s_addr_end; i++) { _d_test[_gid] += s_src_amp[i]; } #endif // ready to move on to next near box? //NEED MORE CONSIDERATION IF _near_box_counter needs the last two conditions!!!//////////////////////s_near_list_start + _tidx < _total_num_near_box if (_s_addr_end - _block_size < 0 && _near_box_counter + 1 < _block_size && _near_box_counter+1 <= _total_num_near_box-s_near_list_start)//_near_box_counter + 1 < _total_num_near_box && { _fetch = true; } while (_fetch)// until shared mem is full or run out of near box { // fetch new boxes until the shared memory is full // update status variables _near_box_counter++;//move on to next near box //_near_box_idx = nufft_array->d_near_box_list[_near_box_counter + 1 + _local_box_idx * total_num_near_box_per_box_p1]; _near_box_idx = s_near_box_list[_near_box_counter]; if( _near_box_counter + 1 >= _block_size || _near_box_counter + 1 == _total_num_near_box-s_near_list_start)//_near_box_counter+1 >= _total_num_near_box || SHOULD CHECK IT IS LARGER THAN BLOCK SIZE?? 
_fetch = false; if(_near_box_idx < 0) continue; //_near_src: total num of srcs in the near box _near_src = nufft_array->d_obs_box_map[_near_box_idx + nufft_const->total_num_boxes * 2]; //_g_addr_start: start index of srcs in the near box _g_addr_start = nufft_array->d_obs_box_map[_near_box_idx]; //record the start and end index in the context of shared memory _s_addr_start = _s_addr_end; _s_addr_end = _s_addr_end + _near_src; // Shared memory is full if (_s_addr_end >= _block_size)// || _near_box_counter + 1 >= _total_num_near_box { _fetch = false; } if (_tidx >= _s_addr_start && _tidx < _s_addr_end) { for (int m = 0; m < 3; m++) { s_src_coord[_tidx + m * _block_size] = nufft_array->d_obs_coord[_g_addr_start + _tidx - _s_addr_start + src_size_dev * m]; } // m for(unsigned int j = 0; j < FIELD_DIM; j++) s_src_amp[_tidx+j*BLOCK_SIZE_NEAR] = nufft_array->d_src_amp[_g_addr_start - _s_addr_start + _tidx + j*src_size_dev]; } } // fetch __syncthreads(); // do calculations for (int i = 0; i < _block_size && i < _s_addr_end; i++) { for (int m = 0; m < 3; m++) { _r1[m] = s_obs_coord[_tidx + m * _block_size] - s_src_coord[i + m * _block_size]; } // m for(unsigned int j = 0; j < FIELD_DIM; j++) _magn[j] = s_src_amp[i+j*BLOCK_SIZE_NEAR]; get_field_static_vector(_r1, _magn, _Q2, epsilon); } // i _s_addr_end -= _block_size; _g_addr_start += _near_src - _s_addr_end > _block_size ? _block_size : _near_src - _s_addr_end; } // _loop to fetch more srcs into shared memory //#ifdef BEN_DEBUG // _d_test[_gid] = _total_num_near_box-s_near_list_start; //#endif s_near_list_start += BLOCK_SIZE_NEAR; }//while loop to fetch more near boxes if (_shared_start2 + _tidx < _num_src2) { for(unsigned int j = 0; j < FIELD_DIM; j++) nufft_array->d_field_amp[_glb_src_idx_start2 + _shared_start2 + _tidx + j*obs_size_dev] += _Q2[j]; } // _loop to fetch more obs into shared memory __syncthreads(); _shared_start2 += _block_size; } // shared_start2 } }
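// ---------------------------------------------------------------------------
// Reference sketch (added for clarity, not part of the original file): the
// correction and exact near-field kernels above enumerate the
// (2*near_correct_layer+1)^3 candidate near boxes of each box through
// near_to_glb_idx(counter, box_idx, layer, num_boxes), which is expected to
// return the global index of the neighbor box, or -1 when the neighbor falls
// outside the computational domain. The helper below is one plausible
// implementation of that contract, written only to document the indexing
// convention; the project's actual near_to_glb_idx (and in particular the
// ordering of the counter decomposition) may differ in details.
// ---------------------------------------------------------------------------
__host__ __device__ inline int near_to_glb_idx_ref(int counter, int box_idx, int layer, const int* num_boxes)
{
    const int width = 2 * layer + 1;                   // neighbors per dimension
    // Decompose the linear counter into (dx, dy, dz) offsets in [-layer, layer].
    const int dx =  counter % width                 - layer;
    const int dy = (counter / width) % width        - layer;
    const int dz =  counter / (width * width)       - layer;
    // Decompose the observer box index into its 3-D coordinates.
    const int bx =  box_idx % num_boxes[0];
    const int by = (box_idx / num_boxes[0]) % num_boxes[1];
    const int bz =  box_idx / (num_boxes[0] * num_boxes[1]);
    const int nx = bx + dx, ny = by + dy, nz = bz + dz;
    // Out-of-domain neighbors are reported as -1, matching the checks in the kernels.
    if (nx < 0 || ny < 0 || nz < 0 || nx >= num_boxes[0] || ny >= num_boxes[1] || nz >= num_boxes[2])
        return -1;
    return nx + ny * num_boxes[0] + nz * num_boxes[0] * num_boxes[1];
}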
e586d4853bdbf8155b8309a38e1c3e45ff2986fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "custom_cuda_layers.h" #define rows_trans 16 #define cols_trans 16 template <typename T> __global__ void Transpose_Kernel(const T* inp, T* out, int row_width, int col_width) { __shared__ T data_block[rows_trans * (cols_trans + 1)]; int r = threadIdx.x / cols_trans; int c = threadIdx.x % cols_trans; int m = row_width / cols_trans; int i = blockIdx.x / m * rows_trans + r; int j = blockIdx.x % m * cols_trans + c; int row_stride = rows_trans / ((rows_trans * cols_trans + THREADS - 1) / THREADS); for (int k = 0; k < rows_trans; k += row_stride) data_block[(k + r) * cols_trans + c] = inp[(i + k) * row_width + j]; __syncthreads(); i = blockIdx.x % m * rows_trans + r; j = blockIdx.x / m * cols_trans + c; for (int k = 0; k < rows_trans; k += row_stride) out[(i + k) * col_width + j] = data_block[c * cols_trans + r + k]; } template <> void Transpose<__half>(const __half* inp_mat, __half* out_mat, int rows, int cols, hipStream_t stream) { int threads = THREADS; hipLaunchKernelGGL(( Transpose_Kernel<__half>), dim3((rows * cols + threads - 1) / threads), dim3(threads), 0, stream, inp_mat, out_mat, cols, rows); } template <> void Transpose<float>(const float* inp_mat, float* out_mat, int rows, int cols, hipStream_t stream) { int threads = THREADS; hipLaunchKernelGGL(( Transpose_Kernel<float>), dim3((rows * cols + threads - 1) / threads), dim3(threads), 0, stream, inp_mat, out_mat, cols, rows); } template <typename T> __global__ void transform_0213(T* output, const T* vals, int hidden_dim, int seq_length, int heads); template <> __global__ void transform_0213<float>(float* output, const float* vals, int hidden_dim, int seq_length, int heads) { int d0_stride = hidden_dim * seq_length / 4; int d1_stride = hidden_dim / 4; int d2_stride = hidden_dim / heads / 4; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) const float4* vals_vec = reinterpret_cast<const float4*>(vals); float4* output_vec = reinterpret_cast<float4*>(output); float4 inputs = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = inputs; } template <> __global__ void transform_0213<__half>(__half* output, const __half* vals, int hidden_dim, int seq_length, int heads) { #if __CUDA_ARCH__ >= 700 int d0_stride = hidden_dim * seq_length / 8; int d1_stride = hidden_dim / 8; int d2_stride = hidden_dim / heads / 8; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) float4 vals_arr[1]; const float4* vals_vec = reinterpret_cast<const float4*>(vals); float4* output_vec = reinterpret_cast<float4*>(output); vals_arr[0] = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = vals_arr[0]; #endif } template <> void launch_transform_0213<float>(float* output, const float* vals, int batch_size, int seq_length, int hidden_dim, int heads, hipStream_t stream) { dim3 block_dim(hidden_dim / heads / 4, heads); dim3 grid_dim(batch_size, seq_length); hipLaunchKernelGGL(( 
transform_0213<float>) , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, hidden_dim, seq_length, heads); } template <> void launch_transform_0213<__half>(__half* output, const __half* vals, int batch_size, int seq_length, int hidden_dim, int heads, hipStream_t stream) { dim3 block_dim(hidden_dim / heads / 8, heads); dim3 grid_dim(batch_size, seq_length); hipLaunchKernelGGL(( transform_0213<__half>) , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, hidden_dim, seq_length, heads); } // Bias add template <typename T> __global__ void bias_add_transform_0213(T* output, const T* vals, const T* bias, int hidden_dim, int seq_length, int heads); template <> __global__ void bias_add_transform_0213<float>(float* output, const float* vals, const float* bias, int hidden_dim, int seq_length, int heads) { int d0_stride = hidden_dim * seq_length / 4; int d1_stride = hidden_dim / 4; int d2_stride = hidden_dim / heads / 4; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int cnt = blockIdx.z; // Hidden count int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) const float4* vals_vec = reinterpret_cast<const float4*>(vals); const float4* bias_vec = reinterpret_cast<const float4*>(bias); float4* output_vec = reinterpret_cast<float4*>(output); float4 inputs = vals_vec[d0 * d0_stride * gridDim.z + cnt * d1_stride + d1 * d1_stride * gridDim.z + d2 * d2_stride + d3]; float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3]; float4 outputs; outputs.x = inputs.x + biases.x; outputs.y = inputs.y + biases.y; outputs.z = inputs.z + biases.z; outputs.w = inputs.w + biases.w; output_vec[cnt * d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = outputs; } #define ATTN_H 3 #define MAX_SEQ_LINE 10 template <> __global__ void bias_add_transform_0213<__half>(__half* output, const __half* vals, const __half* bias, int hidden_dim, int seq_length, int heads) { #if __CUDA_ARCH__ >= 700 __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length / 8; int d1_stride = hidden_dim / 8; int d2_stride = hidden_dim / heads / 8; int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8 int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8 int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int cnt = threadIdx.z; // blockIdx.z; // Hidden count int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) float4 vals_arr[1]; float4 bias_arr[1]; float4 output_arr[1]; __half2* vals_half = reinterpret_cast<__half2*>(vals_arr); __half2* bias_half = reinterpret_cast<__half2*>(bias_arr); __half2* output_half = reinterpret_cast<__half2*>(output_arr); const float4* vals_vec = reinterpret_cast<const float4*>(vals); const float4* bias_vec = reinterpret_cast<const float4*>(bias); float4* output_vec = reinterpret_cast<float4*>(output); int iter_index = cnt * d1_stride + d2 * d2_stride + d3; int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1); bias_arr[0] = bias_vec[iter_index]; for (int iter = 0; iter < 2; iter++) { int iter_id = iter * iteration_stride + iter_index; vals_arr[0] = vals_vec[input_offset + iter_id]; output_half[0] = vals_half[0] + bias_half[0]; output_half[1] = vals_half[1] + bias_half[1]; output_half[2] = 
vals_half[2] + bias_half[2]; output_half[3] = vals_half[3] + bias_half[3]; in_data[iter_id] = output_arr[0]; } __syncthreads(); iteration_stride = blockDim.z * (blockDim.y >> 1); int matrix_stride = (d0_out_stride * gridDim.x); int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1); int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride; for (int iter = 0; iter < 2; iter++) { int iter_row = (iter * iteration_stride) + head_count; int iter_offset = (iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride; output_vec[out_index + iter_offset] = in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; } #endif } // [B S C*H] - > C * [B A S N] template <> void launch_bias_add_transform_0213<float>(float* output, const float* vals, const float* bias, int batch_size, int seq_length, int hidden_dim, int heads, hipStream_t stream, int trans_count) { dim3 block_dim(hidden_dim / heads / 4, heads); dim3 grid_dim(batch_size, seq_length, trans_count); hipLaunchKernelGGL(( bias_add_transform_0213<float>) , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, bias, hidden_dim, seq_length, heads); } template <> void launch_bias_add_transform_0213<__half>(__half* output, const __half* vals, const __half* bias, int batch_size, int seq_length, int hidden_dim, int heads, hipStream_t stream, int trans_count) { dim3 block_dim(hidden_dim / heads / 8, heads, trans_count); dim3 grid_dim(batch_size, seq_length / 2); hipLaunchKernelGGL(( bias_add_transform_0213<__half>) , dim3(grid_dim), dim3(block_dim), 0, stream, output, vals, bias, hidden_dim, seq_length, heads); } template <typename T> __global__ void transform4d_0213(T* out, const T* in, int heads, int seq_length, int hidden_dim); template <> __global__ void transform4d_0213<float>(float* out, const float* in, int heads, int seq_length, int hidden_dim) { int d0_stride = hidden_dim * seq_length / 4; int d1_stride = d0_stride / heads; int d2_stride = hidden_dim / heads / 4; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = hidden_dim / 4; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y / ((seq_length + blockDim.y - 1) / blockDim.y); // Head int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length; int cnt = blockIdx.z; int d3 = threadIdx.x; // Values (groups of 8) if (d2 < seq_length) { const float4* in_vec = reinterpret_cast<const float4*>(in); float4* out_vec = reinterpret_cast<float4*>(out); float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride + d2 * d2_out_stride * gridDim.z + d3] = vals_vec; } } template <> __global__ void transform4d_0213<__half>(__half* out, const __half* in, int heads, int seq_length, int hidden_dim) { #if __CUDA_ARCH__ >= 700 __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length / 8; int d1_stride = hidden_dim / 8; int d2_stride = hidden_dim / heads / 8; int d0 = blockIdx.x; // Batch int d1 = threadIdx.y; // Head int d2 = blockIdx.y; // Sequence int cnt = threadIdx.z; // Hidden count int d3 = threadIdx.x; // Values (groups of 8) const float4* in_vec = reinterpret_cast<const float4*>(in); float4* out_vec = reinterpret_cast<float4*>(out); int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + d1 % 2 * d2_stride; int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1); int iteration_stride = blockDim.z * (blockDim.y >> 1); int matrix_stride = (d0_stride * 
gridDim.x); for (int iter = 0; iter < 2; iter++) { int iter_row = iter * iteration_stride + head_count; int iter_offset = (iter_row % blockDim.y) * d2_stride; in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] = in_vec[input_offset + iter_offset * seq_length + (iter_row / blockDim.y) * matrix_stride]; } __syncthreads(); iteration_stride = d1_stride * blockDim.z; int iter_index = cnt * d1_stride + d1 * d2_stride + d3; int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1); for (int iter = 0; iter < 2; iter++) { int iter_id = iter * iteration_stride + iter_index; out_vec[output_offset + iter_id] = in_data[iter_id]; } #endif } // 3 * [B A S N] - > [B S C*H] template <> void launch_transform4d_0213<float>(float* out, const float* in, int batch_size, int heads, int seq_length, int hidden_dim, hipStream_t stream, int trans_count) { dim3 grid_dims(batch_size, heads * ((seq_length + 7) / 8), trans_count); dim3 block_dims(hidden_dim / heads / 4, 8); hipLaunchKernelGGL(( transform4d_0213<float>) , dim3(grid_dims), dim3(block_dims), 0, stream, out, in, heads, seq_length, hidden_dim); } template <> void launch_transform4d_0213<__half>(__half* out, const __half* in, int batch_size, int heads, int seq_length, int hidden_dim, hipStream_t stream, int trans_count) { dim3 grid_dims(batch_size, seq_length / 2); dim3 block_dims(hidden_dim / heads / 8, heads, trans_count); hipLaunchKernelGGL(( transform4d_0213<__half>) , dim3(grid_dims), dim3(block_dims), 0, stream, out, in, heads, seq_length, hidden_dim); }
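// ---------------------------------------------------------------------------
// Usage sketch (added for clarity, not part of the hipified kernels above):
// launch_bias_add_transform_0213 reshapes the fused QKV activation from the
// [B S C*H] layout produced by the preceding GEMM into C * [B A S N]
// (A = heads, N = H / heads, C = trans_count), adding the bias on the way;
// launch_transform4d_0213 performs the inverse reshape. The sizes below are
// illustrative only, and the buffers and stream are assumed to have been
// allocated elsewhere.
// ---------------------------------------------------------------------------
void qkv_reshape_example(float* d_qkv_basn,        // C * [B A S N] scratch buffer
                         float* d_qkv_bsch,        // [B S C*H] buffer restored by the inverse reshape
                         const float* d_qkv_gemm,  // [B S C*H] fused QKV GEMM output
                         const float* d_qkv_bias,  // [C*H] fused QKV bias
                         hipStream_t stream)
{
    const int batch_size = 8, seq_length = 128, hidden_dim = 768, heads = 12;
    const int trans_count = 3;  // Q, K and V live in one fused buffer

    // [B S C*H] + bias  ->  C * [B A S N]
    launch_bias_add_transform_0213<float>(
        d_qkv_basn, d_qkv_gemm, d_qkv_bias, batch_size, seq_length, hidden_dim, heads, stream, trans_count);

    // C * [B A S N]  ->  [B S C*H]
    launch_transform4d_0213<float>(
        d_qkv_bsch, d_qkv_basn, batch_size, heads, seq_length, hidden_dim, stream, trans_count);
}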
e586d4853bdbf8155b8309a38e1c3e45ff2986fe.cu
#include "custom_cuda_layers.h" #define rows_trans 16 #define cols_trans 16 template <typename T> __global__ void Transpose_Kernel(const T* inp, T* out, int row_width, int col_width) { __shared__ T data_block[rows_trans * (cols_trans + 1)]; int r = threadIdx.x / cols_trans; int c = threadIdx.x % cols_trans; int m = row_width / cols_trans; int i = blockIdx.x / m * rows_trans + r; int j = blockIdx.x % m * cols_trans + c; int row_stride = rows_trans / ((rows_trans * cols_trans + THREADS - 1) / THREADS); for (int k = 0; k < rows_trans; k += row_stride) data_block[(k + r) * cols_trans + c] = inp[(i + k) * row_width + j]; __syncthreads(); i = blockIdx.x % m * rows_trans + r; j = blockIdx.x / m * cols_trans + c; for (int k = 0; k < rows_trans; k += row_stride) out[(i + k) * col_width + j] = data_block[c * cols_trans + r + k]; } template <> void Transpose<__half>(const __half* inp_mat, __half* out_mat, int rows, int cols, cudaStream_t stream) { int threads = THREADS; Transpose_Kernel<__half><<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( inp_mat, out_mat, cols, rows); } template <> void Transpose<float>(const float* inp_mat, float* out_mat, int rows, int cols, cudaStream_t stream) { int threads = THREADS; Transpose_Kernel<float><<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( inp_mat, out_mat, cols, rows); } template <typename T> __global__ void transform_0213(T* output, const T* vals, int hidden_dim, int seq_length, int heads); template <> __global__ void transform_0213<float>(float* output, const float* vals, int hidden_dim, int seq_length, int heads) { int d0_stride = hidden_dim * seq_length / 4; int d1_stride = hidden_dim / 4; int d2_stride = hidden_dim / heads / 4; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) const float4* vals_vec = reinterpret_cast<const float4*>(vals); float4* output_vec = reinterpret_cast<float4*>(output); float4 inputs = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = inputs; } template <> __global__ void transform_0213<__half>(__half* output, const __half* vals, int hidden_dim, int seq_length, int heads) { #if __CUDA_ARCH__ >= 700 int d0_stride = hidden_dim * seq_length / 8; int d1_stride = hidden_dim / 8; int d2_stride = hidden_dim / heads / 8; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) float4 vals_arr[1]; const float4* vals_vec = reinterpret_cast<const float4*>(vals); float4* output_vec = reinterpret_cast<float4*>(output); vals_arr[0] = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = vals_arr[0]; #endif } template <> void launch_transform_0213<float>(float* output, const float* vals, int batch_size, int seq_length, int hidden_dim, int heads, cudaStream_t stream) { dim3 block_dim(hidden_dim / heads / 4, heads); dim3 grid_dim(batch_size, seq_length); transform_0213<float> <<<grid_dim, block_dim, 0, stream>>>(output, vals, hidden_dim, seq_length, heads); } template <> void launch_transform_0213<__half>(__half* output, 
const __half* vals, int batch_size, int seq_length, int hidden_dim, int heads, cudaStream_t stream) { dim3 block_dim(hidden_dim / heads / 8, heads); dim3 grid_dim(batch_size, seq_length); transform_0213<__half> <<<grid_dim, block_dim, 0, stream>>>(output, vals, hidden_dim, seq_length, heads); } // Bias add template <typename T> __global__ void bias_add_transform_0213(T* output, const T* vals, const T* bias, int hidden_dim, int seq_length, int heads); template <> __global__ void bias_add_transform_0213<float>(float* output, const float* vals, const float* bias, int hidden_dim, int seq_length, int heads) { int d0_stride = hidden_dim * seq_length / 4; int d1_stride = hidden_dim / 4; int d2_stride = hidden_dim / heads / 4; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int cnt = blockIdx.z; // Hidden count int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) const float4* vals_vec = reinterpret_cast<const float4*>(vals); const float4* bias_vec = reinterpret_cast<const float4*>(bias); float4* output_vec = reinterpret_cast<float4*>(output); float4 inputs = vals_vec[d0 * d0_stride * gridDim.z + cnt * d1_stride + d1 * d1_stride * gridDim.z + d2 * d2_stride + d3]; float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3]; float4 outputs; outputs.x = inputs.x + biases.x; outputs.y = inputs.y + biases.y; outputs.z = inputs.z + biases.z; outputs.w = inputs.w + biases.w; output_vec[cnt * d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = outputs; } #define ATTN_H 3 #define MAX_SEQ_LINE 10 template <> __global__ void bias_add_transform_0213<__half>(__half* output, const __half* vals, const __half* bias, int hidden_dim, int seq_length, int heads) { #if __CUDA_ARCH__ >= 700 __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length / 8; int d1_stride = hidden_dim / 8; int d2_stride = hidden_dim / heads / 8; int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8 int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8 int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = d2_stride * seq_length; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y; // Sequence ID (0-127) int cnt = threadIdx.z; // blockIdx.z; // Hidden count int d2 = threadIdx.y; // Head (0-11) int d3 = threadIdx.x; // Values (groups of 4) float4 vals_arr[1]; float4 bias_arr[1]; float4 output_arr[1]; __half2* vals_half = reinterpret_cast<__half2*>(vals_arr); __half2* bias_half = reinterpret_cast<__half2*>(bias_arr); __half2* output_half = reinterpret_cast<__half2*>(output_arr); const float4* vals_vec = reinterpret_cast<const float4*>(vals); const float4* bias_vec = reinterpret_cast<const float4*>(bias); float4* output_vec = reinterpret_cast<float4*>(output); int iter_index = cnt * d1_stride + d2 * d2_stride + d3; int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1); bias_arr[0] = bias_vec[iter_index]; for (int iter = 0; iter < 2; iter++) { int iter_id = iter * iteration_stride + iter_index; vals_arr[0] = vals_vec[input_offset + iter_id]; output_half[0] = vals_half[0] + bias_half[0]; output_half[1] = vals_half[1] + bias_half[1]; output_half[2] = vals_half[2] + bias_half[2]; output_half[3] = vals_half[3] + bias_half[3]; in_data[iter_id] = output_arr[0]; } __syncthreads(); iteration_stride = blockDim.z * (blockDim.y >> 1); int matrix_stride = (d0_out_stride * 
gridDim.x); int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1); int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride; for (int iter = 0; iter < 2; iter++) { int iter_row = (iter * iteration_stride) + head_count; int iter_offset = (iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride; output_vec[out_index + iter_offset] = in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; } #endif } // [B S C*H] - > C * [B A S N] template <> void launch_bias_add_transform_0213<float>(float* output, const float* vals, const float* bias, int batch_size, int seq_length, int hidden_dim, int heads, cudaStream_t stream, int trans_count) { dim3 block_dim(hidden_dim / heads / 4, heads); dim3 grid_dim(batch_size, seq_length, trans_count); bias_add_transform_0213<float> <<<grid_dim, block_dim, 0, stream>>>(output, vals, bias, hidden_dim, seq_length, heads); } template <> void launch_bias_add_transform_0213<__half>(__half* output, const __half* vals, const __half* bias, int batch_size, int seq_length, int hidden_dim, int heads, cudaStream_t stream, int trans_count) { dim3 block_dim(hidden_dim / heads / 8, heads, trans_count); dim3 grid_dim(batch_size, seq_length / 2); bias_add_transform_0213<__half> <<<grid_dim, block_dim, 0, stream>>>(output, vals, bias, hidden_dim, seq_length, heads); } template <typename T> __global__ void transform4d_0213(T* out, const T* in, int heads, int seq_length, int hidden_dim); template <> __global__ void transform4d_0213<float>(float* out, const float* in, int heads, int seq_length, int hidden_dim) { int d0_stride = hidden_dim * seq_length / 4; int d1_stride = d0_stride / heads; int d2_stride = hidden_dim / heads / 4; int d0_out_stride = d0_stride; int d1_out_stride = d2_stride; int d2_out_stride = hidden_dim / 4; int d0 = blockIdx.x; // Batch int d1 = blockIdx.y / ((seq_length + blockDim.y - 1) / blockDim.y); // Head int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length; int cnt = blockIdx.z; int d3 = threadIdx.x; // Values (groups of 8) if (d2 < seq_length) { const float4* in_vec = reinterpret_cast<const float4*>(in); float4* out_vec = reinterpret_cast<float4*>(out); float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride + d2 * d2_out_stride * gridDim.z + d3] = vals_vec; } } template <> __global__ void transform4d_0213<__half>(__half* out, const __half* in, int heads, int seq_length, int hidden_dim) { #if __CUDA_ARCH__ >= 700 __shared__ float4 in_data[3072]; int d0_stride = hidden_dim * seq_length / 8; int d1_stride = hidden_dim / 8; int d2_stride = hidden_dim / heads / 8; int d0 = blockIdx.x; // Batch int d1 = threadIdx.y; // Head int d2 = blockIdx.y; // Sequence int cnt = threadIdx.z; // Hidden count int d3 = threadIdx.x; // Values (groups of 8) const float4* in_vec = reinterpret_cast<const float4*>(in); float4* out_vec = reinterpret_cast<float4*>(out); int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + d1 % 2 * d2_stride; int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1); int iteration_stride = blockDim.z * (blockDim.y >> 1); int matrix_stride = (d0_stride * gridDim.x); for (int iter = 0; iter < 2; iter++) { int iter_row = iter * iteration_stride + head_count; int iter_offset = (iter_row % blockDim.y) * d2_stride; in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] = in_vec[input_offset + 
iter_offset * seq_length + (iter_row / blockDim.y) * matrix_stride]; } __syncthreads(); iteration_stride = d1_stride * blockDim.z; int iter_index = cnt * d1_stride + d1 * d2_stride + d3; int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1); for (int iter = 0; iter < 2; iter++) { int iter_id = iter * iteration_stride + iter_index; out_vec[output_offset + iter_id] = in_data[iter_id]; } #endif } // 3 * [B A S N] - > [B S C*H] template <> void launch_transform4d_0213<float>(float* out, const float* in, int batch_size, int heads, int seq_length, int hidden_dim, cudaStream_t stream, int trans_count) { dim3 grid_dims(batch_size, heads * ((seq_length + 7) / 8), trans_count); dim3 block_dims(hidden_dim / heads / 4, 8); transform4d_0213<float> <<<grid_dims, block_dims, 0, stream>>>(out, in, heads, seq_length, hidden_dim); } template <> void launch_transform4d_0213<__half>(__half* out, const __half* in, int batch_size, int heads, int seq_length, int hidden_dim, cudaStream_t stream, int trans_count) { dim3 grid_dims(batch_size, seq_length / 2); dim3 block_dims(hidden_dim / heads / 8, heads, trans_count); transform4d_0213<__half> <<<grid_dims, block_dims, 0, stream>>>(out, in, heads, seq_length, hidden_dim); }
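The float and __half specializations above move float4 vectors (4 floats or 8 halves at a time), which obscures the underlying index permutation. The scalar host loop below is a minimal reference for the same [batch, seq, heads, head_size] -> [batch, heads, seq, head_size] reordering performed by transform_0213; the function name and layout labels are inferred from the launch configuration and serve only as a verification sketch, not as part of the original kernels.

#include <cstddef>
#include <vector>

// Scalar reference for the 0213 permutation (assumes hidden_dim == heads * head_size).
template <typename T>
void transform_0213_reference(std::vector<T>& out, const std::vector<T>& in,
                              int batch, int seq, int heads, int head_size) {
    for (int b = 0; b < batch; ++b)
        for (int s = 0; s < seq; ++s)
            for (int a = 0; a < heads; ++a)
                for (int n = 0; n < head_size; ++n) {
                    // input  laid out as [b][s][a][n]
                    std::size_t src = (((std::size_t)b * seq + s) * heads + a) * head_size + n;
                    // output laid out as [b][a][s][n]
                    std::size_t dst = (((std::size_t)b * heads + a) * seq + s) * head_size + n;
                    out[dst] = in[src];
                }
}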
7d58f87d05836a773c0b8e627247cf20699c34c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include <hiprand/hiprand_kernel.h> #include <device_launch_parameters.h> #include "utils.h" #include <iostream> #include <sstream> #include <fstream> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset /*for (size_t r = threadIdx.x; r < numRows; ++r) { for (size_t c = blockIdx.x; c < numCols; ++c) { uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } }*/ //printf(" %i: Hello World!\n", blockDim.x); //int index = (threadIdx.y * blockDim.x + threadIdx.x)+blockIdx.x; //uchar4 rgba = rgbaImage[r * numCols + c]; //float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; //printf(" %i: Hello World!\n", threadIdx.x*threadIdx.y); uint i = (blockIdx.x * blockDim.x) + threadIdx.x; uint j = (blockIdx.y * blockDim.y) + threadIdx.y; uchar4 rgba = rgbaImage[j * numCols + i]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; if(i >= numCols || j >= numRows) return; greyImage[j * numCols + i] = channelSum; /* if (threadIdx.x+(threadIdx.y*blockDim.x) > 254 ) { printf("%i\n",threadIdx.x+(threadIdx.y*blockDim.x)); printf("moro\n"); }*/ } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(32, 32, 1); // Total of 32*32=1024 threads // compute the x and y 
dimension of grid unsigned int gridX = (numCols % blockSize.x) == 0 ? (numCols/blockSize.x) : 1 + (numCols/blockSize.x); unsigned int gridY = (numRows% blockSize.y) == 0 ? (numRows/blockSize.y) : 1 + (numRows/blockSize.y); std::cout << "1 " << gridX << std::endl; std::cout << "2 " << gridY << std::endl; const dim3 gridSize( gridX, gridY, 1); hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
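One detail worth flagging in the kernel above: the pixel is loaded from rgbaImage before the bounds check, so threads in a partial edge block can read past the end of the image even though they never write. A defensive variant guards first, and the grid size can be written with the usual ceil-division idiom instead of the modulo/ternary form. This is an illustrative sketch only (same NTSC weights, hypothetical kernel name), not a replacement taken from the course material.

__global__ void rgba_to_greyscale_checked(const uchar4* const rgbaImage,
                                          unsigned char* const greyImage,
                                          int numRows, int numCols)
{
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;   // column
  unsigned int j = blockIdx.y * blockDim.y + threadIdx.y;   // row
  if (i >= (unsigned int)numCols || j >= (unsigned int)numRows) return;  // guard before any load
  uchar4 rgba = rgbaImage[j * numCols + i];
  greyImage[j * numCols + i] =
      static_cast<unsigned char>(.299f * rgba.x + .587f * rgba.y + .114f * rgba.z);
}

// Grid computed by ceil division -- equivalent to the ternary used above:
// const dim3 blockSize(32, 32, 1);
// const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
//                     (numRows + blockSize.y - 1) / blockSize.y, 1);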
7d58f87d05836a773c0b8e627247cf20699c34c3.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include <curand_mtgp32_kernel.h> #include <device_launch_parameters.h> #include "utils.h" #include <iostream> #include <sstream> #include <fstream> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset /*for (size_t r = threadIdx.x; r < numRows; ++r) { for (size_t c = blockIdx.x; c < numCols; ++c) { uchar4 rgba = rgbaImage[r * numCols + c]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[r * numCols + c] = channelSum; } }*/ //printf(" %i: Hello World!\n", blockDim.x); //int index = (threadIdx.y * blockDim.x + threadIdx.x)+blockIdx.x; //uchar4 rgba = rgbaImage[r * numCols + c]; //float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; //printf(" %i: Hello World!\n", threadIdx.x*threadIdx.y); uint i = (blockIdx.x * blockDim.x) + threadIdx.x; uint j = (blockIdx.y * blockDim.y) + threadIdx.y; uchar4 rgba = rgbaImage[j * numCols + i]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; if(i >= numCols || j >= numRows) return; greyImage[j * numCols + i] = channelSum; /* if (threadIdx.x+(threadIdx.y*blockDim.x) > 254 ) { printf("%i\n",threadIdx.x+(threadIdx.y*blockDim.x)); printf("moro\n"); }*/ } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(32, 32, 1); // Total of 32*32=1024 threads // compute the x and y dimension of grid unsigned int gridX = (numCols % blockSize.x) == 0 ? 
(numCols/blockSize.x) : 1 + (numCols/blockSize.x); unsigned int gridY = (numRows% blockSize.y) == 0 ? (numRows/blockSize.y) : 1 + (numRows/blockSize.y); std::cout << "1 " << gridX << std::endl; std::cout << "2 " << gridY << std::endl; const dim3 gridSize( gridX, gridY, 1); rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
ae399bf3cb4a500cfc356426e987eab321d090fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "Diffus_4thO_GPU_core.h" #include "shared.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* CUDA implementation of fourth-order diffusion scheme [1] for piecewise-smooth recovery (2D/3D case) * The minimisation is performed using explicit scheme. * * Input Parameters: * 1. Noisy image/volume * 2. lambda - regularization parameter * 3. Edge-preserving parameter (sigma) * 4. Number of iterations, for explicit scheme >= 150 is recommended * 5. tau - time-marching step for explicit scheme * 6. eplsilon: tolerance constant * 7. GPU device number if for multigpu run (default 0) * Output: * [1] Filtered/regularized image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the paper by * [1] Hajiaboli, M.R., 2011. An anisotropic fourth-order diffusion filter for image noise removal. International Journal of Computer Vision, 92(2), pp.177-191. */ #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS 1.0e-7 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) /********************************************************************/ /***************************2D Functions*****************************/ /********************************************************************/ __global__ void Weighted_Laplc2D_kernel(float *W_Lapl, float *U0, float sigma, int dimX, int dimY) { int i1,i2,j1,j2; float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; gradX = 0.5f*(U0[j*dimX+i2] - U0[j*dimX+i1]); gradX_sq = powf(gradX,2); gradY = 0.5f*(U0[j2*dimX+i] - U0[j1*dimX+i]); gradY_sq = powf(gradY,2); gradXX = U0[j*dimX+i2] + U0[j*dimX+i1] - 2*U0[index]; gradYY = U0[j2*dimX+i] + U0[j1*dimX+i] - 2*U0[index]; gradXY = 0.25f*(U0[j2*dimX+i2] + U0[j1*dimX+i1] - U0[j1*dimX+i2] - U0[j2*dimX+i1]); xy_2 = 2.0f*gradX*gradY*gradXY; denom = gradX_sq + gradY_sq; if (denom <= EPS) { V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/EPS; V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/EPS; } else { V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/denom; V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/denom; } c = 1.0f/(1.0f + denom/sigma); c_sq = c*c; W_Lapl[index] = c_sq*V_norm + c*V_orth; } return; } __global__ void Diffusion_update_step2D_kernel(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, int dimX, int dimY) { int i1,i2,j1,j2; float gradXXc, gradYYc; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; gradXXc = W_Lapl[j*dimX+i2] + W_Lapl[j*dimX+i1] - 2*W_Lapl[index]; gradYYc = W_Lapl[j2*dimX+i] + W_Lapl[j1*dimX+i] - 2*W_Lapl[index]; Output[index] += tau*(-lambdaPar*(gradXXc + gradYYc) - (Output[index] - Input[index])); } return; } /********************************************************************/ /***************************3D Functions*****************************/ /********************************************************************/ __global__ void Weighted_Laplc3D_kernel(float *W_Lapl, float *U0, float sigma, int dimX, int dimY, int dimZ) { int i1,i2,j1,j2,k1,k2; float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq, gradZ, gradZ_sq, gradZZ, gradXZ, gradYZ, xyz_1, xyz_2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; k1 = k+1; if (k1 == dimZ) k1 = k-1; k2 = k-1; if (k2 < 0) k2 = k+1; int index = (dimX*dimY)*k + j*dimX+i; gradX = 0.5f*(U0[(dimX*dimY)*k + j*dimX+i2] - U0[(dimX*dimY)*k + j*dimX+i1]); gradX_sq = pow(gradX,2); gradY = 0.5f*(U0[(dimX*dimY)*k + j2*dimX+i] - U0[(dimX*dimY)*k + 
j1*dimX+i]); gradY_sq = pow(gradY,2); gradZ = 0.5f*(U0[(dimX*dimY)*k2 + j*dimX+i] - U0[(dimX*dimY)*k1 + j*dimX+i]); gradZ_sq = pow(gradZ,2); gradXX = U0[(dimX*dimY)*k + j*dimX+i2] + U0[(dimX*dimY)*k + j*dimX+i1] - 2*U0[index]; gradYY = U0[(dimX*dimY)*k + j2*dimX+i] + U0[(dimX*dimY)*k + j1*dimX+i] - 2*U0[index]; gradZZ = U0[(dimX*dimY)*k2 + j*dimX+i] + U0[(dimX*dimY)*k1 + j*dimX+i] - 2*U0[index]; gradXY = 0.25f*(U0[(dimX*dimY)*k + j2*dimX+i2] + U0[(dimX*dimY)*k + j1*dimX+i1] - U0[(dimX*dimY)*k + j1*dimX+i2] - U0[(dimX*dimY)*k + j2*dimX+i1]); gradXZ = 0.25f*(U0[(dimX*dimY)*k2 + j*dimX+i2] - U0[(dimX*dimY)*k2+j*dimX+i1] - U0[(dimX*dimY)*k1+j*dimX+i2] + U0[(dimX*dimY)*k1+j*dimX+i1]); gradYZ = 0.25f*(U0[(dimX*dimY)*k2 +j2*dimX+i] - U0[(dimX*dimY)*k2+j1*dimX+i] - U0[(dimX*dimY)*k1+j2*dimX+i] + U0[(dimX*dimY)*k1+j1*dimX+i]); xy_2 = 2.0f*gradX*gradY*gradXY; xyz_1 = 2.0f*gradX*gradZ*gradXZ; xyz_2 = 2.0f*gradY*gradZ*gradYZ; denom = gradX_sq + gradY_sq + gradZ_sq; if (denom <= EPS) { V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/EPS; V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/EPS; } else { V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/denom; V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/denom; } c = 1.0f/(1.0f + denom/sigma); c_sq = c*c; W_Lapl[index] = c_sq*V_norm + c*V_orth; } return; } __global__ void Diffusion_update_step3D_kernel(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, int dimX, int dimY, int dimZ) { int i1,i2,j1,j2,k1,k2; float gradXXc, gradYYc, gradZZc; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; k1 = k+1; if (k1 == dimZ) k1 = k-1; k2 = k-1; if (k2 < 0) k2 = k+1; int index = (dimX*dimY)*k + j*dimX+i; gradXXc = W_Lapl[(dimX*dimY)*k + j*dimX+i2] + W_Lapl[(dimX*dimY)*k + j*dimX+i1] - 2*W_Lapl[index]; gradYYc = W_Lapl[(dimX*dimY)*k + j2*dimX+i] + W_Lapl[(dimX*dimY)*k + j1*dimX+i] - 2*W_Lapl[index]; gradZZc = W_Lapl[(dimX*dimY)*k2 + j*dimX+i] + W_Lapl[(dimX*dimY)*k1 + j*dimX+i] - 2*W_Lapl[index]; Output[index] += tau*(-lambdaPar*(gradXXc + gradYYc + gradZZc) - (Output[index] - Input[index])); } return; } __global__ void Diff4thcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void Diff4thResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void Diff4thcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int 
index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void Diff4thResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ /********************* MAIN HOST FUNCTION ******************/ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ extern "C" int Diffus4th_GPU_main(float *Input, float *Output, float *infovector, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, float epsil, int gpu_device, int N, int M, int Z) { int deviceCount = -1; // number of devices hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(hipSetDevice(gpu_device)); int dimTotal, n, count = 0; float *d_input, *d_output, *d_W_Lapl, *d_update_prev=NULL, re; re = 0.0f; float sigmaPar2; sigmaPar2 = sigmaPar*sigmaPar; dimTotal = N*M*Z; CHECK(hipMalloc((void**)&d_input,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&d_output,dimTotal*sizeof(float))); CHECK(hipMalloc((void**)&d_W_Lapl,dimTotal*sizeof(float))); if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,dimTotal*sizeof(float)) ); CHECK(hipMemcpy(d_input,Input,dimTotal*sizeof(float),hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_output,Input,dimTotal*sizeof(float),hipMemcpyHostToDevice)); /*2D case */ dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D)); dim3 dimBlock3(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid3(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE),idivup(Z,BLKZSIZE)); for(n=0; n < iterationsNumb; n++) { if ((epsil != 0.0f) && (n % 5 == 0)) { if (Z == 1)hipLaunchKernelGGL(( Diff4thcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_output, d_update_prev, N, M, dimTotal); elsehipLaunchKernelGGL(( Diff4thcopy_kernel3D), dim3(dimGrid3),dim3(dimBlock3), 0, 0, d_output, d_update_prev, N, M, Z, dimTotal); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } if (Z == 1) { /*2D case */ /* Calculating weighted Laplacian */ hipLaunchKernelGGL(( Weighted_Laplc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_W_Lapl, d_output, sigmaPar2, N, M); CHECK(hipDeviceSynchronize()); /* Perform iteration step */ hipLaunchKernelGGL(( Diffusion_update_step2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_output, d_input, d_W_Lapl, lambdaPar, sigmaPar2, tau, N, M); CHECK(hipDeviceSynchronize()); } else { /* Calculating weighted Laplacian */ hipLaunchKernelGGL(( Weighted_Laplc3D_kernel), dim3(dimGrid3),dim3(dimBlock3), 0, 0, d_W_Lapl, d_output, sigmaPar2, N, M, Z); CHECK(hipDeviceSynchronize()); /* Perform iteration step */ hipLaunchKernelGGL(( Diffusion_update_step3D_kernel), dim3(dimGrid3),dim3(dimBlock3), 0, 0, d_output, d_input, d_W_Lapl, lambdaPar, sigmaPar2, tau, N, M, Z); CHECK(hipDeviceSynchronize()); } if ((epsil != 0.0f) && (n % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ if (Z == 1)hipLaunchKernelGGL(( Diff4thResidCalc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_output, d_update_prev, d_W_Lapl, N, M, dimTotal); elsehipLaunchKernelGGL(( Diff4thResidCalc3D_kernel), dim3(dimGrid3),dim3(dimBlock3), 0, 0, d_output, d_update_prev, d_W_Lapl, N, M, Z, 
dimTotal); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors( hipPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(d_W_Lapl, d_W_Lapl + dimTotal); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_output, d_output + dimTotal); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } CHECK(hipMemcpy(Output,d_output,dimTotal*sizeof(float),hipMemcpyDeviceToHost)); CHECK(hipFree(d_input)); CHECK(hipFree(d_output)); CHECK(hipFree(d_W_Lapl)); if (epsil != 0.0f) hipFree(d_update_prev); /*adding info into info_vector */ infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ hipDeviceSynchronize(); return 0; }
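A minimal host-side usage sketch for the entry point above, following the parameter list in the header comment: Z == 1 selects the 2D path, infovector receives the iteration count and the reached tolerance, and a zero epsil disables the tolerance check. The concrete image size and regularisation values here are placeholders chosen for illustration, not recommendations from the original source.

#include <vector>

// Declared in Diffus_4thO_GPU_core.h in the original project.
extern "C" int Diffus4th_GPU_main(float *Input, float *Output, float *infovector,
                                  float lambdaPar, float sigmaPar, int iterationsNumb,
                                  float tau, float epsil, int gpu_device,
                                  int N, int M, int Z);

int run_diffus4th_2d_example()
{
    const int N = 256, M = 256, Z = 1;       // Z == 1 -> 2D kernels are launched
    std::vector<float> input(N * M, 0.0f);   // noisy image goes here
    std::vector<float> output(N * M, 0.0f);
    float info[2] = {0.0f, 0.0f};            // [iterations run, reached tolerance]

    // Placeholder settings; the header comment recommends >= 150 iterations
    // for the explicit scheme.
    return Diffus4th_GPU_main(input.data(), output.data(), info,
                              /*lambdaPar*/ 0.5f, /*sigmaPar*/ 0.02f,
                              /*iterations*/ 300, /*tau*/ 0.01f,
                              /*epsil*/ 0.0f, /*gpu_device*/ 0, N, M, Z);
}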
ae399bf3cb4a500cfc356426e987eab321d090fc.cu
/* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "Diffus_4thO_GPU_core.h" #include "shared.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* CUDA implementation of fourth-order diffusion scheme [1] for piecewise-smooth recovery (2D/3D case) * The minimisation is performed using explicit scheme. * * Input Parameters: * 1. Noisy image/volume * 2. lambda - regularization parameter * 3. Edge-preserving parameter (sigma) * 4. Number of iterations, for explicit scheme >= 150 is recommended * 5. tau - time-marching step for explicit scheme * 6. eplsilon: tolerance constant * 7. GPU device number if for multigpu run (default 0) * Output: * [1] Filtered/regularized image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the paper by * [1] Hajiaboli, M.R., 2011. An anisotropic fourth-order diffusion filter for image noise removal. International Journal of Computer Vision, 92(2), pp.177-191. */ #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define EPS 1.0e-7 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) /********************************************************************/ /***************************2D Functions*****************************/ /********************************************************************/ __global__ void Weighted_Laplc2D_kernel(float *W_Lapl, float *U0, float sigma, int dimX, int dimY) { int i1,i2,j1,j2; float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; gradX = 0.5f*(U0[j*dimX+i2] - U0[j*dimX+i1]); gradX_sq = powf(gradX,2); gradY = 0.5f*(U0[j2*dimX+i] - U0[j1*dimX+i]); gradY_sq = powf(gradY,2); gradXX = U0[j*dimX+i2] + U0[j*dimX+i1] - 2*U0[index]; gradYY = U0[j2*dimX+i] + U0[j1*dimX+i] - 2*U0[index]; gradXY = 0.25f*(U0[j2*dimX+i2] + U0[j1*dimX+i1] - U0[j1*dimX+i2] - U0[j2*dimX+i1]); xy_2 = 2.0f*gradX*gradY*gradXY; denom = gradX_sq + gradY_sq; if (denom <= EPS) { V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/EPS; V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/EPS; } else { V_norm = (gradXX*gradX_sq + xy_2 + gradYY*gradY_sq)/denom; V_orth = (gradXX*gradY_sq - xy_2 + gradYY*gradX_sq)/denom; } c = 1.0f/(1.0f + denom/sigma); c_sq = c*c; W_Lapl[index] = c_sq*V_norm + c*V_orth; } return; } __global__ void Diffusion_update_step2D_kernel(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, int dimX, int dimY) { int i1,i2,j1,j2; float gradXXc, gradYYc; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int index = i + dimX*j; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; gradXXc = W_Lapl[j*dimX+i2] + W_Lapl[j*dimX+i1] - 2*W_Lapl[index]; gradYYc = W_Lapl[j2*dimX+i] + W_Lapl[j1*dimX+i] - 2*W_Lapl[index]; Output[index] += tau*(-lambdaPar*(gradXXc + gradYYc) - (Output[index] - Input[index])); } return; } /********************************************************************/ /***************************3D Functions*****************************/ /********************************************************************/ __global__ void Weighted_Laplc3D_kernel(float *W_Lapl, float *U0, float sigma, int dimX, int dimY, int dimZ) { int i1,i2,j1,j2,k1,k2; float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, xy_2, denom, V_norm, V_orth, c, c_sq, gradZ, gradZ_sq, gradZZ, gradXZ, gradYZ, xyz_1, xyz_2; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; k1 = k+1; if (k1 == dimZ) k1 = k-1; k2 = k-1; if (k2 < 0) k2 = k+1; int index = (dimX*dimY)*k + j*dimX+i; gradX = 0.5f*(U0[(dimX*dimY)*k + j*dimX+i2] - U0[(dimX*dimY)*k + j*dimX+i1]); gradX_sq = pow(gradX,2); gradY = 0.5f*(U0[(dimX*dimY)*k + j2*dimX+i] - U0[(dimX*dimY)*k + 
j1*dimX+i]); gradY_sq = pow(gradY,2); gradZ = 0.5f*(U0[(dimX*dimY)*k2 + j*dimX+i] - U0[(dimX*dimY)*k1 + j*dimX+i]); gradZ_sq = pow(gradZ,2); gradXX = U0[(dimX*dimY)*k + j*dimX+i2] + U0[(dimX*dimY)*k + j*dimX+i1] - 2*U0[index]; gradYY = U0[(dimX*dimY)*k + j2*dimX+i] + U0[(dimX*dimY)*k + j1*dimX+i] - 2*U0[index]; gradZZ = U0[(dimX*dimY)*k2 + j*dimX+i] + U0[(dimX*dimY)*k1 + j*dimX+i] - 2*U0[index]; gradXY = 0.25f*(U0[(dimX*dimY)*k + j2*dimX+i2] + U0[(dimX*dimY)*k + j1*dimX+i1] - U0[(dimX*dimY)*k + j1*dimX+i2] - U0[(dimX*dimY)*k + j2*dimX+i1]); gradXZ = 0.25f*(U0[(dimX*dimY)*k2 + j*dimX+i2] - U0[(dimX*dimY)*k2+j*dimX+i1] - U0[(dimX*dimY)*k1+j*dimX+i2] + U0[(dimX*dimY)*k1+j*dimX+i1]); gradYZ = 0.25f*(U0[(dimX*dimY)*k2 +j2*dimX+i] - U0[(dimX*dimY)*k2+j1*dimX+i] - U0[(dimX*dimY)*k1+j2*dimX+i] + U0[(dimX*dimY)*k1+j1*dimX+i]); xy_2 = 2.0f*gradX*gradY*gradXY; xyz_1 = 2.0f*gradX*gradZ*gradXZ; xyz_2 = 2.0f*gradY*gradZ*gradYZ; denom = gradX_sq + gradY_sq + gradZ_sq; if (denom <= EPS) { V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/EPS; V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/EPS; } else { V_norm = (gradXX*gradX_sq + gradYY*gradY_sq + gradZZ*gradZ_sq + xy_2 + xyz_1 + xyz_2)/denom; V_orth = ((gradY_sq + gradZ_sq)*gradXX + (gradX_sq + gradZ_sq)*gradYY + (gradX_sq + gradY_sq)*gradZZ - xy_2 - xyz_1 - xyz_2)/denom; } c = 1.0f/(1.0f + denom/sigma); c_sq = c*c; W_Lapl[index] = c_sq*V_norm + c*V_orth; } return; } __global__ void Diffusion_update_step3D_kernel(float *Output, float *Input, float *W_Lapl, float lambdaPar, float sigmaPar2, float tau, int dimX, int dimY, int dimZ) { int i1,i2,j1,j2,k1,k2; float gradXXc, gradYYc, gradZZc; int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) { /* boundary conditions (Neumann reflections) */ i1 = i+1; if (i1 == dimX) i1 = i-1; i2 = i-1; if (i2 < 0) i2 = i+1; j1 = j+1; if (j1 == dimY) j1 = j-1; j2 = j-1; if (j2 < 0) j2 = j+1; k1 = k+1; if (k1 == dimZ) k1 = k-1; k2 = k-1; if (k2 < 0) k2 = k+1; int index = (dimX*dimY)*k + j*dimX+i; gradXXc = W_Lapl[(dimX*dimY)*k + j*dimX+i2] + W_Lapl[(dimX*dimY)*k + j*dimX+i1] - 2*W_Lapl[index]; gradYYc = W_Lapl[(dimX*dimY)*k + j2*dimX+i] + W_Lapl[(dimX*dimY)*k + j1*dimX+i] - 2*W_Lapl[index]; gradZZc = W_Lapl[(dimX*dimY)*k2 + j*dimX+i] + W_Lapl[(dimX*dimY)*k1 + j*dimX+i] - 2*W_Lapl[index]; Output[index] += tau*(-lambdaPar*(gradXXc + gradYYc + gradZZc) - (Output[index] - Input[index])); } return; } __global__ void Diff4thcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void Diff4thResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void Diff4thcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int 
index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void Diff4thResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ /********************* MAIN HOST FUNCTION ******************/ /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ extern "C" int Diffus4th_GPU_main(float *Input, float *Output, float *infovector, float lambdaPar, float sigmaPar, int iterationsNumb, float tau, float epsil, int gpu_device, int N, int M, int Z) { int deviceCount = -1; // number of devices cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(cudaSetDevice(gpu_device)); int dimTotal, n, count = 0; float *d_input, *d_output, *d_W_Lapl, *d_update_prev=NULL, re; re = 0.0f; float sigmaPar2; sigmaPar2 = sigmaPar*sigmaPar; dimTotal = N*M*Z; CHECK(cudaMalloc((void**)&d_input,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&d_output,dimTotal*sizeof(float))); CHECK(cudaMalloc((void**)&d_W_Lapl,dimTotal*sizeof(float))); if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,dimTotal*sizeof(float)) ); CHECK(cudaMemcpy(d_input,Input,dimTotal*sizeof(float),cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_output,Input,dimTotal*sizeof(float),cudaMemcpyHostToDevice)); /*2D case */ dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D)); dim3 dimBlock3(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid3(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE),idivup(Z,BLKZSIZE)); for(n=0; n < iterationsNumb; n++) { if ((epsil != 0.0f) && (n % 5 == 0)) { if (Z == 1) Diff4thcopy_kernel2D<<<dimGrid,dimBlock>>>(d_output, d_update_prev, N, M, dimTotal); else Diff4thcopy_kernel3D<<<dimGrid3,dimBlock3>>>(d_output, d_update_prev, N, M, Z, dimTotal); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } if (Z == 1) { /*2D case */ /* Calculating weighted Laplacian */ Weighted_Laplc2D_kernel<<<dimGrid,dimBlock>>>(d_W_Lapl, d_output, sigmaPar2, N, M); CHECK(cudaDeviceSynchronize()); /* Perform iteration step */ Diffusion_update_step2D_kernel<<<dimGrid,dimBlock>>>(d_output, d_input, d_W_Lapl, lambdaPar, sigmaPar2, tau, N, M); CHECK(cudaDeviceSynchronize()); } else { /* Calculating weighted Laplacian */ Weighted_Laplc3D_kernel<<<dimGrid3,dimBlock3>>>(d_W_Lapl, d_output, sigmaPar2, N, M, Z); CHECK(cudaDeviceSynchronize()); /* Perform iteration step */ Diffusion_update_step3D_kernel<<<dimGrid3,dimBlock3>>>(d_output, d_input, d_W_Lapl, lambdaPar, sigmaPar2, tau, N, M, Z); CHECK(cudaDeviceSynchronize()); } if ((epsil != 0.0f) && (n % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ if (Z == 1) Diff4thResidCalc2D_kernel<<<dimGrid,dimBlock>>>(d_output, d_update_prev, d_W_Lapl, N, M, dimTotal); else Diff4thResidCalc3D_kernel<<<dimGrid3,dimBlock3>>>(d_output, d_update_prev, d_W_Lapl, N, M, Z, dimTotal); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors( cudaPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(d_W_Lapl, d_W_Lapl + dimTotal); float reduction = 
std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_output, d_output + dimTotal); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } CHECK(cudaMemcpy(Output,d_output,dimTotal*sizeof(float),cudaMemcpyDeviceToHost)); CHECK(cudaFree(d_input)); CHECK(cudaFree(d_output)); CHECK(cudaFree(d_W_Lapl)); if (epsil != 0.0f) cudaFree(d_update_prev); /*adding info into info_vector */ infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ cudaDeviceSynchronize(); return 0; }
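The stopping rule above evaluates the relative change re = ||u_n - u_prev|| / ||u_n|| every fifth iteration via thrust::transform_reduce, using a square functor that is presumably supplied by shared.h. To make that reduction readable on its own, here is a self-contained sketch of an L2 norm over a device buffer; the functor definition is an assumption about what shared.h provides, and the helper name is illustrative.

#include <cmath>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>

// Assumed shape of the 'square' functor pulled in from shared.h.
template <typename T>
struct square_op {
    __host__ __device__ T operator()(const T& x) const { return x * x; }
};

// L2 norm of a device buffer of length n (sketch of the norm used in the stopping rule).
float l2_norm_device(float* d_ptr, int n)
{
    thrust::device_ptr<float> p(d_ptr);
    float sum_sq = thrust::transform_reduce(p, p + n,
                                            square_op<float>(), 0.0f,
                                            thrust::plus<float>());
    return std::sqrt(sum_sq);
}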
acf59d1809ef87a08a3d1cee8f36b3e0d359c9e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/native/TensorTransformations.h" #include "ATen/hip/detail/IndexUtils.cuh" #include "ATen/NativeFunctions.h" #include "ATen/hip/HIPContext.h" #include <cstddef> #include <vector> namespace at { namespace native { #define AT_APPLY_THREADS_PER_BLOCK 32 * 16 #define AT_APPLY_BLOCKS_PER_SM 4 template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 __launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) #endif __global__ void kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> __global__ void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } // Flip tensor given a list of dims Tensor flip_cuda(const Tensor& self, IntList dims) { auto in_tensor = self; const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel(); flip_check_errors(total_dims, flip_dims_size, dims); int64_t block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } auto flip_dims = std::vector<int64_t>(dims); wrap_all_dims(flip_dims, total_dims); // use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor); auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor); int flip_dim = in_tensor_info.collapseDims(flip_dims[0]); out_tensor_info.collapseDims(flip_dims[0]); hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>) , dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor_info, 
out_tensor_info, N, flip_dim, total_dims); }); return out_tensor; } auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}); auto shape = std::vector<int64_t>(in_tensor.sizes()); auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())}); auto strides = std::vector<int64_t>(in_tensor.strides()); auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())}); // stride_contiguous is the stride of non-contiguous tensor after calling contiguous(), // it is used to compute indices for each element in non-contiguous tensor Tensor stride_contiguous = at::zeros({total_dims}, kLong); int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>(); for (int64_t i = total_dims - 1; i >= 0; i--) { if (i == total_dims - 1) { stride_contiguous_d[i] = 1; } else { stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1]; } } AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims); }); return out_tensor; } }} // namespace at::native
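The kernel above maps each output linear index to a (possibly strided) source offset in three steps: decompose the index into per-dimension coordinates using the strides of a contiguous tensor of the same shape, mirror the coordinates belonging to flipped dimensions, then recombine with the input's real strides. A plain C++ reference for that index arithmetic is sketched below; the function and parameter names are illustrative, not part of ATen.

#include <cstddef>
#include <cstdint>
#include <vector>

// Reference for the index arithmetic in flip_cuda_kernel:
// out[linear_index] = in[flipped_source_offset(linear_index, ...)]
int64_t flipped_source_offset(int64_t linear_index,
                              const std::vector<int64_t>& shape,
                              const std::vector<int64_t>& strides,            // input strides
                              const std::vector<int64_t>& strides_contiguous, // strides of a contiguous tensor with `shape`
                              const std::vector<int64_t>& flip_dims)
{
    int64_t cur = linear_index, offset = 0;
    for (std::size_t i = 0; i < shape.size(); ++i) {
        int64_t coord = cur / strides_contiguous[i];    // coordinate along dim i
        cur -= coord * strides_contiguous[i];           // remainder for the later dims
        for (int64_t d : flip_dims)
            if ((int64_t)i == d) coord = shape[i] - 1 - coord;  // mirror flipped dims
        offset += coord * strides[i];
    }
    return offset;
}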
acf59d1809ef87a08a3d1cee8f36b3e0d359c9e1.cu
#include "ATen/native/TensorTransformations.h" #include "ATen/cuda/detail/IndexUtils.cuh" #include "ATen/NativeFunctions.h" #include "ATen/cuda/CUDAContext.h" #include <cstddef> #include <vector> namespace at { namespace native { #define AT_APPLY_THREADS_PER_BLOCK 32 * 16 #define AT_APPLY_BLOCKS_PER_SM 4 template <typename scalar_t, typename IndexType> #if __CUDA_ARCH__ >= 350 __launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM) #endif __global__ void kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info, cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info, IndexType N, int flip_dim, IndexType total_dims) { for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) { IndexType dst_offset = 0; if (flip_dim == 0) { // flip 1st dim dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0]; } else { // flip last dim IndexType i = total_dims - 1; dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]); } out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index]; } } template <typename scalar_t> __global__ void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index >= N) { return; } int64_t cur_indices = linear_index, rem = 0, dst_offset = 0; for (int64_t i = 0; i < total_dims; i++) { int64_t temp = cur_indices; cur_indices = cur_indices / strides_contiguous[i]; rem = temp - cur_indices * strides_contiguous[i]; // flip the indices if it is in flip_dims for (int64_t j = 0; j < flip_dims_size; j++) { if (i == flip_dims[j]) { cur_indices = shape[i] - 1 - cur_indices; } } dst_offset += cur_indices * strides[i]; cur_indices = rem; } out_tensor[linear_index] = in_tensor[dst_offset]; } // Flip tensor given a list of dims Tensor flip_cuda(const Tensor& self, IntList dims) { auto in_tensor = self; const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel(); flip_check_errors(total_dims, flip_dims_size, dims); int64_t block_size = 512; dim3 dim_block(block_size); dim3 dim_grid((N + block_size - 1) / block_size); auto out_tensor = at::empty_like(in_tensor); if (out_tensor.numel() == 0) { return out_tensor; } auto flip_dims = std::vector<int64_t>(dims); wrap_all_dims(flip_dims, total_dims); // use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) { AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor); auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor); int flip_dim = in_tensor_info.collapseDims(flip_dims[0]); out_tensor_info.collapseDims(flip_dims[0]); kernel_pointwise_flip_apply2<scalar_t, int64_t> <<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor_info, out_tensor_info, N, flip_dim, total_dims); }); return out_tensor; } auto flip_dims_t = 
at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}); auto shape = std::vector<int64_t>(in_tensor.sizes()); auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())}); auto strides = std::vector<int64_t>(in_tensor.strides()); auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())}); // stride_contiguous is the stride of non-contiguous tensor after calling contiguous(), // it is used to compute indices for each element in non-contiguous tensor Tensor stride_contiguous = at::zeros({total_dims}, kLong); int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>(); for (int64_t i = total_dims - 1; i >= 0; i--) { if (i == total_dims - 1) { stride_contiguous_d[i] = 1; } else { stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1]; } } AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] { flip_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( in_tensor.data<scalar_t>(), out_tensor.data<scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims); }); return out_tensor; } }} // namespace at::native
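The host loop above builds stride_contiguous back to front, clamping each dimension size to at least 1 so that a size-0 dimension does not zero out the remaining strides. Extracted as a standalone helper (the name is illustrative, not an ATen function), the same computation reads:

#include <algorithm>
#include <cstdint>
#include <vector>

// Row-major ("contiguous") strides for a given shape, mirroring the
// stride_contiguous computation in flip_cuda.
std::vector<int64_t> contiguous_strides(const std::vector<int64_t>& shape)
{
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = (int64_t)shape.size() - 2; i >= 0; --i)
        strides[i] = std::max<int64_t>(shape[i + 1], 1) * strides[i + 1];
    return strides;
}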
3417191e21e3f9a0a0044b6e945174843874484b.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #include <algorithm> #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. // Column Major for Matrix A, B and C. // Note that if the output is column major, the bias has to be per row. i.e. every row has different bias. // If the output is row major, the bias has to be per column, i.e. every column has different bias. // Below list some other notices: // 1) we only have row major epilogue. // 2) we swap A and B if the output is column major then we can still use the // row major epilogue. // 3) Mx1 bias vector becomes 1xM after the swapping/transposing. // 4) we can use the existing OutputIterator to load 1xM bias vector. 
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;

// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;

// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;

// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<128, 128, 32>;  // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>;  // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>;  // <- MMA Op tile M = 16, N = 8, K = 8

// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- identity swizzle: threadblocks map directly to output tiles

// Define the epilogue operation as LinearCombinationRelu. This is approximately equal to
//
//    d_ij = max(0, alpha * sum_k(a_ik * b_kj) + c_ij )
//
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
    ElementOutput,                                     // <- data type of output matrix
    128 / cutlass::sizeof_bits<ElementOutput>::value,  // <- this is the number of elements per
                                                       // vectorized memory access. ElementOutput is
                                                       // float here, so this is 4 elements (it would
                                                       // be 8 for half precision). This becomes the
                                                       // vector width of math instructions in the
                                                       // epilogue too
    ElementAccumulator,                                // <- data type of accumulator
    ElementComputeEpilogue,                            // <- data type for alpha in linear combination function
    cutlass::epilogue::thread::ScaleType::NoBetaScaling>;  // <- no beta scaling: D = ReLU(alpha * accumulator + bias)

// Number of pipelines you want to use
constexpr int NumStages = 2;

using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
                                         LayoutInputA,
                                         ElementInputB,
                                         LayoutInputB,
                                         ElementOutput,
                                         LayoutOutput,
                                         ElementAccumulator,
                                         MMAOp,
                                         SmArch,
                                         ShapeMMAThreadBlock,
                                         ShapeMMAWarp,
                                         ShapeMMAOp,
                                         EpilogueOp,
                                         SwizzleThreadBlock,
                                         NumStages>;

int run() {

  const int length_m = 5120;
  const int length_n = 4096;
  const int length_k = 4096;

  // Create a tuple of problem size for matrix multiplication
  cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);

  // Initialize tensors using CUTLASS helper functions
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
      problem_size.mk());  // <- Create matrix A with dimensions M x K
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
      problem_size.kn());  // <- Create matrix B with dimensions K x N
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias(
      {problem_size.m(), 1});  // <- Create matrix C with dimensions M x 1
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           // CUTLASS kernel
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           // reference kernel

  // Fill input and output matrices on host using CUTLASS helper functions
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4),
      0);  // <- Fill matrix A on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4),
      0);  // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform( tensor_c_bias.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c_bias.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device {tensor_c_bias.device_data(), 0}, // <- the C matrix is treated as the bias vector. We can enable the GEMM // to project away the N dimension by setting the stride to zero. tensor_d.device_ref(), // <- reference to matrix D on device {alpha}, // <- alpha split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference gemm kernel // cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device_reference; // Launch device reference to compute strictly the product A * B gemm_device_reference( problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), 0, tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Compute bias + relu in host code for (int i = 0; i < problem_size.m(); ++i) { for (int j = 0; j < problem_size.n(); ++j) { tensor_ref_d.at({i, j}) = ::max( ElementOutput(0), ElementOutput(tensor_ref_d.at({i, j}) + tensor_c_bias.at({i, 0})) ); } } // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main() { bool notSupported = false; // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. 
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
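As a reading aid for the epilogue configured above: with ScaleType::NoBetaScaling the per-element result is d = max(0, alpha * acc + bias_i), which (with alpha = 1 here) is exactly what the host verification loop recomputes. A minimal scalar sketch, illustrative only and not a CUTLASS API:

// Illustrative only: per-element effect of LinearCombinationRelu with NoBetaScaling.
#include <algorithm>
#include <cstdio>

float epilogue_relu_bias(float acc, float bias, float alpha = 1.0f) {
  // no beta term: the C operand is a per-row bias broadcast across columns
  return std::max(0.0f, alpha * acc + bias);
}

int main() {
  // acc = -2.5 with bias 1.0 clamps to 0; acc = 3.0 with bias 1.0 gives 4.0
  std::printf("%f %f\n", epilogue_relu_bias(-2.5f, 1.0f), epilogue_relu_bias(3.0f, 1.0f));
  return 0;
}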
3417191e21e3f9a0a0044b6e945174843874484b.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** */ #include <algorithm> #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/epilogue/thread/linear_combination_relu.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. // Column Major for Matrix A, B and C. // Note that if the output is column major, the bias has to be per row. i.e. every row has different bias. // If the output is row major, the bias has to be per column, i.e. every column has different bias. // Below list some other notices: // 1) we only have row major epilogue. // 2) we swap A and B if the output is column major then we can still use the // row major epilogue. // 3) Mx1 bias vector becomes 1xM after the swapping/transposing. // 4) we can use the existing OutputIterator to load 1xM bias vector. 
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;

// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;

// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm75;

// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
    cutlass::gemm::GemmShape<128, 128, 32>;  // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>;  // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>;  // <- MMA Op tile M = 16, N = 8, K = 8

// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>;  // <- identity swizzle: threadblocks map directly to output tiles

// Define the epilogue operation as LinearCombinationRelu. This is approximately equal to
//
//    d_ij = max(0, alpha * sum_k(a_ik * b_kj) + c_ij )
//
using EpilogueOp = cutlass::epilogue::thread::LinearCombinationRelu<
    ElementOutput,                                     // <- data type of output matrix
    128 / cutlass::sizeof_bits<ElementOutput>::value,  // <- this is the number of elements per
                                                       // vectorized memory access. ElementOutput is
                                                       // float here, so this is 4 elements (it would
                                                       // be 8 for half precision). This becomes the
                                                       // vector width of math instructions in the
                                                       // epilogue too
    ElementAccumulator,                                // <- data type of accumulator
    ElementComputeEpilogue,                            // <- data type for alpha in linear combination function
    cutlass::epilogue::thread::ScaleType::NoBetaScaling>;  // <- no beta scaling: D = ReLU(alpha * accumulator + bias)

// Number of pipelines you want to use
constexpr int NumStages = 2;

using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
                                         LayoutInputA,
                                         ElementInputB,
                                         LayoutInputB,
                                         ElementOutput,
                                         LayoutOutput,
                                         ElementAccumulator,
                                         MMAOp,
                                         SmArch,
                                         ShapeMMAThreadBlock,
                                         ShapeMMAWarp,
                                         ShapeMMAOp,
                                         EpilogueOp,
                                         SwizzleThreadBlock,
                                         NumStages>;

int run() {

  const int length_m = 5120;
  const int length_n = 4096;
  const int length_k = 4096;

  // Create a tuple of problem size for matrix multiplication
  cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);

  // Initialize tensors using CUTLASS helper functions
  cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
      problem_size.mk());  // <- Create matrix A with dimensions M x K
  cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
      problem_size.kn());  // <- Create matrix B with dimensions K x N
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c_bias(
      {problem_size.m(), 1});  // <- Create matrix C with dimensions M x 1
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           // CUTLASS kernel
  cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
      problem_size.mn());  // <- Create matrix D with dimensions M x N used to store output from
                           // reference kernel

  // Fill input and output matrices on host using CUTLASS helper functions
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4),
      0);  // <- Fill matrix A on host with uniform-distribution random data
  cutlass::reference::host::TensorFillRandomUniform(
      tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4),
      0);  // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform( tensor_c_bias.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c_bias.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{ problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device {tensor_c_bias.device_data(), 0}, // <- the C matrix is treated as the bias vector. We can enable the GEMM // to project away the N dimension by setting the stride to zero. tensor_d.device_ref(), // <- reference to matrix D on device {alpha}, // <- alpha split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Check the problem size is supported or not cutlass::Status status = gemm_op.can_implement(arguments); CUTLASS_CHECK(status); // Initialize CUTLASS kernel with arguments and workspace pointer status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // // Create instantiation for device reference gemm kernel // cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device_reference; // Launch device reference to compute strictly the product A * B gemm_device_reference( problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), 0, tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Compute bias + relu in host code for (int i = 0; i < problem_size.m(); ++i) { for (int j = 0; j < problem_size.n(); ++j) { tensor_ref_d.at({i, j}) = std::max( ElementOutput(0), ElementOutput(tensor_ref_d.at({i, j}) + tensor_c_bias.at({i, 0})) ); } } // Check if output from CUTLASS kernel and reference kernel are equal or not std::cout << (cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()) ? "Passed" : "Failed") << std::endl; CUTLASS_CHECK(status); return 0; } int main() { bool notSupported = false; // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples. 
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
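A note on the {tensor_c_bias.device_data(), 0} argument used in both copies above: for the column-major C operand, a leading dimension of 0 makes element (i, j) resolve to offset i + j * 0 = i, so all N columns read the same M x 1 bias vector; this is also why, after the A/B swap described in the layout comments, the M x 1 bias behaves like a 1 x M vector for the row-major epilogue. A small host-side sketch of that addressing, illustrative and not CUTLASS code:

// Illustrative only: column-major addressing with a zero leading dimension,
// which is how the M x 1 bias is broadcast to every output column.
#include <cstdio>
#include <vector>

float load_colmajor(const std::vector<float>& data, int i, int j, int ld) {
  // element (i, j) of a column-major matrix with leading dimension ld
  return data[static_cast<size_t>(i) + static_cast<size_t>(j) * ld];
}

int main() {
  std::vector<float> bias = {0.5f, -1.0f, 2.0f};  // M = 3 bias vector
  for (int j = 0; j < 4; ++j)                     // every column (N = 4) sees the same values
    std::printf("col %d: %f %f %f\n", j,
                load_colmajor(bias, 0, j, /*ld=*/0),
                load_colmajor(bias, 1, j, 0),
                load_colmajor(bias, 2, j, 0));
  return 0;
}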
65f6eb3cf896ddd9de8860ada5141099f0691e65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* 29/12/2019 hmhuan-1612858 nnkhai-1612909 */ #include <stdio.h> #include <stdint.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { hipEvent_t start; hipEvent_t stop; GpuTimer() { hipEventCreate(&start); hipEventCreate(&stop); } ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); } void Start() { hipEventRecord(start, 0); hipEventSynchronize(start); } void Stop() { hipEventRecord(stop, 0); } float Elapsed() { float elapsed; hipEventSynchronize(stop); hipEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void sortByHost(const uint32_t * in, int n, uint32_t * out, int nBits) { int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * sizeof(int)); int * histScan = (int *)malloc(nBins * sizeof(int)); uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist, 0, nBins * sizeof(int)); for (int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins - 1); hist[bin]++; } // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0] = 0; for (int bin = 1; bin < nBins; bin++) histScan[bin] = histScan[bin - 1] + hist[bin - 1]; // TODO: From "histScan", scatter elements in "src" to correct locations in "dst" for (int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins - 1); dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t * temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" memcpy(out, src, n * sizeof(uint32_t)); // Free memories free(hist); free(histScan); free(originalSrc); } void sortRadixBase04(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { dim3 blkSize1(blockSizes[0]); // block size for histogram kernel dim3 blkSize2(blockSizes[1]); // block size for scan kernel dim3 gridSize((n - 1) / blkSize1.x + 1); // grid size for histogram kernel // TODO int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * gridSize.x * sizeof(int)); int *histScan = (int * )malloc(nBins * gridSize.x * sizeof(int)); uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; int nHist = nBins * gridSize.x; for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist, 0, nHist * sizeof(int)); for (int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins - 1); hist[bin * gridSize.x + i / blkSize1.x]++; } // TODO: Exclusive scan histScan[0] = 0; for (int i = 1; i < nHist; i++) histScan[i] = histScan[i - 1] + hist[i - 1]; // TODO: Scatter for (int i = 0; i < n ; i++) { int bin = i / blkSize1.x + ((src[i] >> bit) & (nBins - 1)) * gridSize.x; dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t * temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" memcpy(out, src, n * sizeof(uint32_t)); // Free memories free(hist); 
free(histScan); free(originalSrc); } // histogram kernel __global__ void computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit) { // Each block computes its local hist using atomic on SMEM extern __shared__ int s_bin[]; int i = blockIdx.x * blockDim.x + threadIdx.x; int delta = (nBins - 1) / blockDim.x + 1; for (int j = 0; j < delta; j++) { int id = threadIdx.x + j * blockDim.x; if (id < nBins) s_bin[id] = 0; } __syncthreads(); if (i < n) { int bin = (in[i] >> bit) & (nBins - 1); atomicAdd(&s_bin[bin], 1); } __syncthreads(); for (int j = 0; j < delta; j++) { int id = threadIdx.x + j * blockDim.x; if (id < nBins) hist[id * gridDim.x + blockIdx.x] += s_bin[id]; } } __global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums, int mode = 1) { extern __shared__ int s_data[]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > 0 && i < n) s_data[blockDim.x - 1 - threadIdx.x] = in[i - 1]; else s_data[blockDim.x - 1 - threadIdx.x] = 0; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int val = 0; if (threadIdx.x < blockDim.x - stride) val = s_data[threadIdx.x + stride]; __syncthreads(); s_data[threadIdx.x] += val; __syncthreads(); } if (i < n) out[i] = s_data[blockDim.x - 1 - threadIdx.x]; if (blkSums != NULL) blkSums[blockIdx.x] = s_data[0]; } __global__ void addBlkSums(int * in, int n, int* blkSums) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n && blockIdx.x > 0) in[i] += blkSums[blockIdx.x - 1]; } __global__ void Scatter(uint32_t * in, int n, int nBits, int bit, int nBins, int *histScan, uint32_t * out) { extern __shared__ int s_data[]; int * s_in = s_data; int * s_hist = (int *)&s_in[blockDim.x]; int *dst = (int *)&s_hist[blockDim.x]; int *dst_ori = (int *)&dst[blockDim.x]; int *startIndex = (int *)&dst_ori[blockDim.x]; // Cp pht nBins int * scan = (int *)&startIndex[nBins]; int * hist = (int *)&scan[blockDim.x]; int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < n) { s_in[threadIdx.x] = in[id]; s_hist[threadIdx.x] = (s_in[threadIdx.x] >> bit) & (nBins - 1); } else s_hist[threadIdx.x] = nBins - 1; scan[threadIdx.x] = 0; __syncthreads(); // TODO: B1 - sort radix with k = 1 for (int b = 0; b < nBits; b++) { // compute hist int _hist = s_hist[threadIdx.x]; int _in = s_in[threadIdx.x]; int _bin = (_hist >> b) & 1; hist[threadIdx.x] = _bin; if (threadIdx.x < blockDim.x - 1) scan[threadIdx.x + 1] = _bin; __syncthreads(); int _last_hist = hist[blockDim.x - 1]; for (int stride = 1; stride < blockDim.x; stride *= 2) { int val = 0; if (threadIdx.x >= stride) val = scan[threadIdx.x - stride]; __syncthreads(); scan[threadIdx.x] += val; __syncthreads(); } __syncthreads(); // scatter int scan_ = scan[threadIdx.x]; int nZeros = blockDim.x - scan[blockDim.x - 1] - _last_hist;//hist[blockDim.x - 1]; int rank = 0; if (_bin == 0) rank = threadIdx.x - scan_;//scan[threadIdx.x]; else rank = nZeros + scan_;//scan[threadIdx.x]; dst[rank] = _hist;//s_hist[threadIdx.x]; dst_ori[rank] = _in;//s_in[threadIdx.x]; __syncthreads(); // copy or swap s_hist[threadIdx.x] = dst[threadIdx.x]; s_in[threadIdx.x] = dst_ori[threadIdx.x]; } int _hist = s_hist[threadIdx.x]; int _in = s_in[threadIdx.x]; __syncthreads(); // TODO: B2 + B3 if (threadIdx.x == 0) startIndex[_hist] = 0; else { if (_hist != s_hist[threadIdx.x - 1]) startIndex[_hist] = threadIdx.x; } __syncthreads(); // TODO: B4 real scatter if (id < n) { int preRank = threadIdx.x - startIndex[_hist]; int bin = ((_in >> bit) & (nBins - 1)); int scan = histScan[bin * gridDim.x + blockIdx.x]; //int rank 
= scan + preRank; out[scan + preRank] = _in; } } void sortRadixBase04_device(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { int nBins = 1 << nBits; dim3 blkSize1(blockSizes[0]); // block size for histogram kernel dim3 blkSize2(blockSizes[1]); // block size for scan kernel dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel dim3 gridSize2((nBins * gridSize1.x - 1) / blkSize2.x + 1); int * blkSums = (int *)malloc(gridSize2.x * sizeof(int)); uint32_t * d_src, *d_dst; int *d_scan, *d_blkSums; // float time_hist = 0, time_scan = 0, time_add = 0, time_scatter = 0; CHECK(hipMalloc(&d_src, n * sizeof(uint32_t))); CHECK(hipMalloc(&d_dst, n * sizeof(uint32_t))); CHECK(hipMalloc(&d_scan, nBins * gridSize1.x * sizeof(int))); CHECK(hipMalloc(&d_blkSums, gridSize2.x * sizeof(int))); CHECK(hipMemcpy(d_src, in, n * sizeof(uint32_t), hipMemcpyHostToDevice)); size_t sMemSize2 = blkSize2.x * sizeof(int); //GpuTimer timer; for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit CHECK(hipMemset(d_scan, 0, nBins * gridSize1.x * sizeof(int))); //timer.Start(); hipLaunchKernelGGL(( computeHistKernel), dim3(gridSize1), dim3(blkSize1), nBins * sizeof(int), 0, d_src, n, d_scan, nBins, bit); //timer.Stop(); //time_hist += timer.Elapsed(); hipDeviceSynchronize(); CHECK(hipGetLastError()); // TODO: Scan // timer.Start(); hipLaunchKernelGGL(( scanBlkKernel), dim3(gridSize2), dim3(blkSize2), sMemSize2, 0, d_scan, nBins * gridSize1.x, d_scan, d_blkSums); hipDeviceSynchronize(); CHECK(hipGetLastError()); // timer.Stop(); // time_scan += timer.Elapsed(); CHECK(hipMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 1; i < gridSize2.x; i++) blkSums[i] += blkSums[i - 1]; CHECK(hipMemcpy(d_blkSums, blkSums, gridSize2.x * sizeof(int), hipMemcpyHostToDevice)); // timer.Start(); hipLaunchKernelGGL(( addBlkSums), dim3(gridSize2), dim3(blkSize2), 0, 0, d_scan, nBins * gridSize1.x, d_blkSums); hipDeviceSynchronize(); CHECK(hipGetLastError()); // timer.Stop(); // time_add += timer.Elapsed(); // TODO: Scatter // timer.Start(); hipLaunchKernelGGL(( Scatter), dim3(gridSize1), dim3(blkSize1), (blkSize1.x * 6 + nBins) * sizeof(int), 0, d_src, n, nBits, bit, nBins, d_scan, d_dst); hipDeviceSynchronize(); CHECK(hipGetLastError()); // timer.Stop(); // time_scatter += timer.Elapsed(); // TODO: Swap "src" and "dst" uint32_t * temp = d_src; d_src = d_dst; d_dst = temp; } // printf("Time compute hist: %.3f ms\n", time_hist); // printf("Time scan: %.3f ms\n", time_scan); // printf("Time add blkSums: %.3f ms\n", time_add); // printf("Time Scatter: %.3f ms\n\n", time_scatter); // TODO: Copy result to "out" CHECK(hipMemcpy(out, d_src, n * sizeof(uint32_t), hipMemcpyDeviceToHost)); // Free memories free(blkSums); CHECK(hipFree(d_src)); CHECK(hipFree(d_dst)); CHECK(hipFree(d_scan)); CHECK(hipFree(d_blkSums)); } void sortByDevice_thrust(const uint32_t * in, int n, uint32_t * out) { // TODO thrust::device_vector<uint32_t> dv_out(in, in + n); thrust::sort(dv_out.begin(), dv_out.end()); thrust::copy(dv_out.begin(), dv_out.end(), out); } GpuTimer timer; float sort(const uint32_t * in, int n, uint32_t * out, int nBits, int useDevice=0, int * blockSizes=NULL) { timer.Start(); if (useDevice == 0) { printf("\nRadix sort by host\n"); sortByHost(in, n, out, nBits); } else if (useDevice == 1) { printf("\nRadix sort by host level 1\n"); sortRadixBase04(in, n, out, nBits, blockSizes); //use default 8 } else if (useDevice == 
2) { sortRadixBase04_device(in, n, out, nBits, blockSizes); } else { printf("\nSort by thrust\n"); sortByDevice_thrust(in, n, out); } timer.Stop(); float time = timer.Elapsed(); if (useDevice != 2) printf("Time: %.3f ms\n", time); return time; } void printDeviceInfo() { hipDeviceProp_t devProv; CHECK(hipGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("%d with %d != %d\n", i, out[i], correctOut[i]); printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } void printArray(uint32_t * a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int nBits = 8; int n = (1 << 24) + 1; if (argc > 1) nBits = atoi(argv[1]); printf("\nInput size: %d\n", n); printf("nBits: %d\n", nBits); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t * in = (uint32_t *)malloc(bytes); uint32_t * out_0 = (uint32_t *)malloc(bytes); // base 4 Host result uint32_t * out_1 = (uint32_t *)malloc(bytes); // base 4 Device result uint32_t * out_thrust = (uint32_t *)malloc(bytes); // result by Thrust uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand(); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits); sort(in, n, out_0, nBits, 1, blockSizes); checkCorrectness(out_0, correctOut, n); float avg_time = 0; int loop = 16; printf("\nRadix sort by device level 2\n"); for (int i = 0; i < loop; i++) { float time = sort(in, n, out_1, nBits, 2, blockSizes); avg_time += time; //printf("loop %d: %.3f ms\n", i + 1, time); } printf("Avg Time: %.3f ms\n", avg_time / loop); checkCorrectness(out_1, correctOut, n); sort(in, n, out_thrust, nBits, 3, blockSizes); checkCorrectness(out_thrust, out_1, n); // FREE MEMORIES free(in); free(out_0); free(out_thrust); free(out_1); free(correctOut); return EXIT_SUCCESS; }
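Each pass of sortByHost / sortRadixBase04 above is a counting sort on one nBits-wide digit: build a histogram, exclusive-scan it into starting offsets, then scatter in input order so the pass stays stable. A tiny self-contained illustration of a single 2-bit pass (not part of the original file):

/* Illustrative only: one nBits = 2 pass of the counting sort used by sortByHost,
   on a tiny array so the hist -> exclusive scan -> scatter pipeline is easy to follow. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint32_t src[8] = {3, 1, 0, 2, 3, 0, 1, 2};
    uint32_t dst[8];
    int nBits = 2, bit = 0, nBins = 1 << nBits;
    int hist[4] = {0}, histScan[4];

    for (int i = 0; i < 8; i++)                      /* histogram of the current digit */
        hist[(src[i] >> bit) & (nBins - 1)]++;       /* -> {2, 2, 2, 2} */
    histScan[0] = 0;                                 /* exclusive scan -> {0, 2, 4, 6} */
    for (int b = 1; b < nBins; b++)
        histScan[b] = histScan[b - 1] + hist[b - 1];
    for (int i = 0; i < 8; i++) {                    /* scatter in input order (stable) */
        int b = (src[i] >> bit) & (nBins - 1);
        dst[histScan[b]++] = src[i];
    }
    for (int i = 0; i < 8; i++)
        printf("%u ", (unsigned)dst[i]);             /* 0 0 1 1 2 2 3 3 */
    printf("\n");
    return 0;
}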
65f6eb3cf896ddd9de8860ada5141099f0691e65.cu
/* 29/12/2019 hmhuan-1612858 nnkhai-1612909 */ #include <stdio.h> #include <stdint.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } struct GpuTimer { cudaEvent_t start; cudaEvent_t stop; GpuTimer() { cudaEventCreate(&start); cudaEventCreate(&stop); } ~GpuTimer() { cudaEventDestroy(start); cudaEventDestroy(stop); } void Start() { cudaEventRecord(start, 0); cudaEventSynchronize(start); } void Stop() { cudaEventRecord(stop, 0); } float Elapsed() { float elapsed; cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed, start, stop); return elapsed; } }; void sortByHost(const uint32_t * in, int n, uint32_t * out, int nBits) { int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * sizeof(int)); int * histScan = (int *)malloc(nBins * sizeof(int)); uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist, 0, nBins * sizeof(int)); for (int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins - 1); hist[bin]++; } // TODO: Scan "hist" (exclusively) and save the result to "histScan" histScan[0] = 0; for (int bin = 1; bin < nBins; bin++) histScan[bin] = histScan[bin - 1] + hist[bin - 1]; // TODO: From "histScan", scatter elements in "src" to correct locations in "dst" for (int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins - 1); dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t * temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" memcpy(out, src, n * sizeof(uint32_t)); // Free memories free(hist); free(histScan); free(originalSrc); } void sortRadixBase04(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { dim3 blkSize1(blockSizes[0]); // block size for histogram kernel dim3 blkSize2(blockSizes[1]); // block size for scan kernel dim3 gridSize((n - 1) / blkSize1.x + 1); // grid size for histogram kernel // TODO int nBins = 1 << nBits; // 2^nBits int * hist = (int *)malloc(nBins * gridSize.x * sizeof(int)); int *histScan = (int * )malloc(nBins * gridSize.x * sizeof(int)); uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later uint32_t * dst = out; int nHist = nBins * gridSize.x; for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit memset(hist, 0, nHist * sizeof(int)); for (int i = 0; i < n; i++) { int bin = (src[i] >> bit) & (nBins - 1); hist[bin * gridSize.x + i / blkSize1.x]++; } // TODO: Exclusive scan histScan[0] = 0; for (int i = 1; i < nHist; i++) histScan[i] = histScan[i - 1] + hist[i - 1]; // TODO: Scatter for (int i = 0; i < n ; i++) { int bin = i / blkSize1.x + ((src[i] >> bit) & (nBins - 1)) * gridSize.x; dst[histScan[bin]] = src[i]; histScan[bin]++; } // TODO: Swap "src" and "dst" uint32_t * temp = src; src = dst; dst = temp; } // TODO: Copy result to "out" memcpy(out, src, n * sizeof(uint32_t)); // Free memories free(hist); free(histScan); free(originalSrc); } // histogram kernel __global__ void 
computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit) { // Each block computes its local hist using atomic on SMEM extern __shared__ int s_bin[]; int i = blockIdx.x * blockDim.x + threadIdx.x; int delta = (nBins - 1) / blockDim.x + 1; for (int j = 0; j < delta; j++) { int id = threadIdx.x + j * blockDim.x; if (id < nBins) s_bin[id] = 0; } __syncthreads(); if (i < n) { int bin = (in[i] >> bit) & (nBins - 1); atomicAdd(&s_bin[bin], 1); } __syncthreads(); for (int j = 0; j < delta; j++) { int id = threadIdx.x + j * blockDim.x; if (id < nBins) hist[id * gridDim.x + blockIdx.x] += s_bin[id]; } } __global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums, int mode = 1) { extern __shared__ int s_data[]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > 0 && i < n) s_data[blockDim.x - 1 - threadIdx.x] = in[i - 1]; else s_data[blockDim.x - 1 - threadIdx.x] = 0; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int val = 0; if (threadIdx.x < blockDim.x - stride) val = s_data[threadIdx.x + stride]; __syncthreads(); s_data[threadIdx.x] += val; __syncthreads(); } if (i < n) out[i] = s_data[blockDim.x - 1 - threadIdx.x]; if (blkSums != NULL) blkSums[blockIdx.x] = s_data[0]; } __global__ void addBlkSums(int * in, int n, int* blkSums) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n && blockIdx.x > 0) in[i] += blkSums[blockIdx.x - 1]; } __global__ void Scatter(uint32_t * in, int n, int nBits, int bit, int nBins, int *histScan, uint32_t * out) { extern __shared__ int s_data[]; int * s_in = s_data; int * s_hist = (int *)&s_in[blockDim.x]; int *dst = (int *)&s_hist[blockDim.x]; int *dst_ori = (int *)&dst[blockDim.x]; int *startIndex = (int *)&dst_ori[blockDim.x]; // Cấp phát nBins int * scan = (int *)&startIndex[nBins]; int * hist = (int *)&scan[blockDim.x]; int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < n) { s_in[threadIdx.x] = in[id]; s_hist[threadIdx.x] = (s_in[threadIdx.x] >> bit) & (nBins - 1); } else s_hist[threadIdx.x] = nBins - 1; scan[threadIdx.x] = 0; __syncthreads(); // TODO: B1 - sort radix with k = 1 for (int b = 0; b < nBits; b++) { // compute hist int _hist = s_hist[threadIdx.x]; int _in = s_in[threadIdx.x]; int _bin = (_hist >> b) & 1; hist[threadIdx.x] = _bin; if (threadIdx.x < blockDim.x - 1) scan[threadIdx.x + 1] = _bin; __syncthreads(); int _last_hist = hist[blockDim.x - 1]; for (int stride = 1; stride < blockDim.x; stride *= 2) { int val = 0; if (threadIdx.x >= stride) val = scan[threadIdx.x - stride]; __syncthreads(); scan[threadIdx.x] += val; __syncthreads(); } __syncthreads(); // scatter int scan_ = scan[threadIdx.x]; int nZeros = blockDim.x - scan[blockDim.x - 1] - _last_hist;//hist[blockDim.x - 1]; int rank = 0; if (_bin == 0) rank = threadIdx.x - scan_;//scan[threadIdx.x]; else rank = nZeros + scan_;//scan[threadIdx.x]; dst[rank] = _hist;//s_hist[threadIdx.x]; dst_ori[rank] = _in;//s_in[threadIdx.x]; __syncthreads(); // copy or swap s_hist[threadIdx.x] = dst[threadIdx.x]; s_in[threadIdx.x] = dst_ori[threadIdx.x]; } int _hist = s_hist[threadIdx.x]; int _in = s_in[threadIdx.x]; __syncthreads(); // TODO: B2 + B3 if (threadIdx.x == 0) startIndex[_hist] = 0; else { if (_hist != s_hist[threadIdx.x - 1]) startIndex[_hist] = threadIdx.x; } __syncthreads(); // TODO: B4 real scatter if (id < n) { int preRank = threadIdx.x - startIndex[_hist]; int bin = ((_in >> bit) & (nBins - 1)); int scan = histScan[bin * gridDim.x + blockIdx.x]; //int rank = scan + preRank; out[scan + preRank] = _in; } } void 
sortRadixBase04_device(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes) { int nBins = 1 << nBits; dim3 blkSize1(blockSizes[0]); // block size for histogram kernel dim3 blkSize2(blockSizes[1]); // block size for scan kernel dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel dim3 gridSize2((nBins * gridSize1.x - 1) / blkSize2.x + 1); int * blkSums = (int *)malloc(gridSize2.x * sizeof(int)); uint32_t * d_src, *d_dst; int *d_scan, *d_blkSums; // float time_hist = 0, time_scan = 0, time_add = 0, time_scatter = 0; CHECK(cudaMalloc(&d_src, n * sizeof(uint32_t))); CHECK(cudaMalloc(&d_dst, n * sizeof(uint32_t))); CHECK(cudaMalloc(&d_scan, nBins * gridSize1.x * sizeof(int))); CHECK(cudaMalloc(&d_blkSums, gridSize2.x * sizeof(int))); CHECK(cudaMemcpy(d_src, in, n * sizeof(uint32_t), cudaMemcpyHostToDevice)); size_t sMemSize2 = blkSize2.x * sizeof(int); //GpuTimer timer; for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits) { // TODO: Compute "hist" of the current digit CHECK(cudaMemset(d_scan, 0, nBins * gridSize1.x * sizeof(int))); //timer.Start(); computeHistKernel<<<gridSize1, blkSize1, nBins * sizeof(int)>>>(d_src, n, d_scan, nBins, bit); //timer.Stop(); //time_hist += timer.Elapsed(); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); // TODO: Scan // timer.Start(); scanBlkKernel<<<gridSize2, blkSize2, sMemSize2>>>(d_scan, nBins * gridSize1.x, d_scan, d_blkSums); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); // timer.Stop(); // time_scan += timer.Elapsed(); CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 1; i < gridSize2.x; i++) blkSums[i] += blkSums[i - 1]; CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize2.x * sizeof(int), cudaMemcpyHostToDevice)); // timer.Start(); addBlkSums<<<gridSize2, blkSize2>>>(d_scan, nBins * gridSize1.x, d_blkSums); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); // timer.Stop(); // time_add += timer.Elapsed(); // TODO: Scatter // timer.Start(); Scatter<<<gridSize1, blkSize1, (blkSize1.x * 6 + nBins) * sizeof(int)>>>(d_src, n, nBits, bit, nBins, d_scan, d_dst); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); // timer.Stop(); // time_scatter += timer.Elapsed(); // TODO: Swap "src" and "dst" uint32_t * temp = d_src; d_src = d_dst; d_dst = temp; } // printf("Time compute hist: %.3f ms\n", time_hist); // printf("Time scan: %.3f ms\n", time_scan); // printf("Time add blkSums: %.3f ms\n", time_add); // printf("Time Scatter: %.3f ms\n\n", time_scatter); // TODO: Copy result to "out" CHECK(cudaMemcpy(out, d_src, n * sizeof(uint32_t), cudaMemcpyDeviceToHost)); // Free memories free(blkSums); CHECK(cudaFree(d_src)); CHECK(cudaFree(d_dst)); CHECK(cudaFree(d_scan)); CHECK(cudaFree(d_blkSums)); } void sortByDevice_thrust(const uint32_t * in, int n, uint32_t * out) { // TODO thrust::device_vector<uint32_t> dv_out(in, in + n); thrust::sort(dv_out.begin(), dv_out.end()); thrust::copy(dv_out.begin(), dv_out.end(), out); } GpuTimer timer; float sort(const uint32_t * in, int n, uint32_t * out, int nBits, int useDevice=0, int * blockSizes=NULL) { timer.Start(); if (useDevice == 0) { printf("\nRadix sort by host\n"); sortByHost(in, n, out, nBits); } else if (useDevice == 1) { printf("\nRadix sort by host level 1\n"); sortRadixBase04(in, n, out, nBits, blockSizes); //use default 8 } else if (useDevice == 2) { sortRadixBase04_device(in, n, out, nBits, blockSizes); } else { printf("\nSort by thrust\n"); sortByDevice_thrust(in, n, out); } timer.Stop(); float time = 
timer.Elapsed(); if (useDevice != 2) printf("Time: %.3f ms\n", time); return time; } void printDeviceInfo() { cudaDeviceProp devProv; CHECK(cudaGetDeviceProperties(&devProv, 0)); printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("%d with %d != %d\n", i, out[i], correctOut[i]); printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } void printArray(uint32_t * a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int nBits = 8; int n = (1 << 24) + 1; if (argc > 1) nBits = atoi(argv[1]); printf("\nInput size: %d\n", n); printf("nBits: %d\n", nBits); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t * in = (uint32_t *)malloc(bytes); uint32_t * out_0 = (uint32_t *)malloc(bytes); // base 4 Host result uint32_t * out_1 = (uint32_t *)malloc(bytes); // base 4 Device result uint32_t * out_thrust = (uint32_t *)malloc(bytes); // result by Thrust uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand(); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits); sort(in, n, out_0, nBits, 1, blockSizes); checkCorrectness(out_0, correctOut, n); float avg_time = 0; int loop = 16; printf("\nRadix sort by device level 2\n"); for (int i = 0; i < loop; i++) { float time = sort(in, n, out_1, nBits, 2, blockSizes); avg_time += time; //printf("loop %d: %.3f ms\n", i + 1, time); } printf("Avg Time: %.3f ms\n", avg_time / loop); checkCorrectness(out_1, correctOut, n); sort(in, n, out_thrust, nBits, 3, blockSizes); checkCorrectness(out_thrust, out_1, n); // FREE MEMORIES free(in); free(out_0); free(out_thrust); free(out_1); free(correctOut); return EXIT_SUCCESS; }
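The device path in both copies lays the per-block histograms out bin-major (hist[bin * gridDim.x + blk]), so a single exclusive scan of that table gives every (bin, block) pair its global scatter base and keeps the sort stable across blocks. The scan itself is two-level: scanBlkKernel scans each block and emits per-block totals, the host prefix-sums those totals, and addBlkSums folds the offsets back in. A CPU-only sketch of that two-level pattern, illustrative and not the kernels themselves:

/* Illustrative only: CPU emulation of the two-level scan
   (scanBlkKernel -> host prefix-sum of blkSums -> addBlkSums). */
#include <stdio.h>

#define N   8
#define BLK 4   /* elements handled per "block" */

int main(void) {
    int in[N] = {3, 1, 4, 1, 5, 9, 2, 6};
    int out[N];                /* exclusive scan of in */
    int blkSums[N / BLK];

    /* level 1: exclusive scan inside each block, record the block total */
    for (int b = 0; b < N / BLK; b++) {
        int sum = 0;
        for (int i = 0; i < BLK; i++) {
            out[b * BLK + i] = sum;
            sum += in[b * BLK + i];
        }
        blkSums[b] = sum;
    }
    /* level 2: prefix-sum the block totals (done on the host in the files above) */
    for (int b = 1; b < N / BLK; b++)
        blkSums[b] += blkSums[b - 1];
    /* fold each block's offset back in (the addBlkSums step) */
    for (int b = 1; b < N / BLK; b++)
        for (int i = 0; i < BLK; i++)
            out[b * BLK + i] += blkSums[b - 1];

    for (int i = 0; i < N; i++)
        printf("%d ", out[i]);  /* 0 3 4 8 9 14 23 25 */
    printf("\n");
    return 0;
}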
e108ce1b66982bdfc4bb6dd8c4963ab4705ededd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=6400 --blockDim=64 --warp-sync=32 typedef unsigned int uint; typedef unsigned short ushort; #define FLT_MAX 0x1.fffffep127f __device__ static __attribute__((always_inline)) void colorSums(const float3 *colors, float3 *sums); __device__ static __attribute__((always_inline)) float3 firstEigenVector(float matrix[6]); __device__ static __attribute__((always_inline)) float3 bestFitLine(const float3 *colors, float3 color_sum); template <class T> __device__ static __attribute__((always_inline)) void swap(T &a, T &b); __device__ static __attribute__((always_inline)) void sortColors(const float *values, int *ranks); __device__ static __attribute__((always_inline)) void loadColorBlock(const uint *image, float3 colors[16], float3 sums[16], int xrefs[16], int blockOffset); __device__ static __attribute__((always_inline)) float3 roundAndExpand(float3 v, ushort *w); __device__ static __attribute__((always_inline)) float evalPermutation4(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum); __device__ static __attribute__((always_inline)) float evalPermutation3(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum); __device__ static __attribute__((always_inline)) void evalAllPermutations(const float3 *colors, const uint *permutations, ushort &bestStart, ushort &bestEnd, uint &bestPermutation, float *errors, float3 color_sum); __device__ static __attribute__((always_inline)) int findMinError(float *errors); __device__ static __attribute__((always_inline)) void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 *result, int blockOffset); #define NUM_THREADS 64 // Number of threads per block. __device__ static __attribute__((always_inline)) void colorSums(const float3 *colors, float3 *sums) { const int idx = threadIdx.x; sums[idx] = colors[idx]; sums[idx] += sums[idx^8]; sums[idx] += sums[idx^4]; sums[idx] += sums[idx^2]; sums[idx] += sums[idx^1]; } __device__ static __attribute__((always_inline)) float3 firstEigenVector(float matrix[6]) { // 8 iterations seems to be more than enough. float3 v = make_float3(1.0f, 1.0f, 1.0f); for (int i = 0; __global_invariant(__implies(threadIdx.x >= 16, !__enabled())), i < 8; i++) { float x = v.x * matrix[0] + v.y * matrix[1] + v.z * matrix[2]; float y = v.x * matrix[1] + v.y * matrix[3] + v.z * matrix[4]; float z = v.x * matrix[2] + v.y * matrix[4] + v.z * matrix[5]; float m = max(max(x, y), z); float iv = 1.0f / m; v = make_float3(x*iv, y*iv, z*iv); } return v; } __device__ static __attribute__((always_inline)) float3 bestFitLine(const float3 *colors, float3 color_sum) { // Compute covariance matrix of the given colors. const int idx = threadIdx.x; float3 diff = colors[idx] - color_sum * (1.0f / 16.0f); // @@ Eliminate two-way bank conflicts here. // @@ It seems that doing that and unrolling the reduction doesn't help... 
__shared__ float covariance[16*6]; covariance[6 * idx + 0] = diff.x * diff.x; // 0, 6, 12, 2, 8, 14, 4, 10, 0 covariance[6 * idx + 1] = diff.x * diff.y; covariance[6 * idx + 2] = diff.x * diff.z; covariance[6 * idx + 3] = diff.y * diff.y; covariance[6 * idx + 4] = diff.y * diff.z; covariance[6 * idx + 5] = diff.z * diff.z; for (int d = 8; __global_invariant(__implies(idx >= 16, !__enabled())), __global_invariant(__implies(idx >= 16, !__write(covariance))), __global_invariant(__implies(idx >= 16, !__read(covariance))), __global_invariant(__implies(idx/32 == __other_int(idx)/32 & blockIdx.x == __other_int(blockIdx.x), !__write(covariance))), d > 0; d >>= 1) { if (idx < d) { covariance[6 * idx + 0] += covariance[6 * (idx+d) + 0]; covariance[6 * idx + 1] += covariance[6 * (idx+d) + 1]; covariance[6 * idx + 2] += covariance[6 * (idx+d) + 2]; covariance[6 * idx + 3] += covariance[6 * (idx+d) + 3]; covariance[6 * idx + 4] += covariance[6 * (idx+d) + 4]; covariance[6 * idx + 5] += covariance[6 * (idx+d) + 5]; } } // Compute first eigen vector. return firstEigenVector(covariance); } template <class T> __device__ static __attribute__((always_inline)) void swap(T &a, T &b) { T tmp = a; a = b; b = tmp; } //__constant__ float3 kColorMetric = { 0.2126f, 0.7152f, 0.0722f }; __constant__ float3 kColorMetric = { 1.0f, 1.0f, 1.0f }; __device__ static __attribute__((always_inline)) void sortColors(const float *values, int *ranks) { const int tid = threadIdx.x; int rank = 0; #pragma unroll for (int i = 0; __global_invariant(__implies(tid >= 16, !__enabled())), i < 16; i++) { rank += (values[i] < values[tid]); } ranks[tid] = rank; // Resolve elements with the same index. #pragma unroll for (int i = 0; __global_invariant(__implies(tid >= 16, !__enabled())), __global_invariant(__implies(tid >= 16, !__read(ranks))), __global_invariant(__implies(tid/32 == __other_int(tid)/32 & blockIdx.x == __other_int(blockIdx.x), !__write(ranks))), i < 15; i++) { if (tid > i && ranks[tid] == ranks[i]) { ++ranks[tid]; } } // IMPERIAL EDIT: post condition of the above code __assume(__implies(tid < 16 & __other_int(tid) < 16 & blockIdx.x == __other_int(blockIdx.x), ranks[tid] != ranks[__other_int(tid)])); } __device__ static __attribute__((always_inline)) void loadColorBlock(const uint *image, float3 colors[16], float3 sums[16], int xrefs[16], int blockOffset) { const int bid = blockIdx.x + blockOffset; const int idx = threadIdx.x; __shared__ float dps[16]; float3 tmp; if (idx < 16) { // Read color and copy to shared mem. uint c = image[(bid) * 16 + idx]; colors[idx].x = ((c >> 0) & 0xFF) * (1.0f / 255.0f); colors[idx].y = ((c >> 8) & 0xFF) * (1.0f / 255.0f); colors[idx].z = ((c >> 16) & 0xFF) * (1.0f / 255.0f); // Sort colors along the best fit line. colorSums(colors, sums); float3 axis = bestFitLine(colors, sums[0]); dps[idx] = dot(colors[idx], axis); sortColors(dps, xrefs); tmp = colors[idx]; colors[xrefs[idx]] = tmp; } } __device__ static __attribute__((always_inline)) float3 roundAndExpand(float3 v, ushort *w) { v.x = rintf(__saturatef(v.x) * 31.0f); v.y = rintf(__saturatef(v.y) * 63.0f); v.z = rintf(__saturatef(v.z) * 31.0f); *w = ((ushort)v.x << 11) | ((ushort)v.y << 5) | (ushort)v.z; v.x *= 0.03227752766457f; // approximate integer bit expansion. 
v.y *= 0.01583151765563f; v.z *= 0.03227752766457f; return v; } __constant__ float alphaTable4[4] = { 9.0f, 0.0f, 6.0f, 3.0f }; __constant__ float alphaTable3[4] = { 4.0f, 0.0f, 2.0f, 2.0f }; __constant__ const int prods4[4] = { 0x090000,0x000900,0x040102,0x010402 }; __constant__ const int prods3[4] = { 0x040000,0x000400,0x040101,0x010401 }; #define USE_TABLES 1 __device__ static __attribute__((always_inline)) float evalPermutation4(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable4[bits & 3] * colors[i]; akku += prods4[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (9.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = (1 + beta) * (1.0f / 3.0f); } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif // alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them. const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.111111111111f) * dot(e, kColorMetric); } __device__ static __attribute__((always_inline)) float evalPermutation3(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable3[bits & 3] * colors[i]; akku += prods3[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (4.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. 
for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = 0.5f; } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.25f) * dot(e, kColorMetric); } __device__ static __attribute__((always_inline)) void evalAllPermutations(const float3 *colors, const uint *permutations, ushort &bestStart, ushort &bestEnd, uint &bestPermutation, float *errors, float3 color_sum) { const int idx = threadIdx.x; float bestError = FLT_MAX; __shared__ uint s_permutations[160]; for (int i = 0; i < 16; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 992) { break; } ushort start, end; uint permutation = permutations[pidx]; if (pidx < 160) { s_permutations[pidx] = permutation; } float error = evalPermutation4(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; } } if (bestStart < bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= 0x55555555; // Flip indices. } for (int i = 0; i < 3; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 160) { break; } ushort start, end; uint permutation = s_permutations[pidx]; float error = evalPermutation3(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; if (bestStart > bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices. } } } errors[idx] = bestError; } __device__ static __attribute__((always_inline)) int findMinError(float *errors) { const int idx = threadIdx.x; __shared__ int indices[NUM_THREADS]; indices[idx] = idx; for (int d = NUM_THREADS/2; d > 32; d >>= 1) { __syncthreads(); if (idx < d) { float err0 = errors[idx]; float err1 = errors[idx + d]; if (err1 < err0) { errors[idx] = err1; indices[idx] = indices[idx + d]; } } } __syncthreads(); // unroll last 6 iterations if (idx < 32) { if (errors[idx + 32] < errors[idx]) { errors[idx] = errors[idx + 32]; indices[idx] = indices[idx + 32]; } if (errors[idx + 16] < errors[idx]) { errors[idx] = errors[idx + 16]; indices[idx] = indices[idx + 16]; } if (errors[idx + 8] < errors[idx]) { errors[idx] = errors[idx + 8]; indices[idx] = indices[idx + 8]; } if (errors[idx + 4] < errors[idx]) { errors[idx] = errors[idx + 4]; indices[idx] = indices[idx + 4]; } if (errors[idx + 2] < errors[idx]) { errors[idx] = errors[idx + 2]; indices[idx] = indices[idx + 2]; } if (errors[idx + 1] < errors[idx]) { errors[idx] = errors[idx + 1]; indices[idx] = indices[idx + 1]; } } __syncthreads(); return indices[0]; } __device__ static __attribute__((always_inline)) void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 *result, int blockOffset) { const int bid = blockIdx.x + blockOffset; if (start == end) { permutation = 0; } // Reorder permutation. 
uint indices = 0; for (int i = 0; i < 16; i++) { int ref = xrefs[i]; indices |= ((permutation >> (2 * ref)) & 3) << (2 * i); } // Write endpoints. result[bid].x = (end << 16) | start; // Write palette indices. result[bid].y = indices; } __global__ void compress(const uint *permutations, const uint *image, uint2 *result, int blockOffset) { const int idx = threadIdx.x; __shared__ float3 colors[16]; __shared__ float3 sums[16]; __shared__ int xrefs[16]; loadColorBlock(image, colors, sums, xrefs, blockOffset); __syncthreads(); ushort bestStart, bestEnd; uint bestPermutation; __shared__ float errors[NUM_THREADS]; evalAllPermutations(colors, permutations, bestStart, bestEnd, bestPermutation, errors, sums[0]); // Use a parallel reduction to find minimum error. const int minIdx = findMinError(errors); __syncthreads(); // Only write the result of the winner thread. if (idx == minIdx) { saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result, blockOffset); } }
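// ---------------------------------------------------------------------------
// Host-only sketch (added for illustration; not part of the benchmark file above):
// it reconstructs the 4-cluster lookup tables used by evalPermutation4() from the
// reference alpha/beta definition in the #else branch (beta = bits & 1; beta =
// (1 + beta) / 3 when bits & 2; alpha = 1 - beta). Writing alpha and beta in
// thirds (a3 = 3*alpha, b3 = 3*beta) makes the packing exact integer arithmetic:
// each prods4 entry is (9*alpha^2 << 16) | (9*beta^2 << 8) | (9*alpha*beta), and
// alphaTable4 holds 9*alpha. Since any 8-bit field gains at most 9 per texel
// (16 * 9 = 144 < 256), the per-permutation accumulation into the single int
// 'akku' never carries between fields, which is why the kernel can unpack it
// with plain shifts and masks.
#include <cstdio>

int main() {
    const int a3[4] = { 3, 0, 2, 1 };  // 3*alpha for palette indices 0..3
    const int b3[4] = { 0, 3, 1, 2 };  // 3*beta  (alpha + beta == 1)
    for (int i = 0; i < 4; ++i) {
        unsigned packed = (a3[i] * a3[i] << 16) | (b3[i] * b3[i] << 8) | (a3[i] * b3[i]);
        printf("idx %d: 9*alpha = %d, packed = 0x%06X\n", i, 3 * a3[i], packed);
    }
    // Expected output matches the device tables above:
    //   idx 0: 9*alpha = 9, packed = 0x090000
    //   idx 1: 9*alpha = 0, packed = 0x000900
    //   idx 2: 9*alpha = 6, packed = 0x040102
    //   idx 3: 9*alpha = 3, packed = 0x010402
    return 0;
}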
e108ce1b66982bdfc4bb6dd8c4963ab4705ededd.cu
//pass //--gridDim=6400 --blockDim=64 --warp-sync=32 typedef unsigned int uint; typedef unsigned short ushort; #define FLT_MAX 0x1.fffffep127f __device__ static __attribute__((always_inline)) void colorSums(const float3 *colors, float3 *sums); __device__ static __attribute__((always_inline)) float3 firstEigenVector(float matrix[6]); __device__ static __attribute__((always_inline)) float3 bestFitLine(const float3 *colors, float3 color_sum); template <class T> __device__ static __attribute__((always_inline)) void swap(T &a, T &b); __device__ static __attribute__((always_inline)) void sortColors(const float *values, int *ranks); __device__ static __attribute__((always_inline)) void loadColorBlock(const uint *image, float3 colors[16], float3 sums[16], int xrefs[16], int blockOffset); __device__ static __attribute__((always_inline)) float3 roundAndExpand(float3 v, ushort *w); __device__ static __attribute__((always_inline)) float evalPermutation4(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum); __device__ static __attribute__((always_inline)) float evalPermutation3(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum); __device__ static __attribute__((always_inline)) void evalAllPermutations(const float3 *colors, const uint *permutations, ushort &bestStart, ushort &bestEnd, uint &bestPermutation, float *errors, float3 color_sum); __device__ static __attribute__((always_inline)) int findMinError(float *errors); __device__ static __attribute__((always_inline)) void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 *result, int blockOffset); #define NUM_THREADS 64 // Number of threads per block. __device__ static __attribute__((always_inline)) void colorSums(const float3 *colors, float3 *sums) { const int idx = threadIdx.x; sums[idx] = colors[idx]; sums[idx] += sums[idx^8]; sums[idx] += sums[idx^4]; sums[idx] += sums[idx^2]; sums[idx] += sums[idx^1]; } __device__ static __attribute__((always_inline)) float3 firstEigenVector(float matrix[6]) { // 8 iterations seems to be more than enough. float3 v = make_float3(1.0f, 1.0f, 1.0f); for (int i = 0; __global_invariant(__implies(threadIdx.x >= 16, !__enabled())), i < 8; i++) { float x = v.x * matrix[0] + v.y * matrix[1] + v.z * matrix[2]; float y = v.x * matrix[1] + v.y * matrix[3] + v.z * matrix[4]; float z = v.x * matrix[2] + v.y * matrix[4] + v.z * matrix[5]; float m = max(max(x, y), z); float iv = 1.0f / m; v = make_float3(x*iv, y*iv, z*iv); } return v; } __device__ static __attribute__((always_inline)) float3 bestFitLine(const float3 *colors, float3 color_sum) { // Compute covariance matrix of the given colors. const int idx = threadIdx.x; float3 diff = colors[idx] - color_sum * (1.0f / 16.0f); // @@ Eliminate two-way bank conflicts here. // @@ It seems that doing that and unrolling the reduction doesn't help... 
__shared__ float covariance[16*6]; covariance[6 * idx + 0] = diff.x * diff.x; // 0, 6, 12, 2, 8, 14, 4, 10, 0 covariance[6 * idx + 1] = diff.x * diff.y; covariance[6 * idx + 2] = diff.x * diff.z; covariance[6 * idx + 3] = diff.y * diff.y; covariance[6 * idx + 4] = diff.y * diff.z; covariance[6 * idx + 5] = diff.z * diff.z; for (int d = 8; __global_invariant(__implies(idx >= 16, !__enabled())), __global_invariant(__implies(idx >= 16, !__write(covariance))), __global_invariant(__implies(idx >= 16, !__read(covariance))), __global_invariant(__implies(idx/32 == __other_int(idx)/32 & blockIdx.x == __other_int(blockIdx.x), !__write(covariance))), d > 0; d >>= 1) { if (idx < d) { covariance[6 * idx + 0] += covariance[6 * (idx+d) + 0]; covariance[6 * idx + 1] += covariance[6 * (idx+d) + 1]; covariance[6 * idx + 2] += covariance[6 * (idx+d) + 2]; covariance[6 * idx + 3] += covariance[6 * (idx+d) + 3]; covariance[6 * idx + 4] += covariance[6 * (idx+d) + 4]; covariance[6 * idx + 5] += covariance[6 * (idx+d) + 5]; } } // Compute first eigen vector. return firstEigenVector(covariance); } template <class T> __device__ static __attribute__((always_inline)) void swap(T &a, T &b) { T tmp = a; a = b; b = tmp; } //__constant__ float3 kColorMetric = { 0.2126f, 0.7152f, 0.0722f }; __constant__ float3 kColorMetric = { 1.0f, 1.0f, 1.0f }; __device__ static __attribute__((always_inline)) void sortColors(const float *values, int *ranks) { const int tid = threadIdx.x; int rank = 0; #pragma unroll for (int i = 0; __global_invariant(__implies(tid >= 16, !__enabled())), i < 16; i++) { rank += (values[i] < values[tid]); } ranks[tid] = rank; // Resolve elements with the same index. #pragma unroll for (int i = 0; __global_invariant(__implies(tid >= 16, !__enabled())), __global_invariant(__implies(tid >= 16, !__read(ranks))), __global_invariant(__implies(tid/32 == __other_int(tid)/32 & blockIdx.x == __other_int(blockIdx.x), !__write(ranks))), i < 15; i++) { if (tid > i && ranks[tid] == ranks[i]) { ++ranks[tid]; } } // IMPERIAL EDIT: post condition of the above code __assume(__implies(tid < 16 & __other_int(tid) < 16 & blockIdx.x == __other_int(blockIdx.x), ranks[tid] != ranks[__other_int(tid)])); } __device__ static __attribute__((always_inline)) void loadColorBlock(const uint *image, float3 colors[16], float3 sums[16], int xrefs[16], int blockOffset) { const int bid = blockIdx.x + blockOffset; const int idx = threadIdx.x; __shared__ float dps[16]; float3 tmp; if (idx < 16) { // Read color and copy to shared mem. uint c = image[(bid) * 16 + idx]; colors[idx].x = ((c >> 0) & 0xFF) * (1.0f / 255.0f); colors[idx].y = ((c >> 8) & 0xFF) * (1.0f / 255.0f); colors[idx].z = ((c >> 16) & 0xFF) * (1.0f / 255.0f); // Sort colors along the best fit line. colorSums(colors, sums); float3 axis = bestFitLine(colors, sums[0]); dps[idx] = dot(colors[idx], axis); sortColors(dps, xrefs); tmp = colors[idx]; colors[xrefs[idx]] = tmp; } } __device__ static __attribute__((always_inline)) float3 roundAndExpand(float3 v, ushort *w) { v.x = rintf(__saturatef(v.x) * 31.0f); v.y = rintf(__saturatef(v.y) * 63.0f); v.z = rintf(__saturatef(v.z) * 31.0f); *w = ((ushort)v.x << 11) | ((ushort)v.y << 5) | (ushort)v.z; v.x *= 0.03227752766457f; // approximate integer bit expansion. 
v.y *= 0.01583151765563f; v.z *= 0.03227752766457f; return v; } __constant__ float alphaTable4[4] = { 9.0f, 0.0f, 6.0f, 3.0f }; __constant__ float alphaTable3[4] = { 4.0f, 0.0f, 2.0f, 2.0f }; __constant__ const int prods4[4] = { 0x090000,0x000900,0x040102,0x010402 }; __constant__ const int prods3[4] = { 0x040000,0x000400,0x040101,0x010401 }; #define USE_TABLES 1 __device__ static __attribute__((always_inline)) float evalPermutation4(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable4[bits & 3] * colors[i]; akku += prods4[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (9.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = (1 + beta) * (1.0f / 3.0f); } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif // alpha2, beta2, alphabeta and factor could be precomputed for each permutation, but it's faster to recompute them. const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.111111111111f) * dot(e, kColorMetric); } __device__ static __attribute__((always_inline)) float evalPermutation3(const float3 *colors, uint permutation, ushort *start, ushort *end, float3 color_sum) { // Compute endpoints using least squares. #if USE_TABLES float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); int akku = 0; // Compute alpha & beta for this permutation. for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); alphax_sum += alphaTable3[bits & 3] * colors[i]; akku += prods3[bits & 3]; } float alpha2_sum = float(akku >> 16); float beta2_sum = float((akku >> 8) & 0xff); float alphabeta_sum = float((akku >> 0) & 0xff); float3 betax_sum = (4.0f * color_sum) - alphax_sum; #else float alpha2_sum = 0.0f; float beta2_sum = 0.0f; float alphabeta_sum = 0.0f; float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f); // Compute alpha & beta for this permutation. 
for (int i = 0; i < 16; i++) { const uint bits = permutation >> (2*i); float beta = (bits & 1); if (bits & 2) { beta = 0.5f; } float alpha = 1.0f - beta; alpha2_sum += alpha * alpha; beta2_sum += beta * beta; alphabeta_sum += alpha * beta; alphax_sum += alpha * colors[i]; } float3 betax_sum = color_sum - alphax_sum; #endif const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum); float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor; float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor; // Round a, b to the closest 5-6-5 color and expand... a = roundAndExpand(a, start); b = roundAndExpand(b, end); // compute the error float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum); return (0.25f) * dot(e, kColorMetric); } __device__ static __attribute__((always_inline)) void evalAllPermutations(const float3 *colors, const uint *permutations, ushort &bestStart, ushort &bestEnd, uint &bestPermutation, float *errors, float3 color_sum) { const int idx = threadIdx.x; float bestError = FLT_MAX; __shared__ uint s_permutations[160]; for (int i = 0; i < 16; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 992) { break; } ushort start, end; uint permutation = permutations[pidx]; if (pidx < 160) { s_permutations[pidx] = permutation; } float error = evalPermutation4(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; } } if (bestStart < bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= 0x55555555; // Flip indices. } for (int i = 0; i < 3; i++) { int pidx = idx + NUM_THREADS * i; if (pidx >= 160) { break; } ushort start, end; uint permutation = s_permutations[pidx]; float error = evalPermutation3(colors, permutation, &start, &end, color_sum); if (error < bestError) { bestError = error; bestPermutation = permutation; bestStart = start; bestEnd = end; if (bestStart > bestEnd) { swap(bestEnd, bestStart); bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices. } } } errors[idx] = bestError; } __device__ static __attribute__((always_inline)) int findMinError(float *errors) { const int idx = threadIdx.x; __shared__ int indices[NUM_THREADS]; indices[idx] = idx; for (int d = NUM_THREADS/2; d > 32; d >>= 1) { __syncthreads(); if (idx < d) { float err0 = errors[idx]; float err1 = errors[idx + d]; if (err1 < err0) { errors[idx] = err1; indices[idx] = indices[idx + d]; } } } __syncthreads(); // unroll last 6 iterations if (idx < 32) { if (errors[idx + 32] < errors[idx]) { errors[idx] = errors[idx + 32]; indices[idx] = indices[idx + 32]; } if (errors[idx + 16] < errors[idx]) { errors[idx] = errors[idx + 16]; indices[idx] = indices[idx + 16]; } if (errors[idx + 8] < errors[idx]) { errors[idx] = errors[idx + 8]; indices[idx] = indices[idx + 8]; } if (errors[idx + 4] < errors[idx]) { errors[idx] = errors[idx + 4]; indices[idx] = indices[idx + 4]; } if (errors[idx + 2] < errors[idx]) { errors[idx] = errors[idx + 2]; indices[idx] = indices[idx + 2]; } if (errors[idx + 1] < errors[idx]) { errors[idx] = errors[idx + 1]; indices[idx] = indices[idx + 1]; } } __syncthreads(); return indices[0]; } __device__ static __attribute__((always_inline)) void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 *result, int blockOffset) { const int bid = blockIdx.x + blockOffset; if (start == end) { permutation = 0; } // Reorder permutation. 
uint indices = 0; for (int i = 0; i < 16; i++) { int ref = xrefs[i]; indices |= ((permutation >> (2 * ref)) & 3) << (2 * i); } // Write endpoints. result[bid].x = (end << 16) | start; // Write palette indices. result[bid].y = indices; } __global__ void compress(const uint *permutations, const uint *image, uint2 *result, int blockOffset) { const int idx = threadIdx.x; __shared__ float3 colors[16]; __shared__ float3 sums[16]; __shared__ int xrefs[16]; loadColorBlock(image, colors, sums, xrefs, blockOffset); __syncthreads(); ushort bestStart, bestEnd; uint bestPermutation; __shared__ float errors[NUM_THREADS]; evalAllPermutations(colors, permutations, bestStart, bestEnd, bestPermutation, errors, sums[0]); // Use a parallel reduction to find minimum error. const int minIdx = findMinError(errors); __syncthreads(); // Only write the result of the winner thread. if (idx == minIdx) { saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result, blockOffset); } }
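// ---------------------------------------------------------------------------
// Hypothetical host-side driver for the compress() kernel above -- a sketch for
// illustration only, not the original sample's host code. It is written as if
// appended to this translation unit, and it assumes the GPUVerify annotation
// intrinsics used in this benchmark (__global_invariant, __assume, __other_int,
// ...) are stubbed out for a real nvcc build. Other assumptions worth flagging:
// the image has already been swizzled into 4x4 blocks of 16 packed 32-bit texels
// (the layout loadColorBlock() expects), and the permutation table has at least
// 992 entries (the kernel reads indices up to 991 and reuses the first 160 for
// the 3-color search). The zero-filled placeholder table below only exercises
// the launch; meaningful output needs the sample's real permutation generator.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

int main() {
    const int numBlocks = 6400;            // matches the --gridDim=6400 annotation
    const int texelsPerBlock = 16;

    std::vector<unsigned int> h_perms(992, 0);   // placeholder permutation table
    std::vector<unsigned int> h_image(numBlocks * texelsPerBlock, 0xFF808080u);
    std::vector<uint2> h_result(numBlocks);

    unsigned int *d_perms = nullptr, *d_image = nullptr;
    uint2 *d_result = nullptr;
    cudaMalloc(&d_perms, h_perms.size() * sizeof(unsigned int));
    cudaMalloc(&d_image, h_image.size() * sizeof(unsigned int));
    cudaMalloc(&d_result, h_result.size() * sizeof(uint2));
    cudaMemcpy(d_perms, h_perms.data(), h_perms.size() * sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_image, h_image.data(), h_image.size() * sizeof(unsigned int), cudaMemcpyHostToDevice);

    // One 64-thread block per 4x4 texel block; the block size must stay NUM_THREADS
    // because findMinError() hard-codes the unrolled last warp of its reduction.
    compress<<<numBlocks, NUM_THREADS>>>(d_perms, d_image, d_result, /*blockOffset=*/0);
    cudaMemcpy(h_result.data(), d_result, h_result.size() * sizeof(uint2), cudaMemcpyDeviceToHost);

    printf("first DXT1 block: endpoints=0x%08X indices=0x%08X\n", h_result[0].x, h_result[0].y);
    cudaFree(d_perms); cudaFree(d_image); cudaFree(d_result);
    return 0;
}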
630f9151173c69140f5ba36436f1d006ae333d26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _pet_line_backproject_compressed_gpu_kernels.cu * * NiftyRec * Stefano Pedemonte, Oct. 2012. * CMIC - Centre for Medical Image Computing * UCL - University College London. * Harvard University, Martinos Center for Biomedical Imaging * Jan. 2014. */ #include "_pet_line_backproject_compressed_gpu.h" __device__ __constant__ int c_N_u; __device__ __constant__ int c_N_v; __device__ __constant__ int c_N_locations; __device__ __constant__ int c_N_samples; __device__ __constant__ int c_block_size; __global__ void pet_line_backproject_compressed_gpu_kernel(float *g_backprojection, float *g_attenuation, float *g_projection, unsigned short *g_locations, unsigned int direction) { const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int N_locations = c_N_locations; const unsigned int N_samples = c_N_samples; const unsigned int N_u = c_N_u; const unsigned int N_v = c_N_v; const unsigned int pixelNumber = N_u * N_v; // __shared__ float s_projection[512]; if(tid<N_locations){ unsigned short u = g_locations[tid*3]; unsigned short v = g_locations[tid*3+1]; unsigned int index; //load projection to shared mem // s_projection[threadIdx.x] = g_projection[tid]; if (direction==1) index = u + v*N_u; else if (direction==2) index = u*N_v + v; else if (direction==3) index = u*N_samples + v*N_u*N_samples; else if (direction==4) index = u*N_samples + v*N_samples*N_u; else if (direction==5) index = u*N_v*N_samples + v; else if (direction==6) index = u + v*N_u*N_samples; else index = u*N_v + v; for(unsigned int z=0; z<N_samples; z++) { // g_backprojection[index] = s_projection[threadIdx.x]; g_backprojection[index] = g_projection[tid]; if (direction==1) index += pixelNumber; else if (direction==2) index += pixelNumber; else if (direction==3) index += 1; else if (direction==4) index += 1; else if (direction==5) index += N_v; else if (direction==6) index += N_u; else index += pixelNumber; } } return; }
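// ---------------------------------------------------------------------------
// Host-side restatement of the index arithmetic used by the kernel above, added
// for clarity only (it is not part of NiftyRec). Each `direction` value selects
// how the detector coordinates (u, v) and the sample index z are interleaved in
// the flat backprojection volume: the kernel computes a base index for z = 0 and
// then walks the line with a fixed stride per sample.
#include <cassert>

struct LineIndexer {
    int base;    // flat index of sample z = 0 for this (u, v) location
    int stride;  // increment applied per sample along the line
};

inline LineIndexer line_indexer(int u, int v, int N_u, int N_v, int N_samples,
                                unsigned int direction) {
    const int pixelNumber = N_u * N_v;
    switch (direction) {
        case 1:  return { u + v * N_u,                         pixelNumber };  // z slowest
        case 2:  return { u * N_v + v,                         pixelNumber };
        case 3:  return { u * N_samples + v * N_u * N_samples, 1 };            // z fastest
        case 4:  return { u * N_samples + v * N_samples * N_u, 1 };
        case 5:  return { u * N_v * N_samples + v,             N_v };
        case 6:  return { u + v * N_u * N_samples,             N_u };
        default: return { u * N_v + v,                         pixelNumber };
    }
}

// Example: with direction == 1 the sample (u, v, z) lands at u + v*N_u + z*N_u*N_v.
int main() {
    LineIndexer ix = line_indexer(/*u=*/3, /*v=*/2, /*N_u=*/16, /*N_v=*/8, /*N_samples=*/4, 1);
    assert(ix.base + 1 * ix.stride == 3 + 2 * 16 + 1 * 16 * 8);
    return 0;
}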
630f9151173c69140f5ba36436f1d006ae333d26.cu
/* * _pet_line_backproject_compressed_gpu_kernels.cu * * NiftyRec * Stefano Pedemonte, Oct. 2012. * CMIC - Centre for Medical Image Computing * UCL - University College London. * Harvard University, Martinos Center for Biomedical Imaging * Jan. 2014. */ #include "_pet_line_backproject_compressed_gpu.h" __device__ __constant__ int c_N_u; __device__ __constant__ int c_N_v; __device__ __constant__ int c_N_locations; __device__ __constant__ int c_N_samples; __device__ __constant__ int c_block_size; __global__ void pet_line_backproject_compressed_gpu_kernel(float *g_backprojection, float *g_attenuation, float *g_projection, unsigned short *g_locations, unsigned int direction) { const unsigned int tid = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int N_locations = c_N_locations; const unsigned int N_samples = c_N_samples; const unsigned int N_u = c_N_u; const unsigned int N_v = c_N_v; const unsigned int pixelNumber = N_u * N_v; // __shared__ float s_projection[512]; if(tid<N_locations){ unsigned short u = g_locations[tid*3]; unsigned short v = g_locations[tid*3+1]; unsigned int index; //load projection to shared mem // s_projection[threadIdx.x] = g_projection[tid]; if (direction==1) index = u + v*N_u; else if (direction==2) index = u*N_v + v; else if (direction==3) index = u*N_samples + v*N_u*N_samples; else if (direction==4) index = u*N_samples + v*N_samples*N_u; else if (direction==5) index = u*N_v*N_samples + v; else if (direction==6) index = u + v*N_u*N_samples; else index = u*N_v + v; for(unsigned int z=0; z<N_samples; z++) { // g_backprojection[index] = s_projection[threadIdx.x]; g_backprojection[index] = g_projection[tid]; if (direction==1) index += pixelNumber; else if (direction==2) index += pixelNumber; else if (direction==3) index += 1; else if (direction==4) index += 1; else if (direction==5) index += N_v; else if (direction==6) index += N_u; else index += pixelNumber; } } return; }
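// ---------------------------------------------------------------------------
// Hypothetical host-side wrapper for the kernel above -- a sketch only; the real
// entry points live behind _pet_line_backproject_compressed_gpu.h in NiftyRec and
// are not shown here. It is written as if appended to this translation unit so it
// can touch the __constant__ symbols directly. Two things the kernel relies on:
// the geometry constants must be copied with cudaMemcpyToSymbol before the launch
// (c_block_size is never read by the kernel, so it is left untouched), and one
// thread is needed per compressed (u, v) location.
#include <vector>

void backproject_compressed_sketch(const std::vector<unsigned short>& h_locations, // 3 shorts per location
                                   const std::vector<float>& h_projection,         // 1 value per location
                                   std::vector<float>& h_backprojection,           // N_u * N_v * N_samples
                                   int N_u, int N_v, int N_samples, unsigned int direction)
{
    int N_locations = static_cast<int>(h_projection.size());

    cudaMemcpyToSymbol(c_N_u, &N_u, sizeof(int));
    cudaMemcpyToSymbol(c_N_v, &N_v, sizeof(int));
    cudaMemcpyToSymbol(c_N_samples, &N_samples, sizeof(int));
    cudaMemcpyToSymbol(c_N_locations, &N_locations, sizeof(int));

    float *d_backprojection = nullptr, *d_projection = nullptr;
    unsigned short *d_locations = nullptr;
    cudaMalloc(&d_backprojection, h_backprojection.size() * sizeof(float));
    cudaMalloc(&d_projection, h_projection.size() * sizeof(float));
    cudaMalloc(&d_locations, h_locations.size() * sizeof(unsigned short));
    cudaMemset(d_backprojection, 0, h_backprojection.size() * sizeof(float));
    cudaMemcpy(d_projection, h_projection.data(), h_projection.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_locations, h_locations.data(), h_locations.size() * sizeof(unsigned short), cudaMemcpyHostToDevice);

    // One thread per compressed location; g_attenuation is accepted but never read
    // by the kernel body, so a null pointer is passed here.
    const int block_size = 256;
    const int grid_size  = (N_locations + block_size - 1) / block_size;
    pet_line_backproject_compressed_gpu_kernel<<<grid_size, block_size>>>(
        d_backprojection, /*g_attenuation=*/nullptr, d_projection, d_locations, direction);

    cudaMemcpy(h_backprojection.data(), d_backprojection, h_backprojection.size() * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_backprojection); cudaFree(d_projection); cudaFree(d_locations);
}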
4d2b4c9a53233f314966059db687231fcb38abf4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "hip/hip_runtime_api.h" #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/random.h> #include <cugraph/algorithms.hpp> #include <cugraph/graph.hpp> #include <sampling/random_walks.cuh> #include <raft/handle.hpp> #include <raft/random/rng.cuh> #include "random_walks_utils.cuh" #include <algorithm> #include <iostream> #include <iterator> #include <limits> #include <numeric> #include <utilities/high_res_timer.hpp> #include <vector> using namespace cugraph::experimental; template <typename value_t> using vector_test_t = detail::device_vec_t<value_t>; // for debug purposes namespace { // anonym. template <typename vertex_t, typename edge_t, typename weight_t> graph_t<vertex_t, edge_t, weight_t, false, false> make_graph(raft::handle_t const& handle, std::vector<vertex_t> const& v_src, std::vector<vertex_t> const& v_dst, std::vector<weight_t> const& v_w, vertex_t num_vertices, edge_t num_edges, bool is_weighted) { vector_test_t<vertex_t> d_src(num_edges, handle.get_stream()); vector_test_t<vertex_t> d_dst(num_edges, handle.get_stream()); vector_test_t<weight_t> d_weights(num_edges, handle.get_stream()); raft::update_device(d_src.data(), v_src.data(), d_src.size(), handle.get_stream()); raft::update_device(d_dst.data(), v_dst.data(), d_dst.size(), handle.get_stream()); weight_t* ptr_d_weights{nullptr}; if (is_weighted) { raft::update_device(d_weights.data(), v_w.data(), d_weights.size(), handle.get_stream()); ptr_d_weights = d_weights.data(); } edgelist_t<vertex_t, edge_t, weight_t> edgelist{ d_src.data(), d_dst.data(), ptr_d_weights, num_edges}; graph_t<vertex_t, edge_t, weight_t, false, false> graph( handle, edgelist, num_vertices, graph_properties_t{false, false, is_weighted}, false); return graph; } template <typename vertex_t, typename edge_t, typename index_t> bool check_col_indices(raft::handle_t const& handle, vector_test_t<edge_t> const& d_crt_out_degs, vector_test_t<vertex_t> const& d_col_indx, index_t num_paths) { bool all_indices_within_degs = thrust::all_of( rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), thrust::make_counting_iterator<index_t>(0), thrust::make_counting_iterator<index_t>(num_paths), [p_d_col_indx = detail::raw_const_ptr(d_col_indx), p_d_crt_out_degs = detail::raw_const_ptr(d_crt_out_degs)] __device__(auto indx) { if (p_d_crt_out_degs[indx] > 0) return ((p_d_col_indx[indx] >= 0) && (p_d_col_indx[indx] < p_d_crt_out_degs[indx])); else return true; }); return all_indices_within_degs; } } // namespace // FIXME (per rlratzel request): // This test may be considered an e2e test // which could be moved to a different test suite: // struct RandomWalksPrimsTest : public ::testing::Test { }; TEST_F(RandomWalksPrimsTest, SimpleGraphRWStart) { using namespace cugraph::experimental::detail; using vertex_t = 
int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vs(num_edges); raft::update_host(v_ro.data(), offsets, num_vertices + 1, handle.get_stream()); raft::update_host(v_ci.data(), indices, num_edges, handle.get_stream()); raft::update_host(v_vs.data(), values, num_edges, handle.get_stream()); std::vector<edge_t> v_ro_expected{0, 1, 3, 6, 7, 8, 8}; std::vector<vertex_t> v_ci_expected{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_vs_expected{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; EXPECT_EQ(v_ro, v_ro_expected); EXPECT_EQ(v_ci, v_ci_expected); EXPECT_EQ(v_vs, v_vs_expected); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; rand_walker.start(d_start, d_coalesced_v, d_sizes); std::vector<vertex_t> v_coalesced_exp{1, -1, -1, 0, -1, -1, 4, -1, -1, 2, -1, -1}; raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), total_sz, handle.get_stream()); EXPECT_EQ(v_coalesced, v_coalesced_exp); std::vector<index_t> v_sizes{1, 1, 1, 1}; std::vector<index_t> v_sz_exp(num_paths); raft::update_host(v_sz_exp.data(), raw_const_ptr(d_sizes), num_paths, handle.get_stream()); EXPECT_EQ(v_sizes, v_sz_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphCoalesceExperiments) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); 
vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); EXPECT_EQ(static_cast<size_t>(num_vertices), d_out_degs.size()); std::vector<edge_t> v_out_degs(num_vertices); raft::update_host( v_out_degs.data(), raw_const_ptr(d_out_degs), num_vertices, handle.get_stream()); std::vector<edge_t> v_out_degs_exp{1, 2, 3, 1, 1, 0}; EXPECT_EQ(v_out_degs, v_out_degs_exp); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); std::vector<edge_t> v_crt_out_degs(num_paths); raft::update_host( v_crt_out_degs.data(), raw_const_ptr(d_crt_out_degs), num_paths, handle.get_stream()); std::vector<edge_t> v_crt_out_degs_exp{2, 1, 1, 3}; EXPECT_EQ(v_crt_out_degs, v_crt_out_degs_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphColExtraction) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); 
col_indx_extract_t<decltype(graph_view), index_t> col_extractor{handle, graph_view, raw_const_ptr(d_crt_out_degs), raw_const_ptr(d_sizes), num_paths, max_depth}; // typically given by random engine: // std::vector<vertex_t> v_col_indx{1, 0, 0, 2}; vector_test_t<vertex_t> d_col_indx(num_paths, handle.get_stream()); raft::update_device(d_col_indx.data(), v_col_indx.data(), d_col_indx.size(), handle.get_stream()); vector_test_t<vertex_t> d_next_v(num_paths, handle.get_stream()); vector_test_t<weight_t> d_next_w(num_paths, handle.get_stream()); col_extractor(d_coalesced_v, d_col_indx, d_next_v, d_next_w); std::vector<vertex_t> v_next_v(num_paths); std::vector<weight_t> v_next_w(num_paths); raft::update_host(v_next_v.data(), raw_const_ptr(d_next_v), num_paths, handle.get_stream()); raft::update_host(v_next_w.data(), raw_const_ptr(d_next_w), num_paths, handle.get_stream()); std::vector<vertex_t> v_next_v_exp{4, 1, 5, 3}; std::vector<weight_t> v_next_w_exp{2.1f, 0.1f, 7.1f, 5.1f}; EXPECT_EQ(v_next_v, v_next_v_exp); EXPECT_EQ(v_next_w, v_next_w_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphRndGenColIndx) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; using real_t = float; using seed_t = long; using random_engine_t = rrandom_gen_t<vertex_t, edge_t>; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); // random engine generated: // vector_test_t<vertex_t> d_col_indx(num_paths, handle.get_stream()); vector_test_t<real_t> d_random(num_paths, handle.get_stream()); seed_t seed = static_cast<seed_t>(std::time(nullptr)); random_engine_t rgen(handle, num_paths, d_random, d_crt_out_degs, seed); rgen.generate_col_indices(d_col_indx); bool all_indices_within_degs = check_col_indices(handle, d_crt_out_degs, d_col_indx, num_paths); 
ASSERT_TRUE(all_indices_within_degs); } TEST_F(RandomWalksPrimsTest, SimpleGraphUpdatePathSizes) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; using real_t = float; using seed_t = long; using random_engine_t = rrandom_gen_t<vertex_t, edge_t>; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // Fixed set of out-degs, as opposed to have them generated by the algorithm. 
// That's because I want to test a certain functionality in isolation // std::vector<edge_t> v_crt_out_degs{2, 0, 1, 0}; vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); raft::update_device( d_crt_out_degs.data(), v_crt_out_degs.data(), d_crt_out_degs.size(), handle.get_stream()); rand_walker.update_path_sizes(d_crt_out_degs, d_sizes); std::vector<index_t> v_sizes(num_paths); raft::update_host(v_sizes.data(), raw_const_ptr(d_sizes), num_paths, handle.get_stream()); std::vector<index_t> v_sizes_exp{2, 1, 2, 1}; // i.e., corresponding 0-entries in crt-out-degs, don't get updated; EXPECT_EQ(v_sizes, v_sizes_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphScatterUpdate) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); col_indx_extract_t<decltype(graph_view), index_t> col_extractor{handle, graph_view, raw_const_ptr(d_crt_out_degs), raw_const_ptr(d_sizes), num_paths, max_depth}; // typically given by random engine: // std::vector<vertex_t> v_col_indx{1, 0, 0, 2}; vector_test_t<vertex_t> d_col_indx(num_paths, handle.get_stream()); raft::update_device(d_col_indx.data(), v_col_indx.data(), d_col_indx.size(), handle.get_stream()); vector_test_t<vertex_t> d_next_v(num_paths, handle.get_stream()); vector_test_t<weight_t> d_next_w(num_paths, handle.get_stream()); col_extractor(d_coalesced_v, d_col_indx, d_next_v, d_next_w); rand_walker.update_path_sizes(d_crt_out_degs, d_sizes); // check start(): // { std::vector<vertex_t> v_coalesced_exp{1, -1, -1, 0, -1, -1, 4, -1, -1, 2, -1, -1}; raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), total_sz, handle.get_stream()); EXPECT_EQ(v_coalesced, v_coalesced_exp); } // check crt_out_degs: // { std::vector<edge_t> 
v_crt_out_degs(num_paths); raft::update_host( v_crt_out_degs.data(), raw_const_ptr(d_crt_out_degs), num_paths, handle.get_stream()); std::vector<edge_t> v_crt_out_degs_exp{2, 1, 1, 3}; EXPECT_EQ(v_crt_out_degs, v_crt_out_degs_exp); } // check paths sizes update: // { std::vector<index_t> v_sizes(num_paths); raft::update_host(v_sizes.data(), raw_const_ptr(d_sizes), num_paths, handle.get_stream()); std::vector<index_t> v_sizes_exp{2, 2, 2, 2}; // i.e., corresponding 0-entries in crt-out-degs, don't get updated; EXPECT_EQ(v_sizes, v_sizes_exp); } // check next step: // { std::vector<vertex_t> v_next_v(num_paths); std::vector<weight_t> v_next_w(num_paths); raft::update_host(v_next_v.data(), raw_const_ptr(d_next_v), num_paths, handle.get_stream()); raft::update_host(v_next_w.data(), raw_const_ptr(d_next_w), num_paths, handle.get_stream()); std::vector<vertex_t> v_next_v_exp{4, 1, 5, 3}; std::vector<weight_t> v_next_w_exp{2.1f, 0.1f, 7.1f, 5.1f}; EXPECT_EQ(v_next_v, v_next_v_exp); EXPECT_EQ(v_next_w, v_next_w_exp); } rand_walker.scatter_vertices(d_next_v, d_coalesced_v, d_crt_out_degs, d_sizes); rand_walker.scatter_weights(d_next_w, d_coalesced_w, d_crt_out_degs, d_sizes); // check vertex/weight scatter: // { raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), total_sz, handle.get_stream()); raft::update_host( w_coalesced.data(), raw_const_ptr(d_coalesced_w), total_sz - num_paths, handle.get_stream()); std::vector<vertex_t> v_coalesced_exp{1, 4, -1, 0, 1, -1, 4, 5, -1, 2, 3, -1}; std::vector<weight_t> w_coalesced_exp{2.1, -1, 0.1, -1, 7.1, -1, 5.1, -1}; EXPECT_EQ(v_coalesced, v_coalesced_exp); EXPECT_EQ(w_coalesced, w_coalesced_exp); } } TEST_F(RandomWalksPrimsTest, SimpleGraphCoalesceDefragment) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<index_t> v_sizes{1, 2, 2, 1}; vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); raft::update_device(d_sizes.data(), v_sizes.data(), d_sizes.size(), handle.get_stream()); std::vector<vertex_t> v_coalesced(total_sz, -1); v_coalesced[0] = 3; v_coalesced[max_depth] = 5; v_coalesced[max_depth + 1] = 2; v_coalesced[2 * max_depth] = 4; v_coalesced[2 * max_depth + 1] = 0; v_coalesced[3 * max_depth] = 1; std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); w_coalesced[max_depth - 1] = 10.1; w_coalesced[2 * max_depth - 2] = 11.2; vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; rand_walker.stop(d_coalesced_v, d_coalesced_w, d_sizes); 
// check vertex/weight defragment: // { v_coalesced.resize(d_coalesced_v.size()); w_coalesced.resize(d_coalesced_w.size()); raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), d_coalesced_v.size(), handle.get_stream()); raft::update_host( w_coalesced.data(), raw_const_ptr(d_coalesced_w), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_coalesced_exp{3, 5, 2, 4, 0, 1}; std::vector<weight_t> w_coalesced_exp{10.1, 11.2}; EXPECT_EQ(v_coalesced, v_coalesced_exp); EXPECT_EQ(w_coalesced, w_coalesced_exp); } } TEST_F(RandomWalksPrimsTest, SimpleGraphRandomWalk) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); bool test_all_paths = cugraph::test::host_check_rw_paths(handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksQuery, GraphRWQueryOffsets) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); 
raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_v_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); auto triplet = query_rw_sizes_offsets(handle, num_paths, detail::raw_const_ptr(d_v_sizes)); auto& d_v_offsets = std::get<0>(triplet); auto& d_w_sizes = std::get<1>(triplet); auto& d_w_offsets = std::get<2>(triplet); bool test_paths_sz = cugraph::test::host_check_query_rw(handle, d_v_sizes, d_v_offsets, d_w_sizes, d_w_offsets); if (!test_paths_sz) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_paths_sz); } TEST(RandomWalksSpecialCase, SingleRandomWalk) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); bool test_all_paths = cugraph::test::host_check_rw_paths(handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksSpecialCase, UnweightedGraph) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, false); // un-weighted auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); ASSERT_TRUE(values == nullptr); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); 
raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); std::vector<vertex_t> v_start{2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); bool test_all_paths = cugraph::test::host_check_rw_paths(handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksPadded, SimpleGraph) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; bool use_padding{true}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth, use_padding); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); ASSERT_TRUE(d_sizes.size() == 0); bool test_all_paths = cugraph::test::host_check_rw_paths( handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes, num_paths); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksUtility, PathsToCOO) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; std::vector<index_t> v_sizes{2, 1, 3, 5, 1}; std::vector<vertex_t> v_coalesced{5, 3, 4, 9, 0, 1, 6, 2, 7, 3, 2, 5}; std::vector<weight_t> w_coalesced{0.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto num_paths = v_sizes.size(); auto total_sz = v_coalesced.size(); auto num_edges = w_coalesced.size(); ASSERT_TRUE(num_edges == total_sz - num_paths); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); 
raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device(d_sizes.data(), v_sizes.data(), d_sizes.size(), handle.get_stream()); index_t coalesced_v_sz = d_coalesced_v.size(); auto tpl_coo_offsets = convert_paths_to_coo<vertex_t>(handle, coalesced_v_sz, static_cast<index_t>(num_paths), d_coalesced_v.release(), d_sizes.release()); auto&& d_src = std::move(std::get<0>(tpl_coo_offsets)); auto&& d_dst = std::move(std::get<1>(tpl_coo_offsets)); auto&& d_offsets = std::move(std::get<2>(tpl_coo_offsets)); ASSERT_TRUE(d_src.size() == num_edges); ASSERT_TRUE(d_dst.size() == num_edges); std::vector<vertex_t> v_src(num_edges, 0); std::vector<vertex_t> v_dst(num_edges, 0); std::vector<index_t> v_offsets(d_offsets.size(), 0); raft::update_host(v_src.data(), raw_const_ptr(d_src), d_src.size(), handle.get_stream()); raft::update_host(v_dst.data(), raw_const_ptr(d_dst), d_dst.size(), handle.get_stream()); raft::update_host( v_offsets.data(), raw_const_ptr(d_offsets), d_offsets.size(), handle.get_stream()); std::vector<vertex_t> v_src_exp{5, 9, 0, 6, 2, 7, 3}; std::vector<vertex_t> v_dst_exp{3, 0, 1, 2, 7, 3, 2}; std::vector<index_t> v_offsets_exp{0, 1, 3}; EXPECT_EQ(v_src, v_src_exp); EXPECT_EQ(v_dst, v_dst_exp); EXPECT_EQ(v_offsets, v_offsets_exp); }
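// ---------------------------------------------------------------------------
// Host-side sketch of the invariant these tests lean on through
// cugraph::test::host_check_rw_paths -- this is only an illustration of that
// check, not the helper itself. In the defragmented output, path p contributes
// sizes[p] vertices and sizes[p] - 1 weights, and every consecutive vertex pair
// must be an edge of the CSR graph with the matching weight (exact float
// comparison is fine here because weights are copied verbatim). It is meant to
// be fed the host copies v_ro / v_ci / v_vals that the tests already pull back
// with raft::update_host.
#include <cstddef>
#include <vector>

template <typename vertex_t, typename edge_t, typename weight_t, typename index_t>
bool check_rw_paths_host(std::vector<edge_t> const& ro,    // CSR row offsets
                         std::vector<vertex_t> const& ci,  // CSR column indices
                         std::vector<weight_t> const& vs,  // CSR edge weights
                         std::vector<vertex_t> const& coalesced_v,
                         std::vector<weight_t> const& coalesced_w,
                         std::vector<index_t> const& sizes)
{
    std::size_t v_pos = 0, w_pos = 0;
    for (index_t sz : sizes) {
        for (index_t step = 0; step + 1 < sz; ++step) {
            vertex_t src = coalesced_v[v_pos + step];
            vertex_t dst = coalesced_v[v_pos + step + 1];
            weight_t w   = coalesced_w[w_pos + step];
            bool found = false;
            for (edge_t e = ro[src]; e < ro[src + 1]; ++e) {
                if (ci[e] == dst && vs[e] == w) { found = true; break; }
            }
            if (!found) return false;
        }
        v_pos += sz;
        w_pos += (sz > 0 ? sz - 1 : 0);
    }
    // All paths consumed exactly the coalesced vertex and weight buffers.
    return v_pos == coalesced_v.size() && w_pos == coalesced_w.size();
}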
4d2b4c9a53233f314966059db687231fcb38abf4.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "cuda_profiler_api.h" #include <utilities/base_fixture.hpp> #include <utilities/test_utilities.hpp> #include <rmm/thrust_rmm_allocator.h> #include <thrust/random.h> #include <cugraph/algorithms.hpp> #include <cugraph/graph.hpp> #include <sampling/random_walks.cuh> #include <raft/handle.hpp> #include <raft/random/rng.cuh> #include "random_walks_utils.cuh" #include <algorithm> #include <iostream> #include <iterator> #include <limits> #include <numeric> #include <utilities/high_res_timer.hpp> #include <vector> using namespace cugraph::experimental; template <typename value_t> using vector_test_t = detail::device_vec_t<value_t>; // for debug purposes namespace { // anonym. template <typename vertex_t, typename edge_t, typename weight_t> graph_t<vertex_t, edge_t, weight_t, false, false> make_graph(raft::handle_t const& handle, std::vector<vertex_t> const& v_src, std::vector<vertex_t> const& v_dst, std::vector<weight_t> const& v_w, vertex_t num_vertices, edge_t num_edges, bool is_weighted) { vector_test_t<vertex_t> d_src(num_edges, handle.get_stream()); vector_test_t<vertex_t> d_dst(num_edges, handle.get_stream()); vector_test_t<weight_t> d_weights(num_edges, handle.get_stream()); raft::update_device(d_src.data(), v_src.data(), d_src.size(), handle.get_stream()); raft::update_device(d_dst.data(), v_dst.data(), d_dst.size(), handle.get_stream()); weight_t* ptr_d_weights{nullptr}; if (is_weighted) { raft::update_device(d_weights.data(), v_w.data(), d_weights.size(), handle.get_stream()); ptr_d_weights = d_weights.data(); } edgelist_t<vertex_t, edge_t, weight_t> edgelist{ d_src.data(), d_dst.data(), ptr_d_weights, num_edges}; graph_t<vertex_t, edge_t, weight_t, false, false> graph( handle, edgelist, num_vertices, graph_properties_t{false, false, is_weighted}, false); return graph; } template <typename vertex_t, typename edge_t, typename index_t> bool check_col_indices(raft::handle_t const& handle, vector_test_t<edge_t> const& d_crt_out_degs, vector_test_t<vertex_t> const& d_col_indx, index_t num_paths) { bool all_indices_within_degs = thrust::all_of( rmm::exec_policy(handle.get_stream())->on(handle.get_stream()), thrust::make_counting_iterator<index_t>(0), thrust::make_counting_iterator<index_t>(num_paths), [p_d_col_indx = detail::raw_const_ptr(d_col_indx), p_d_crt_out_degs = detail::raw_const_ptr(d_crt_out_degs)] __device__(auto indx) { if (p_d_crt_out_degs[indx] > 0) return ((p_d_col_indx[indx] >= 0) && (p_d_col_indx[indx] < p_d_crt_out_degs[indx])); else return true; }); return all_indices_within_degs; } } // namespace // FIXME (per rlratzel request): // This test may be considered an e2e test // which could be moved to a different test suite: // struct RandomWalksPrimsTest : public ::testing::Test { }; TEST_F(RandomWalksPrimsTest, SimpleGraphRWStart) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using 
index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vs(num_edges); raft::update_host(v_ro.data(), offsets, num_vertices + 1, handle.get_stream()); raft::update_host(v_ci.data(), indices, num_edges, handle.get_stream()); raft::update_host(v_vs.data(), values, num_edges, handle.get_stream()); std::vector<edge_t> v_ro_expected{0, 1, 3, 6, 7, 8, 8}; std::vector<vertex_t> v_ci_expected{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_vs_expected{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; EXPECT_EQ(v_ro, v_ro_expected); EXPECT_EQ(v_ci, v_ci_expected); EXPECT_EQ(v_vs, v_vs_expected); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; rand_walker.start(d_start, d_coalesced_v, d_sizes); std::vector<vertex_t> v_coalesced_exp{1, -1, -1, 0, -1, -1, 4, -1, -1, 2, -1, -1}; raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), total_sz, handle.get_stream()); EXPECT_EQ(v_coalesced, v_coalesced_exp); std::vector<index_t> v_sizes{1, 1, 1, 1}; std::vector<index_t> v_sz_exp(num_paths); raft::update_host(v_sz_exp.data(), raw_const_ptr(d_sizes), num_paths, handle.get_stream()); EXPECT_EQ(v_sizes, v_sz_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphCoalesceExperiments) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); 
vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); EXPECT_EQ(static_cast<size_t>(num_vertices), d_out_degs.size()); std::vector<edge_t> v_out_degs(num_vertices); raft::update_host( v_out_degs.data(), raw_const_ptr(d_out_degs), num_vertices, handle.get_stream()); std::vector<edge_t> v_out_degs_exp{1, 2, 3, 1, 1, 0}; EXPECT_EQ(v_out_degs, v_out_degs_exp); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); std::vector<edge_t> v_crt_out_degs(num_paths); raft::update_host( v_crt_out_degs.data(), raw_const_ptr(d_crt_out_degs), num_paths, handle.get_stream()); std::vector<edge_t> v_crt_out_degs_exp{2, 1, 1, 3}; EXPECT_EQ(v_crt_out_degs, v_crt_out_degs_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphColExtraction) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); col_indx_extract_t<decltype(graph_view), index_t> col_extractor{handle, graph_view, 
raw_const_ptr(d_crt_out_degs), raw_const_ptr(d_sizes), num_paths, max_depth}; // typically given by random engine: // std::vector<vertex_t> v_col_indx{1, 0, 0, 2}; vector_test_t<vertex_t> d_col_indx(num_paths, handle.get_stream()); raft::update_device(d_col_indx.data(), v_col_indx.data(), d_col_indx.size(), handle.get_stream()); vector_test_t<vertex_t> d_next_v(num_paths, handle.get_stream()); vector_test_t<weight_t> d_next_w(num_paths, handle.get_stream()); col_extractor(d_coalesced_v, d_col_indx, d_next_v, d_next_w); std::vector<vertex_t> v_next_v(num_paths); std::vector<weight_t> v_next_w(num_paths); raft::update_host(v_next_v.data(), raw_const_ptr(d_next_v), num_paths, handle.get_stream()); raft::update_host(v_next_w.data(), raw_const_ptr(d_next_w), num_paths, handle.get_stream()); std::vector<vertex_t> v_next_v_exp{4, 1, 5, 3}; std::vector<weight_t> v_next_w_exp{2.1f, 0.1f, 7.1f, 5.1f}; EXPECT_EQ(v_next_v, v_next_v_exp); EXPECT_EQ(v_next_w, v_next_w_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphRndGenColIndx) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; using real_t = float; using seed_t = long; using random_engine_t = rrandom_gen_t<vertex_t, edge_t>; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); // random engine generated: // vector_test_t<vertex_t> d_col_indx(num_paths, handle.get_stream()); vector_test_t<real_t> d_random(num_paths, handle.get_stream()); seed_t seed = static_cast<seed_t>(std::time(nullptr)); random_engine_t rgen(handle, num_paths, d_random, d_crt_out_degs, seed); rgen.generate_col_indices(d_col_indx); bool all_indices_within_degs = check_col_indices(handle, d_crt_out_degs, d_col_indx, num_paths); ASSERT_TRUE(all_indices_within_degs); } TEST_F(RandomWalksPrimsTest, SimpleGraphUpdatePathSizes) { using 
namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; using real_t = float; using seed_t = long; using random_engine_t = rrandom_gen_t<vertex_t, edge_t>; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // Fixed set of out-degs, as opposed to have them generated by the algorithm. 
// That's because I want to test a certain functionality in isolation // std::vector<edge_t> v_crt_out_degs{2, 0, 1, 0}; vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); raft::update_device( d_crt_out_degs.data(), v_crt_out_degs.data(), d_crt_out_degs.size(), handle.get_stream()); rand_walker.update_path_sizes(d_crt_out_degs, d_sizes); std::vector<index_t> v_sizes(num_paths); raft::update_host(v_sizes.data(), raw_const_ptr(d_sizes), num_paths, handle.get_stream()); std::vector<index_t> v_sizes_exp{2, 1, 2, 1}; // i.e., corresponding 0-entries in crt-out-degs, don't get updated; EXPECT_EQ(v_sizes, v_sizes_exp); } TEST_F(RandomWalksPrimsTest, SimpleGraphScatterUpdate) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<vertex_t> v_coalesced(total_sz, -1); std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_start(num_paths, handle.get_stream()); raft::update_device(d_start.data(), v_start.data(), d_start.size(), handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; auto const& d_out_degs = rand_walker.get_out_degs(); rand_walker.start(d_start, d_coalesced_v, d_sizes); // update crt_out_degs: // vector_test_t<edge_t> d_crt_out_degs(num_paths, handle.get_stream()); rand_walker.gather_from_coalesced( d_coalesced_v, d_out_degs, d_sizes, d_crt_out_degs, max_depth, num_paths); col_indx_extract_t<decltype(graph_view), index_t> col_extractor{handle, graph_view, raw_const_ptr(d_crt_out_degs), raw_const_ptr(d_sizes), num_paths, max_depth}; // typically given by random engine: // std::vector<vertex_t> v_col_indx{1, 0, 0, 2}; vector_test_t<vertex_t> d_col_indx(num_paths, handle.get_stream()); raft::update_device(d_col_indx.data(), v_col_indx.data(), d_col_indx.size(), handle.get_stream()); vector_test_t<vertex_t> d_next_v(num_paths, handle.get_stream()); vector_test_t<weight_t> d_next_w(num_paths, handle.get_stream()); col_extractor(d_coalesced_v, d_col_indx, d_next_v, d_next_w); rand_walker.update_path_sizes(d_crt_out_degs, d_sizes); // check start(): // { std::vector<vertex_t> v_coalesced_exp{1, -1, -1, 0, -1, -1, 4, -1, -1, 2, -1, -1}; raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), total_sz, handle.get_stream()); EXPECT_EQ(v_coalesced, v_coalesced_exp); } // check crt_out_degs: // { std::vector<edge_t> 
v_crt_out_degs(num_paths); raft::update_host( v_crt_out_degs.data(), raw_const_ptr(d_crt_out_degs), num_paths, handle.get_stream()); std::vector<edge_t> v_crt_out_degs_exp{2, 1, 1, 3}; EXPECT_EQ(v_crt_out_degs, v_crt_out_degs_exp); } // check paths sizes update: // { std::vector<index_t> v_sizes(num_paths); raft::update_host(v_sizes.data(), raw_const_ptr(d_sizes), num_paths, handle.get_stream()); std::vector<index_t> v_sizes_exp{2, 2, 2, 2}; // i.e., corresponding 0-entries in crt-out-degs, don't get updated; EXPECT_EQ(v_sizes, v_sizes_exp); } // check next step: // { std::vector<vertex_t> v_next_v(num_paths); std::vector<weight_t> v_next_w(num_paths); raft::update_host(v_next_v.data(), raw_const_ptr(d_next_v), num_paths, handle.get_stream()); raft::update_host(v_next_w.data(), raw_const_ptr(d_next_w), num_paths, handle.get_stream()); std::vector<vertex_t> v_next_v_exp{4, 1, 5, 3}; std::vector<weight_t> v_next_w_exp{2.1f, 0.1f, 7.1f, 5.1f}; EXPECT_EQ(v_next_v, v_next_v_exp); EXPECT_EQ(v_next_w, v_next_w_exp); } rand_walker.scatter_vertices(d_next_v, d_coalesced_v, d_crt_out_degs, d_sizes); rand_walker.scatter_weights(d_next_w, d_coalesced_w, d_crt_out_degs, d_sizes); // check vertex/weight scatter: // { raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), total_sz, handle.get_stream()); raft::update_host( w_coalesced.data(), raw_const_ptr(d_coalesced_w), total_sz - num_paths, handle.get_stream()); std::vector<vertex_t> v_coalesced_exp{1, 4, -1, 0, 1, -1, 4, 5, -1, 2, 3, -1}; std::vector<weight_t> w_coalesced_exp{2.1, -1, 0.1, -1, 7.1, -1, 5.1, -1}; EXPECT_EQ(v_coalesced, v_coalesced_exp); EXPECT_EQ(w_coalesced, w_coalesced_exp); } } TEST_F(RandomWalksPrimsTest, SimpleGraphCoalesceDefragment) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); index_t num_paths = 4; index_t max_depth = 3; index_t total_sz = num_paths * max_depth; std::vector<index_t> v_sizes{1, 2, 2, 1}; vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); raft::update_device(d_sizes.data(), v_sizes.data(), d_sizes.size(), handle.get_stream()); std::vector<vertex_t> v_coalesced(total_sz, -1); v_coalesced[0] = 3; v_coalesced[max_depth] = 5; v_coalesced[max_depth + 1] = 2; v_coalesced[2 * max_depth] = 4; v_coalesced[2 * max_depth + 1] = 0; v_coalesced[3 * max_depth] = 1; std::vector<weight_t> w_coalesced(total_sz - num_paths, -1); w_coalesced[max_depth - 1] = 10.1; w_coalesced[2 * max_depth - 2] = 11.2; vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<weight_t> d_coalesced_w(total_sz - num_paths, handle.get_stream()); raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device( d_coalesced_w.data(), w_coalesced.data(), d_coalesced_w.size(), handle.get_stream()); random_walker_t<decltype(graph_view)> rand_walker{handle, graph_view, num_paths, max_depth}; rand_walker.stop(d_coalesced_v, d_coalesced_w, d_sizes); 
// check vertex/weight defragment: // { v_coalesced.resize(d_coalesced_v.size()); w_coalesced.resize(d_coalesced_w.size()); raft::update_host( v_coalesced.data(), raw_const_ptr(d_coalesced_v), d_coalesced_v.size(), handle.get_stream()); raft::update_host( w_coalesced.data(), raw_const_ptr(d_coalesced_w), d_coalesced_w.size(), handle.get_stream()); std::vector<vertex_t> v_coalesced_exp{3, 5, 2, 4, 0, 1}; std::vector<weight_t> w_coalesced_exp{10.1, 11.2}; EXPECT_EQ(v_coalesced, v_coalesced_exp); EXPECT_EQ(w_coalesced, w_coalesced_exp); } } TEST_F(RandomWalksPrimsTest, SimpleGraphRandomWalk) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); bool test_all_paths = cugraph::test::host_check_rw_paths(handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksQuery, GraphRWQueryOffsets) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{1, 0, 4, 2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); 
raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_v_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); auto triplet = query_rw_sizes_offsets(handle, num_paths, detail::raw_const_ptr(d_v_sizes)); auto& d_v_offsets = std::get<0>(triplet); auto& d_w_sizes = std::get<1>(triplet); auto& d_w_offsets = std::get<2>(triplet); bool test_paths_sz = cugraph::test::host_check_query_rw(handle, d_v_sizes, d_v_offsets, d_w_sizes, d_w_offsets); if (!test_paths_sz) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_paths_sz); } TEST(RandomWalksSpecialCase, SingleRandomWalk) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); bool test_all_paths = cugraph::test::host_check_rw_paths(handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksSpecialCase, UnweightedGraph) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, false); // un-weighted auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); ASSERT_TRUE(values == nullptr); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); 
raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); std::vector<vertex_t> v_start{2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); bool test_all_paths = cugraph::test::host_check_rw_paths(handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksPadded, SimpleGraph) { using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; edge_t num_edges = 8; vertex_t num_vertices = 6; std::vector<vertex_t> v_src{0, 1, 1, 2, 2, 2, 3, 4}; std::vector<vertex_t> v_dst{1, 3, 4, 0, 1, 3, 5, 5}; std::vector<weight_t> v_w{0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto graph = make_graph(handle, v_src, v_dst, v_w, num_vertices, num_edges, true); auto graph_view = graph.view(); edge_t const* offsets = graph_view.offsets(); vertex_t const* indices = graph_view.indices(); weight_t const* values = graph_view.weights(); std::vector<edge_t> v_ro(num_vertices + 1); std::vector<vertex_t> v_ci(num_edges); std::vector<weight_t> v_vals(num_edges); raft::update_host(v_ro.data(), offsets, v_ro.size(), handle.get_stream()); raft::update_host(v_ci.data(), indices, v_ci.size(), handle.get_stream()); raft::update_host(v_vals.data(), values, v_vals.size(), handle.get_stream()); std::vector<vertex_t> v_start{2}; vector_test_t<vertex_t> d_v_start(v_start.size(), handle.get_stream()); raft::update_device(d_v_start.data(), v_start.data(), d_v_start.size(), handle.get_stream()); index_t num_paths = v_start.size(); index_t max_depth = 5; // 0-copy const device view: // detail::device_const_vector_view<vertex_t, index_t> d_start_view{d_v_start.data(), num_paths}; bool use_padding{true}; auto quad = detail::random_walks_impl(handle, graph_view, d_start_view, max_depth, use_padding); auto& d_coalesced_v = std::get<0>(quad); auto& d_coalesced_w = std::get<1>(quad); auto& d_sizes = std::get<2>(quad); auto seed0 = std::get<3>(quad); ASSERT_TRUE(d_sizes.size() == 0); bool test_all_paths = cugraph::test::host_check_rw_paths( handle, graph_view, d_coalesced_v, d_coalesced_w, d_sizes, num_paths); if (!test_all_paths) std::cout << "starting seed on failure: " << seed0 << '\n'; ASSERT_TRUE(test_all_paths); } TEST(RandomWalksUtility, PathsToCOO) { using namespace cugraph::experimental::detail; using vertex_t = int32_t; using edge_t = vertex_t; using weight_t = float; using index_t = vertex_t; raft::handle_t handle{}; std::vector<index_t> v_sizes{2, 1, 3, 5, 1}; std::vector<vertex_t> v_coalesced{5, 3, 4, 9, 0, 1, 6, 2, 7, 3, 2, 5}; std::vector<weight_t> w_coalesced{0.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1}; auto num_paths = v_sizes.size(); auto total_sz = v_coalesced.size(); auto num_edges = w_coalesced.size(); ASSERT_TRUE(num_edges == total_sz - num_paths); vector_test_t<vertex_t> d_coalesced_v(total_sz, handle.get_stream()); vector_test_t<index_t> d_sizes(num_paths, handle.get_stream()); 
raft::update_device( d_coalesced_v.data(), v_coalesced.data(), d_coalesced_v.size(), handle.get_stream()); raft::update_device(d_sizes.data(), v_sizes.data(), d_sizes.size(), handle.get_stream()); index_t coalesced_v_sz = d_coalesced_v.size(); auto tpl_coo_offsets = convert_paths_to_coo<vertex_t>(handle, coalesced_v_sz, static_cast<index_t>(num_paths), d_coalesced_v.release(), d_sizes.release()); auto&& d_src = std::move(std::get<0>(tpl_coo_offsets)); auto&& d_dst = std::move(std::get<1>(tpl_coo_offsets)); auto&& d_offsets = std::move(std::get<2>(tpl_coo_offsets)); ASSERT_TRUE(d_src.size() == num_edges); ASSERT_TRUE(d_dst.size() == num_edges); std::vector<vertex_t> v_src(num_edges, 0); std::vector<vertex_t> v_dst(num_edges, 0); std::vector<index_t> v_offsets(d_offsets.size(), 0); raft::update_host(v_src.data(), raw_const_ptr(d_src), d_src.size(), handle.get_stream()); raft::update_host(v_dst.data(), raw_const_ptr(d_dst), d_dst.size(), handle.get_stream()); raft::update_host( v_offsets.data(), raw_const_ptr(d_offsets), d_offsets.size(), handle.get_stream()); std::vector<vertex_t> v_src_exp{5, 9, 0, 6, 2, 7, 3}; std::vector<vertex_t> v_dst_exp{3, 0, 1, 2, 7, 3, 2}; std::vector<index_t> v_offsets_exp{0, 1, 3}; EXPECT_EQ(v_src, v_src_exp); EXPECT_EQ(v_dst, v_dst_exp); EXPECT_EQ(v_offsets, v_offsets_exp); }
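Editor's aside (not part of either file in this record): every test fixture above builds the same 6-vertex, 8-edge graph, and the expected row offsets v_ro_expected{0, 1, 3, 6, 7, 8, 8} follow directly from counting out-degrees in v_src. The short host-only C++ sketch below rebuilds those offsets from the COO source-vertex list; build_row_offsets is a helper name introduced only for this illustration.

// Host-only sanity sketch: rebuilds the CSR row offsets that the tests above
// expect (v_ro_expected) from the COO source-vertex list v_src.
#include <iostream>
#include <vector>

// Hypothetical helper, introduced only for this example.
std::vector<int> build_row_offsets(std::vector<int> const& src, int num_vertices)
{
    std::vector<int> offsets(num_vertices + 1, 0);
    for (int v : src) ++offsets[v + 1];                                   // out-degree per source vertex
    for (int i = 0; i < num_vertices; ++i) offsets[i + 1] += offsets[i];  // prefix sum over degrees
    return offsets;
}

int main()
{
    std::vector<int> v_src{0, 1, 1, 2, 2, 2, 3, 4};
    auto v_ro = build_row_offsets(v_src, 6);
    for (int o : v_ro) std::cout << o << ' ';  // prints: 0 1 3 6 7 8 8
    std::cout << '\n';
    return 0;
}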
feb57edd57e3f67840733089be58eb6e5e521742.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ******************************************************************************/

#include <stdio.h>
#include <device_launch_parameters.h>
#include <hiprand/hiprand_kernel.h>

#define trainNum 100000 // number of train data
#define testNum 1000 // number of test data
#define inLayout 10 // number of input layer's neurons
#define hideLayout 8 // number of hidden layer's neurons
#define outLayout 1 // number of output layer's neurons
#define initWeightMax 0.5 // max value of initial weight
#define eta (0.1f) // learn rate
#define iterMax 10000 // max iteration times
#define batchNum 32 // number of batches

#define BLOCKSIZE 16
#define BLOCKSIZE_32 32

/**
 * func:initialize weight
 * output:weight_D
 * input:row row of weight matrix
 * input:col column of weight matrix
 * input:maxNum max value of weight
 */
__global__ void Bp_Init_Weight(float *weight_D, int row, int col, float maxNum, int seed)
{
    int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index
    int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index

    int index = y_id * col + x_id;

    hiprandState_t s;
    hiprand_init(index + seed, 0, 0, &s);

    if (x_id < col && y_id < row)
        weight_D[index] = (hiprand_uniform(&s) - 0.5f) * maxNum;
}

/**
 * func:calculate C = A * B' with tiling
 * input:dev_A
 * input:dev_B
 * output:dev_C
 * input:heightA row of A matrix
 * input:widthA A column of A matrix
 * input:heightB row of B matrix
 */
__global__ void MatMulCUDATB(float *dev_A, float *dev_B, float *dev_C, const int heightA, const int widthA, const int heightB)
{
    int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index
    int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index

    float Cvalue = 0;

    // loop over tiles in A and B
    for (int m = 0; m < widthA; m += BLOCKSIZE)
    {
        int colA = m + threadIdx.x;
        int rowB = m + threadIdx.y;

        // use shared memory to store Asub and Bsub
        __shared__ float As[BLOCKSIZE][BLOCKSIZE];
        __shared__ float Bs[BLOCKSIZE][BLOCKSIZE];

        // load tile into shared memory
        if ((colA < widthA) && (y_id < heightA))
            As[threadIdx.y][threadIdx.x] = dev_A[y_id * widthA + colA]; // A(y_id, colA)
        else
            As[threadIdx.y][threadIdx.x] = 0.0f;

        if ((x_id < heightB) && (rowB < widthA))
            Bs[threadIdx.y][threadIdx.x] = dev_B[x_id * widthA + rowB]; // B(rowB, x_id)
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0f;

        __syncthreads();

        // matrix multiply in tile matrix
        for (int idx = 0; idx < BLOCKSIZE; ++idx)
        {
            Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x];
        }

        __syncthreads();
    }

    if (x_id < heightB && y_id < heightA)
    {
        dev_C[y_id * heightB + x_id] = Cvalue;
    }
}

/**
 * func:calculate vector's inner product
 * input:As
 * input:Bs
 * input:length
 */
__device__ inline static float BP_Dot(float *As, float *Bs, int length)
{
    float dot = 0.0f;
    for (int i = 0; i < length; i++)
    {
        dot += As[i] * Bs[i];
    }
    return(dot);
}

/**
 * func:calculate input of hidden layer
 * input:dev_A
 * input:dev_B
 * output:dev_C
 * input:heightA
 * input:widthA
 * input:widthB
 */
__global__ void BP_Calculate_HideIn(float *dev_A, float *dev_B, float *dev_C, const int heightA, const int widthA, const int widthB)
{
    int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index
    int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index

    __shared__ float As[BLOCKSIZE_32][BLOCKSIZE_32];
    __shared__ float Bs[BLOCKSIZE_32][BLOCKSIZE_32];

    As[threadIdx.y][threadIdx.x] = 0.0f;
    Bs[threadIdx.y][threadIdx.x] = 0.0f;

    if (y_id < heightA && x_id < widthA)
    {
        As[threadIdx.y][threadIdx.x] = dev_A[threadIdx.y * widthA + x_id];
        Bs[threadIdx.y][threadIdx.x] = dev_B[threadIdx.y * widthA + x_id];
    }
    __syncthreads();

    float dot = BP_Dot(As[threadIdx.y], Bs[threadIdx.x], BLOCKSIZE_32);

    atomicAdd(&dev_C[threadIdx.y * widthB + threadIdx.x], dot);
}

/**
 * func:calculate output of hidden layer
 * input:hideOut_D input of hidden layer
 * output:hideOut_D output of hidden layer
 * input:row
 * input:col
 */
__global__ void BP_Calculate_HideOut(float *hideOut_D, int row, int col)
{
    int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index
    int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index

    int index = y_id * col + x_id;

    if (x_id < col && y_id < row)
    {
        hideOut_D[index] = 1.0f / (1.0f + exp(-hideOut_D[index]));
    }
}

/**
 * func:calculate delta2_D = x_Out - A * B'
 * input:dev_A
 * input:dev_B
 * output:delta2_D delta between hidden layer and output layer
 * input:xOut_D
 * input:heightA
 * input:widthA
 * input:heightB
 */
__global__ void BP_Calculate_Delta2(float *dev_A, float *dev_B, float *delta2_D, float *xOut_D, const int heightA, const int widthA, const int heightB)
{
    int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index
    int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index

    float Cvalue = 0;

    // loop over tiles in A and B
    for (int m = 0; m < widthA; m += BLOCKSIZE)
    {
        int colA = m + threadIdx.x;
        int rowB = m + threadIdx.y;

        // use shared memory to store Asub and Bsub
        __shared__ float As[BLOCKSIZE][BLOCKSIZE];
        __shared__ float Bs[BLOCKSIZE][BLOCKSIZE];

        // load tile into shared memory
        if ((colA < widthA) && (y_id < heightA))
            As[threadIdx.y][threadIdx.x] = dev_A[y_id * widthA + colA]; // A(y_id, colA)
        else
            As[threadIdx.y][threadIdx.x] = 0.0f;

        if ((x_id < heightB) && (rowB < widthA))
            Bs[threadIdx.y][threadIdx.x] = dev_B[x_id * widthA + rowB]; // B(rowB, x_id)
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0f;

        __syncthreads();

        // matrix multiply in tile matrix
        for (int idx = 0; idx < BLOCKSIZE; ++idx)
        {
            Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x];
        }

        __syncthreads();
    }

    if (x_id < heightB && y_id < heightA)
    {
        int index = y_id * heightB + x_id;
        delta2_D[index] = xOut_D[index] - Cvalue;
    }
}

/**
 * func:calculate C = (hOut .* (1 - hOut)) .* (A * B)
 * input:dev_A
 * input:dev_B
 * output:dev_C
 * input:hideOut_D
 * input:heightA
 * input:widthA
 * input:widthB
 */
__global__ void BP_Calculate_Delta1(float *dev_A, float *dev_B, float *dev_C, float *hideOut_D, const int heightA, const int widthA, const int widthB)
{
    int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index
    int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index

    float Cvalue = 0;

    // loop over tiles in A and B
    for (int m = 0; m < widthA; m += BLOCKSIZE)
    {
        int colA = m + threadIdx.x;
        int rowB = m + threadIdx.y;

        // use shared memory to store Asub and Bsub
        __shared__ float As[BLOCKSIZE][BLOCKSIZE];
        __shared__ float Bs[BLOCKSIZE][BLOCKSIZE];

        // load tile into shared memory
        if ((colA < widthA) && (y_id < heightA))
            As[threadIdx.y][threadIdx.x] = dev_A[y_id * widthA + colA]; // A(y_id, colA)
        else
            As[threadIdx.y][threadIdx.x] = 0.0f;

        if ((x_id < widthB) && (rowB < widthA))
            Bs[threadIdx.y][threadIdx.x] = dev_B[rowB * widthB + x_id]; // B(rowB, x_id)
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0f;

        __syncthreads();

        // matrix multiply in tile matrix
        for (int idx = 0; idx < BLOCKSIZE; ++idx)
        {
            Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x];
        }

        __syncthreads();
    }

    if (x_id < widthB && y_id < heightA)
    {
        int index = y_id * widthB + x_id;
        float data = hideOut_D[index];
        dev_C[index] = data * (1.0f - data) * Cvalue;
    }
}

/**
 * func:update weight C = C + eta/batchNum .* (A' * B)
 * input:dev_A
 * input:dev_B
 * output:dev_C
 * input:heightA
 * input:widthA
 * input:heightB
 */
__global__ void BP_Update_Weight(float *dev_A, float *dev_B, float *dev_C, const int heightA, const int widthA, const int widthB)
{
    int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index
    int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index

    float Cvalue = 0;

    // loop over tiles in A and B
    for (int m = 0; m < heightA; m += BLOCKSIZE)
    {
        int colA = m + threadIdx.x;
        int rowB = m + threadIdx.y;

        // use shared memory to store Asub and Bsub
        __shared__ float As[BLOCKSIZE][BLOCKSIZE];
        __shared__ float Bs[BLOCKSIZE][BLOCKSIZE];

        // load tile into shared memory
        if ((colA < heightA) && (y_id < widthA))
            As[threadIdx.y][threadIdx.x] = dev_A[colA * widthA + y_id]; // A(y_id, colA)
        else
            As[threadIdx.y][threadIdx.x] = 0.0f;

        if ((x_id < widthB) && (rowB < heightA))
            Bs[threadIdx.y][threadIdx.x] = dev_B[rowB * widthB + x_id]; // B(rowB, x_id)
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0f;

        __syncthreads();

        // matrix multiply in tile matrix
        for (int idx = 0; idx < BLOCKSIZE; ++idx)
        {
            Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x];
        }

        __syncthreads();
    }

    if (x_id < widthB && y_id < widthA)
    {
        dev_C[y_id * widthB + x_id] += eta * Cvalue / float(batchNum);
    }
}

/**
 * func:calculate error vector
 * input:n
 * input:A
 * input:B
 * output:C
 */
__global__ void vectorSub(int n, float *A, float *B, float *C)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if(i < n){
        C[i] = 0.5 * (A[i] - B[i]) * (A[i] - B[i]);
        //printf("Thread id is: %d test is: %.5f real is: %.5f Error Vector is: %.5f\n", i, A[i], B[i], C[i]);
    }
}

/**
 * func:calculate error
 * input:n
 * input:A
 * output:error
 */
__global__ void BP_Calculate_Error(int n, float *A, float *error_D)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < n)
    {
        atomicAdd(&(error_D[0]), A[i]);
        //printf("Thread id is: %d, Error is: %.10f\n", i, error_D[0]);
    }
}

/*
 * func:BP algorithm parallelization version
 * input:inputTrain_H input train data
 * input:inputTest_H input test data
 * input:outputTrain_H train data's label
 * input:outputTest_H test data's label
 */
void BpMain(float *inputTrain_H, float *inputTest_H, float *outputTrain_H, float *outputTest_H)
{
    /* Allocate device variables */
    float *inputTrain_D, *inputTest_D, *outputTrain_D, *outputTest_D;
    hipMalloc((void**)&inputTrain_D, trainNum * inLayout * sizeof(float));
    hipMalloc((void**)&inputTest_D, testNum * inLayout * sizeof(float));
    hipMalloc((void**)&outputTrain_D, trainNum * outLayout * sizeof(float));
    hipMalloc((void**)&outputTest_D, testNum * outLayout * sizeof(float));

    float *weightHideIn_D, *weightOutHide_D;
    hipMalloc((void**)&weightHideIn_D, hideLayout * inLayout * sizeof(float));
    hipMalloc((void**)&weightOutHide_D, outLayout * hideLayout * sizeof(float));

    float *weightHideInT_D;
    hipMalloc((void**)&weightHideInT_D, hideLayout * inLayout * sizeof(float));

    float *deltaHideIn_D, *deltaOutHide_D;
    hipMalloc((void**)&deltaHideIn_D, hideLayout * batchNum * sizeof(float));
    hipMalloc((void**)&deltaOutHide_D, outLayout * batchNum * sizeof(float));

    float *hideOut_D, *hideOutTest_D;
    hipMalloc((void**)&hideOut_D, hideLayout * batchNum * sizeof(float));
    hipMemset(hideOut_D, 0, hideLayout * batchNum * sizeof(float));
    hipMalloc((void**)&hideOutTest_D, hideLayout * testNum * sizeof(float));

    float *phi_D;
    hipMalloc((void**)&phi_D, hideLayout * batchNum * sizeof(float));

    float *yOut_D, *yOutTest_D;
    hipMalloc((void**)&yOut_D, outLayout * batchNum * sizeof(float));
    hipMalloc((void**)&yOutTest_D, outLayout * testNum * sizeof(float));

    float *w10 = (float*)malloc(hideLayout * inLayout * sizeof(float));
    float *w21 = (float*)malloc(outLayout * hideLayout * sizeof(float));

    float *error;
    hipMalloc((void**)&error, testNum * outLayout * sizeof(float));

    float *error_H = (float*)malloc(sizeof(float));
    float *error_D;
    hipMalloc((void**)&error_D, sizeof(float));
    hipMemset(error_D, 0, sizeof(float));

    // Copy host variables to device
    hipMemcpy(inputTrain_D, inputTrain_H, trainNum * inLayout * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(inputTest_D, inputTest_H, testNum * inLayout * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(outputTrain_D, outputTrain_H, trainNum * outLayout * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(outputTest_D, outputTest_H, testNum * outLayout * sizeof(float), hipMemcpyHostToDevice);

    /* Initialize thread block and kernel grid dimensions */
    dim3 dimBlock2D(BLOCKSIZE, BLOCKSIZE);
    dim3 dimBlock2D_32(BLOCKSIZE_32, BLOCKSIZE_32);
    dim3 dimBlock1D(BLOCKSIZE * BLOCKSIZE);
    dim3 dimGrid2D_hide_in((inLayout + BLOCKSIZE - 1) / dimBlock2D.x, (hideLayout + BLOCKSIZE - 1) / dimBlock2D.y);
    dim3 dimGrid2D_out_hide((hideLayout + BLOCKSIZE - 1) / dimBlock2D.x, (outLayout + BLOCKSIZE - 1) / dimBlock2D.y);
    dim3 dimGrid2D_batch_hide((hideLayout + BLOCKSIZE - 1) / dimBlock2D.x, (batchNum + BLOCKSIZE - 1) / dimBlock2D.y);
    dim3 dimGrid2D_batch_out((outLayout + BLOCKSIZE - 1) / dimBlock2D.x, (batchNum + BLOCKSIZE - 1) / dimBlock2D.y);
    dim3 dimGrid2D_testNum_hide((hideLayout + BLOCKSIZE - 1) / dimBlock2D.x, (testNum + BLOCKSIZE - 1) / dimBlock2D.y);
    dim3 dimGrid2D_testNum_out((outLayout + BLOCKSIZE - 1) / dimBlock2D.x, (testNum + BLOCKSIZE - 1) / dimBlock2D.y);
    dim3 dimGrid1D_testNum(((testNum + BLOCKSIZE - 1) / dimBlock2D.x));
    dim3 dimGrid2D_32_batch_in((inLayout + BLOCKSIZE_32 - 1) / dimBlock2D_32.x, (batchNum + BLOCKSIZE_32 - 1) / dimBlock2D_32.y);

    /* weight initialization */
    hipLaunchKernelGGL(( Bp_Init_Weight), dim3(dimGrid2D_hide_in), dim3(dimBlock2D), 0, 0, weightHideIn_D, hideLayout, inLayout, initWeightMax, 0);
    hipLaunchKernelGGL(( Bp_Init_Weight), dim3(dimGrid2D_out_hide), dim3(dimBlock2D), 0, 0, weightOutHide_D, outLayout, hideLayout, initWeightMax, 393);

    for (int i = 0; i < 10000; i++)
    {
        for (int batch = 0; batch < trainNum; batch += batchNum)
        {
            /* hIn = X * W01' */
            hipLaunchKernelGGL(( BP_Calculate_HideIn), dim3(dimGrid2D_32_batch_in), dim3(dimBlock2D_32), 0, 0, &inputTrain_D[batch * inLayout], weightHideIn_D, hideOut_D, batchNum, inLayout, hideLayout);

            /* hOut = h(hIn) */
            hipLaunchKernelGGL(( BP_Calculate_HideOut), dim3(dimGrid2D_batch_hide), dim3(dimBlock2D), 0, 0, hideOut_D, batchNum, hideLayout);

            /* delta2 = xOut - hOut * W21' */
            hipLaunchKernelGGL(( BP_Calculate_Delta2), dim3(dimGrid2D_batch_out), dim3(dimBlock2D), 0, 0, hideOut_D, weightOutHide_D, deltaOutHide_D, &outputTrain_D[batch * outLayout], batchNum, hideLayout, outLayout);

            /* delta1 = (hOut .* (1 - hOut)) .* (delta2 * W21) */
            hipLaunchKernelGGL(( BP_Calculate_Delta1), dim3(dimGrid2D_batch_hide), dim3(dimBlock2D), 0, 0, deltaOutHide_D, weightOutHide_D, deltaHideIn_D, hideOut_D, batchNum, outLayout, hideLayout);

            /* W21 = W21 + eta / batchNum * delta2' * hOut */
            hipLaunchKernelGGL(( BP_Update_Weight), dim3(dimGrid2D_out_hide), dim3(dimBlock2D), 0, 0, deltaOutHide_D, hideOut_D, weightOutHide_D, batchNum, outLayout, hideLayout);

            /* W10 = W10 + eta / batchNum * delta1' * X */
            hipLaunchKernelGGL(( BP_Update_Weight), dim3(dimGrid2D_hide_in), dim3(dimBlock2D), 0, 0, deltaHideIn_D, &inputTrain_D[batch * inLayout], weightHideIn_D, batchNum, hideLayout, inLayout);
        }
    }

    /* test output */
    /* hIn = X * W01' */
    hipLaunchKernelGGL(( MatMulCUDATB), dim3(dimGrid2D_testNum_hide), dim3(dimBlock2D), 0, 0, inputTest_D, weightHideIn_D, hideOutTest_D, testNum, inLayout, hideLayout);

    /* hOut = h(hIn) */
    hipLaunchKernelGGL(( BP_Calculate_HideOut), dim3(dimGrid2D_testNum_hide), dim3(dimBlock2D), 0, 0, hideOutTest_D, testNum, hideLayout);

    /* yOut = hOut * W21' */
    hipLaunchKernelGGL(( MatMulCUDATB), dim3(dimGrid2D_testNum_out), dim3(dimBlock2D), 0, 0, hideOutTest_D, weightOutHide_D, yOutTest_D, testNum, hideLayout, outLayout);

    /* calculate error vector */
    //printf("Calculate Error Vector\n");
    hipLaunchKernelGGL(( vectorSub), dim3(dimGrid2D_testNum_out), dim3(dimBlock2D), 0, 0, testNum, yOutTest_D, outputTest_D, error);

    /* calculate error */
    //printf("Calculate Average Error\n");
    hipLaunchKernelGGL(( BP_Calculate_Error), dim3(dimGrid1D_testNum), dim3(dimBlock1D), 0, 0, testNum, error, error_D);

    hipMemcpy(error_H, error_D, sizeof(float), hipMemcpyDeviceToHost);

    printf("BP error is:%.5f%%\n", 100.0f*float(*error_H) / float(testNum));
}
feb57edd57e3f67840733089be58eb6e5e521742.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> #include <device_launch_parameters.h> #include <curand_kernel.h> #define trainNum 100000 // number of train data #define testNum 1000 // number of test data #define inLayout 10 // number of input layer's neurons #define hideLayout 8 // number of hidden layer's neurons #define outLayout 1 // number of output layer's neurons #define initWeightMax 0.5 // max value of initial weight #define eta (0.1f) // learn rate #define iterMax 10000 // max iteration times #define batchNum 32 // number of batches #define BLOCKSIZE 16 #define BLOCKSIZE_32 32 /** * func:initialize weight * output:weight_D * input:row row of weight matrix * input:col column of weight matrix * input:maxNum max value of weight */ __global__ void Bp_Init_Weight(float *weight_D, int row, int col, float maxNum, int seed) { int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index int index = y_id * col + x_id; curandState s; curand_init(index + seed, 0, 0, &s); if (x_id < col && y_id < row) weight_D[index] = (curand_uniform(&s) - 0.5f) * maxNum; } /** * func:calculate C = A * B' with tiling * input:dev_A * input:dev_B * output:dev_C * input:heightA row of A matrix * input:widthA A column of A matrix * input:heightB row of B matrix */ __global__ void MatMulCUDATB(float *dev_A, float *dev_B, float *dev_C, const int heightA, const int widthA, const int heightB) { int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index float Cvalue = 0; // loop over tiles in A and B for (int m = 0; m < widthA; m += BLOCKSIZE) { int colA = m + threadIdx.x; int rowB = m + threadIdx.y; // use shared memory to stroe Asub and Bsub __shared__ float As[BLOCKSIZE][BLOCKSIZE]; __shared__ float Bs[BLOCKSIZE][BLOCKSIZE]; // load tile into shared memory if ((colA < widthA) && (y_id < heightA)) As[threadIdx.y][threadIdx.x] = dev_A[y_id * widthA + colA]; // A(y_id, colA) else As[threadIdx.y][threadIdx.x] = 0.0f; if ((x_id < heightB) && (rowB <widthA)) Bs[threadIdx.y][threadIdx.x] = dev_B[x_id * widthA + rowB]; // B(rowB, x_id) else Bs[threadIdx.y][threadIdx.x] = 0.0f; __syncthreads(); // matrix multiply in tile matrix for (int idx = 0; idx < BLOCKSIZE; ++idx) { Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x]; } __syncthreads(); } if (x_id < heightB && y_id < heightA) { dev_C[y_id * heightB + x_id] = Cvalue; } } /** * func:calculate vector's inner product * input:As * input:Bs * input:length */ __device__ inline static float BP_Dot(float *As, float *Bs, int length) { float dot = 0.0f; for (int i = 0; i < length; i++) { dot += As[i] * Bs[i]; } return(dot); } /** * func:calculate input of hidden layer * input:dev_A * input:dev_B * output:dev_C * input:heightA * input:widthA * input:widthB */ __global__ void BP_Calculate_HideIn(float *dev_A, float *dev_B, float *dev_C, const int heightA, const int widthA, const int widthB) { int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index __shared__ float As[BLOCKSIZE_32][BLOCKSIZE_32]; __shared__ float Bs[BLOCKSIZE_32][BLOCKSIZE_32]; As[threadIdx.y][threadIdx.x] = 0.0f; Bs[threadIdx.y][threadIdx.x] = 0.0f; if (y_id < heightA 
&& x_id < widthA) { As[threadIdx.y][threadIdx.x] = dev_A[threadIdx.y * widthA + x_id]; Bs[threadIdx.y][threadIdx.x] = dev_B[threadIdx.y * widthA + x_id]; } __syncthreads(); float dot = BP_Dot(As[threadIdx.y], Bs[threadIdx.x], BLOCKSIZE_32); atomicAdd(&dev_C[threadIdx.y * widthB + threadIdx.x], dot); } /** * func:calculate output of hidden layer * input:hideOut_D input of hidden layer * output:hideOut_D output of hidden layer * input:row * input:col */ __global__ void BP_Calculate_HideOut(float *hideOut_D, int row, int col) { int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index int index = y_id * col + x_id; if (x_id < col && y_id < row) { hideOut_D[index] = 1.0f / (1.0f + exp(-hideOut_D[index])); } } /** * func:calculate delta2_D = x_Out - A * B' * input:dev_A * input:dev_B * output:delta2_D delta between hidden layer and output layer * input:xOut_D * input:heightA * input:widthA * input:heightB */ __global__ void BP_Calculate_Delta2(float *dev_A, float *dev_B, float *delta2_D, float *xOut_D, const int heightA, const int widthA, const int heightB) { int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index float Cvalue = 0; // loop over tiles in A and B for (int m = 0; m < widthA; m += BLOCKSIZE) { int colA = m + threadIdx.x; int rowB = m + threadIdx.y; // use shared memory to stroe Asub and Bsub __shared__ float As[BLOCKSIZE][BLOCKSIZE]; __shared__ float Bs[BLOCKSIZE][BLOCKSIZE]; // load tile into shared memory if ((colA < widthA) && (y_id < heightA)) As[threadIdx.y][threadIdx.x] = dev_A[y_id * widthA + colA]; // A(y_id, colA) else As[threadIdx.y][threadIdx.x] = 0.0f; if ((x_id < heightB) && (rowB <widthA)) Bs[threadIdx.y][threadIdx.x] = dev_B[x_id * widthA + rowB]; // B(rowB, x_id) else Bs[threadIdx.y][threadIdx.x] = 0.0f; __syncthreads(); // matrix multiply in tile matrix for (int idx = 0; idx < BLOCKSIZE; ++idx) { Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x]; } __syncthreads(); } if (x_id < heightB && y_id < heightA) { int index = y_id * heightB + x_id; delta2_D[index] = xOut_D[index] - Cvalue; } } /** * func:calculate C = (hOut .* (1 - hOut)) .* (A * B) * input:dev_A * input:dev_B * output:dev_C * input:hideOut_D * input:heightA * input:widthA * input:widthB */ __global__ void BP_Calculate_Delta1(float *dev_A, float *dev_B, float *dev_C, float *hideOut_D, const int heightA, const int widthA, const int widthB) { int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index float Cvalue = 0; // loop over tiles in A and B for (int m = 0; m < widthA; m += BLOCKSIZE) { int colA = m + threadIdx.x; int rowB = m + threadIdx.y; // use shared memory to stroe Asub and Bsub __shared__ float As[BLOCKSIZE][BLOCKSIZE]; __shared__ float Bs[BLOCKSIZE][BLOCKSIZE]; // load tile into shared memory if ((colA < widthA) && (y_id < heightA)) As[threadIdx.y][threadIdx.x] = dev_A[y_id * widthA + colA]; // A(y_id, colA) else As[threadIdx.y][threadIdx.x] = 0.0f; if ((x_id < widthB) && (rowB <widthA)) Bs[threadIdx.y][threadIdx.x] = dev_B[rowB * widthB + x_id]; // B(rowB, x_id) else Bs[threadIdx.y][threadIdx.x] = 0.0f; __syncthreads(); // matrix multiply in tile matrix for (int idx = 0; idx < BLOCKSIZE; ++idx) { Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x]; } __syncthreads(); } if (x_id < widthB && y_id < heightA) { int index = y_id * widthB + x_id; float data = hideOut_D[index]; 
dev_C[index] = data * (1.0f - data) * Cvalue; } } /** * func:update weight C = C + eta/batchNum .* (A' * B) * input:dev_A * input:dev_B * output:dev_C * input:heightA * input:widthA * input:heightB */ __global__ void BP_Update_Weight(float *dev_A, float *dev_B, float *dev_C, const int heightA, const int widthA, const int widthB) { int x_id = blockDim.x * blockIdx.x + threadIdx.x; // column index int y_id = blockDim.y * blockIdx.y + threadIdx.y; // row index float Cvalue = 0; // loop over tiles in A and B for (int m = 0; m < heightA; m += BLOCKSIZE) { int colA = m + threadIdx.x; int rowB = m + threadIdx.y; // use shared memory to stroe Asub and Bsub __shared__ float As[BLOCKSIZE][BLOCKSIZE]; __shared__ float Bs[BLOCKSIZE][BLOCKSIZE]; // load tile into shared memory if ((colA < heightA) && (y_id < widthA)) As[threadIdx.y][threadIdx.x] = dev_A[colA * widthA + y_id]; // A(y_id, colA) else As[threadIdx.y][threadIdx.x] = 0.0f; if ((x_id < widthB) && (rowB < heightA)) Bs[threadIdx.y][threadIdx.x] = dev_B[rowB * widthB + x_id]; // B(rowB, x_id) else Bs[threadIdx.y][threadIdx.x] = 0.0f; __syncthreads(); // matrix multiply in tile matrix for (int idx = 0; idx < BLOCKSIZE; ++idx) { Cvalue += As[threadIdx.y][idx] * Bs[idx][threadIdx.x]; } __syncthreads(); } if (x_id < widthB && y_id < widthA) { dev_C[y_id * widthB + x_id] += eta * Cvalue / float(batchNum); } } /** * func:calculate error vector * input:n * input:A * input:B * output:C */ __global__ void vectorSub(int n, float *A, float *B, float *C) { int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < n){ C[i] = 0.5 * (A[i] - B[i]) * (A[i] - B[i]); //printf("Thread id is: %d test is: %.5f real is: %.5f Error Vector is: %.5f\n", i, A[i], B[i], C[i]); } } /** * func:calculate error * input:n * input:A * output:error */ __global__ void BP_Calculate_Error(int n, float *A, float *error_D) { int i = threadIdx.x + blockIdx.x * blockDim.x; if(i < n) { atomicAdd(&(error_D[0]), A[i]); //printf("Thread id is: %d, Error is: %.10f\n", i, error_D[0]); } } /* * func:BP algorithm parallelization version * input:inputTrain_H input train data * input:inputTest_H input test data * input:outputTrain_H train data's label * input:outputTest_H test data's label */ void BpMain(float *inputTrain_H, float *inputTest_H, float *outputTrain_H, float *outputTest_H) { /* Allocate device variables */ float *inputTrain_D, *inputTest_D, *outputTrain_D, *outputTest_D; cudaMalloc((void**)&inputTrain_D, trainNum * inLayout * sizeof(float)); cudaMalloc((void**)&inputTest_D, testNum * inLayout * sizeof(float)); cudaMalloc((void**)&outputTrain_D, trainNum * outLayout * sizeof(float)); cudaMalloc((void**)&outputTest_D, testNum * outLayout * sizeof(float)); float *weightHideIn_D, *weightOutHide_D; cudaMalloc((void**)&weightHideIn_D, hideLayout * inLayout * sizeof(float)); cudaMalloc((void**)&weightOutHide_D, outLayout * hideLayout * sizeof(float)); float *weightHideInT_D; cudaMalloc((void**)&weightHideInT_D, hideLayout * inLayout * sizeof(float)); float *deltaHideIn_D, *deltaOutHide_D; cudaMalloc((void**)&deltaHideIn_D, hideLayout * batchNum * sizeof(float)); cudaMalloc((void**)&deltaOutHide_D, outLayout * batchNum * sizeof(float)); float *hideOut_D, *hideOutTest_D; cudaMalloc((void**)&hideOut_D, hideLayout * batchNum * sizeof(float)); cudaMemset(hideOut_D, 0, hideLayout * batchNum * sizeof(float)); cudaMalloc((void**)&hideOutTest_D, hideLayout * testNum * sizeof(float)); float *phi_D; cudaMalloc((void**)&phi_D, hideLayout * batchNum * sizeof(float)); float *yOut_D, *yOutTest_D; 
cudaMalloc((void**)&yOut_D, outLayout * batchNum * sizeof(float)); cudaMalloc((void**)&yOutTest_D, outLayout * testNum * sizeof(float)); float *w10 = (float*)malloc(hideLayout * inLayout * sizeof(float)); float *w21 = (float*)malloc(outLayout * hideLayout * sizeof(float)); float *error; cudaMalloc((void**)&error, testNum * outLayout * sizeof(float)); float *error_H = (float*)malloc(sizeof(float)); float *error_D; cudaMalloc((void**)&error_D, sizeof(float)); cudaMemset(error_D, 0, sizeof(float)); // Copy host variables to device cudaMemcpy(inputTrain_D, inputTrain_H, trainNum * inLayout * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(inputTest_D, inputTest_H, testNum * inLayout * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(outputTrain_D, outputTrain_H, trainNum * outLayout * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(outputTest_D, outputTest_H, testNum * outLayout * sizeof(float), cudaMemcpyHostToDevice); /* Initialize thread block and kernel grid dimensions */ dim3 dimBlock2D(BLOCKSIZE, BLOCKSIZE); dim3 dimBlock2D_32(BLOCKSIZE_32, BLOCKSIZE_32); dim3 dimBlock1D(BLOCKSIZE * BLOCKSIZE); dim3 dimGrid2D_hide_in((inLayout + BLOCKSIZE - 1) / dimBlock2D.x, (hideLayout + BLOCKSIZE - 1) / dimBlock2D.y); dim3 dimGrid2D_out_hide((hideLayout + BLOCKSIZE - 1) / dimBlock2D.x, (outLayout + BLOCKSIZE - 1) / dimBlock2D.y); dim3 dimGrid2D_batch_hide((hideLayout + BLOCKSIZE - 1) / dimBlock2D.x, (batchNum + BLOCKSIZE - 1) / dimBlock2D.y); dim3 dimGrid2D_batch_out((outLayout + BLOCKSIZE - 1) / dimBlock2D.x, (batchNum + BLOCKSIZE - 1) / dimBlock2D.y); dim3 dimGrid2D_testNum_hide((hideLayout + BLOCKSIZE - 1) / dimBlock2D.x, (testNum + BLOCKSIZE - 1) / dimBlock2D.y); dim3 dimGrid2D_testNum_out((outLayout + BLOCKSIZE - 1) / dimBlock2D.x, (testNum + BLOCKSIZE - 1) / dimBlock2D.y); dim3 dimGrid1D_testNum(((testNum + BLOCKSIZE - 1) / dimBlock2D.x)); dim3 dimGrid2D_32_batch_in((inLayout + BLOCKSIZE_32 - 1) / dimBlock2D_32.x, (batchNum + BLOCKSIZE_32 - 1) / dimBlock2D_32.y); /* weight initiallization */ Bp_Init_Weight<<<dimGrid2D_hide_in, dimBlock2D>>>(weightHideIn_D, hideLayout, inLayout, initWeightMax, 0); Bp_Init_Weight<<<dimGrid2D_out_hide, dimBlock2D>>>(weightOutHide_D, outLayout, hideLayout, initWeightMax, 393); for (int i = 0; i < 10000; i++) { for (int batch = 0; batch < trainNum; batch += batchNum) { /* hIn = X * W01' */ BP_Calculate_HideIn<<<dimGrid2D_32_batch_in, dimBlock2D_32>>>(&inputTrain_D[batch * inLayout], weightHideIn_D, hideOut_D, batchNum, inLayout, hideLayout); /* hOut = h(hIn) */ BP_Calculate_HideOut<<<dimGrid2D_batch_hide, dimBlock2D>>>(hideOut_D, batchNum, hideLayout); /* delta2 = xOut - hOut * W21' */ BP_Calculate_Delta2<<<dimGrid2D_batch_out, dimBlock2D>>>(hideOut_D, weightOutHide_D, deltaOutHide_D, &outputTrain_D[batch * outLayout], batchNum, hideLayout, outLayout); /* delta1 = (hOut .* (1 - hOut)) .* (delta2 * W21) */ BP_Calculate_Delta1<<<dimGrid2D_batch_hide, dimBlock2D>>>(deltaOutHide_D, weightOutHide_D, deltaHideIn_D, hideOut_D, batchNum, outLayout, hideLayout); /* W21 = W21 + eta / batchNum * delta2' * hOut */ BP_Update_Weight<<<dimGrid2D_out_hide, dimBlock2D>>>(deltaOutHide_D, hideOut_D, weightOutHide_D, batchNum, outLayout, hideLayout); /* W10 = W10 + eta / batchNum * delta1' * X */ BP_Update_Weight<<<dimGrid2D_hide_in, dimBlock2D>>>(deltaHideIn_D, &inputTrain_D[batch * inLayout], weightHideIn_D, batchNum, hideLayout, inLayout); } } /* test output */ /* hIn = X * W01' */ MatMulCUDATB<<<dimGrid2D_testNum_hide, dimBlock2D>>>(inputTest_D, weightHideIn_D, 
hideOutTest_D, testNum, inLayout, hideLayout); /* hOut = h(hIn) */ BP_Calculate_HideOut<<<dimGrid2D_testNum_hide, dimBlock2D>>>(hideOutTest_D, testNum, hideLayout); /* yOut = hOut * W21' */ MatMulCUDATB<<<dimGrid2D_testNum_out, dimBlock2D>>>(hideOutTest_D, weightOutHide_D, yOutTest_D, testNum, hideLayout, outLayout); /* calculate error vector */ //printf("Calculate Error Vector\n"); vectorSub<<<dimGrid2D_testNum_out, dimBlock2D>>>(testNum, yOutTest_D, outputTest_D, error); /* calculate error */ //printf("Calculate Average Error\n"); BP_Calculate_Error<<<dimGrid1D_testNum, dimBlock1D>>>(testNum, error, error_D); cudaMemcpy(error_H, error_D, sizeof(float), cudaMemcpyDeviceToHost); printf("BP error is:%.5f%%\n", 100.0f*float(*error_H) / float(testNum)); }
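// Illustrative standalone sketch (not part of the file above): it reproduces
// the error-reduction pattern BpMain uses at test time -- per-element squared
// differences followed by an atomicAdd sum into a single float. All names
// below (sq_diff_kernel, atomic_sum_kernel, N) are hypothetical; for large
// inputs a shared-memory tree reduction would scale better than atomicAdd.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void sq_diff_kernel(int n, const float *a, const float *b, float *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = 0.5f * (a[i] - b[i]) * (a[i] - b[i]);
}

__global__ void atomic_sum_kernel(int n, const float *c, float *sum)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(sum, c[i]);
}

int main()
{
    const int N = 1024;
    float ha[N], hb[N];
    for (int i = 0; i < N; ++i) { ha[i] = 1.0f; hb[i] = 0.5f; }

    float *da, *db, *dc, *dsum;
    cudaMalloc(&da, N * sizeof(float));
    cudaMalloc(&db, N * sizeof(float));
    cudaMalloc(&dc, N * sizeof(float));
    cudaMalloc(&dsum, sizeof(float));
    cudaMemcpy(da, ha, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(dsum, 0, sizeof(float));

    sq_diff_kernel<<<(N + 255) / 256, 256>>>(N, da, db, dc);
    atomic_sum_kernel<<<(N + 255) / 256, 256>>>(N, dc, dsum);

    float hsum = 0.0f;
    cudaMemcpy(&hsum, dsum, sizeof(float), cudaMemcpyDeviceToHost);
    printf("mean squared-error term: %f\n", hsum / N);   // 0.5 * 0.5^2 = 0.125

    cudaFree(da); cudaFree(db); cudaFree(dc); cudaFree(dsum);
    return 0;
}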
4782f3e5300cf9efd463e25e8c0f44056b950b2f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022, The Neko Authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the authors nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <device/device_config.h> #include <device/cuda/check.h> #include "fdm_kernel.h" #include <stdio.h> extern "C" { /** Fortran wrapper for tnsr3d **/ void cuda_fdm_do_fast(void *e, void *r, void *s, void *d, int *nl, int *nel, hipStream_t stream) { const dim3 nthrds(1024, 1, 1); const dim3 nblcks(*nel, 1, 1); #define CASE(NL) \ case NL: \ hipLaunchKernelGGL(( fdm_do_fast_kernel<real,NL>) \ , dim3(nblcks), dim3(nthrds), 0, stream, (real *) e, (real *) r, \ (real *) s,(real *) d); \ CUDA_CHECK(hipGetLastError()); \ break; switch(*nl) { CASE(2); CASE(3); CASE(4); CASE(5); CASE(6); CASE(7); CASE(8); CASE(9); CASE(10); CASE(11); CASE(12); CASE(13); CASE(14); default: { fprintf(stderr, __FILE__ ": size not supported: %d\n", *nl); exit(1); } } } }
4782f3e5300cf9efd463e25e8c0f44056b950b2f.cu
/* Copyright (c) 2022, The Neko Authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the authors nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <device/device_config.h> #include <device/cuda/check.h> #include "fdm_kernel.h" #include <stdio.h> extern "C" { /** Fortran wrapper for tnsr3d **/ void cuda_fdm_do_fast(void *e, void *r, void *s, void *d, int *nl, int *nel, cudaStream_t stream) { const dim3 nthrds(1024, 1, 1); const dim3 nblcks(*nel, 1, 1); #define CASE(NL) \ case NL: \ fdm_do_fast_kernel<real,NL> \ <<<nblcks, nthrds, 0, stream>>>((real *) e, (real *) r, \ (real *) s,(real *) d); \ CUDA_CHECK(cudaGetLastError()); \ break; switch(*nl) { CASE(2); CASE(3); CASE(4); CASE(5); CASE(6); CASE(7); CASE(8); CASE(9); CASE(10); CASE(11); CASE(12); CASE(13); CASE(14); default: { fprintf(stderr, __FILE__ ": size not supported: %d\n", *nl); exit(1); } } } }
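// Hedged sketch (hypothetical names, not part of the Neko file above) of the
// dispatch pattern cuda_fdm_do_fast uses: the runtime size *nl is mapped onto
// a compile-time template parameter through a switch built from a CASE macro,
// so every supported size gets its own fully specialized kernel instantiation.
#include <cstdio>
#include <cuda_runtime.h>

template <typename T, int NL>
__global__ void fill_kernel(T *x)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < NL * NL * NL) x[i] = T(NL);
}

#define CASE(NL)                                            \
  case NL:                                                  \
    fill_kernel<float, NL><<<nblcks, nthrds>>>(x);          \
    break;

void dispatch_fill(float *x, int nl)
{
    const dim3 nthrds(256, 1, 1);
    const dim3 nblcks((nl * nl * nl + 255) / 256, 1, 1);
    switch (nl) {
        CASE(2); CASE(3); CASE(4);
        default:
            fprintf(stderr, "size not supported: %d\n", nl);
    }
}
#undef CASE

int main()
{
    const int nl = 4;
    float *x;
    cudaMalloc(&x, nl * nl * nl * sizeof(float));
    dispatch_fill(x, nl);

    float first = 0.0f;
    cudaMemcpy(&first, x, sizeof(float), cudaMemcpyDeviceToHost);
    printf("x[0] = %.1f\n", first);   // expect 4.0
    cudaFree(x);
    return 0;
}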
97c8797d6aad319d9fab12f85720aa2d2a966c0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <fcuda.h> #include "matrixMul.h" #include <string.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((DATATYPE*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) cutilBankChecker(((DATATYPE*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// #pragma FCUDA GRID x_dim=16 y_dim=16 #pragma FCUDA COREINFO num_cores=1 pipeline=no //#pragma FCUDA TBLK bdim_num=2 bdim_x=16 bdim_y=16 gdims=2 //#pragma FCUDA GRID x_dim=2 y_dim=2 concur=0 //#pragma FCUDA PORTMERGE remove_port_name=A port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=B port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=C port_id=0 __global__ void matrixMul( DATATYPE *C, DATATYPE *A, DATATYPE *B, int wA, int wB) { #pragma HLS INTERFACE ap_bus port=A depth=3840 #pragma HLS INTERFACE ap_bus port=B depth=6144 #pragma HLS INTERFACE ap_bus port=C depth=10240 // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; //#pragma FCUDA COMPUTE cores=[1] begin name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] DATATYPE Csub = 0; //#pragma FCUDA COMPUTE cores=[1] end name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] int a = 0, b = 0, k = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ DATATYPE As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ DATATYPE Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] begin name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] AS(ty, tx) = A[a + wA * ty + tx]; BS(ty, tx) = B[b + wB * ty + tx]; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] end name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix //#pragma FCUDA COMPUTE cores=[1] begin name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 lp1: for (k = 0; k < BLOCK_SIZE; ++k) Csub += AS(ty, k) * BS(k, tx); //#pragma FCUDA COMPUTE cores=[1] end name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 // Synchronize to make sure that the preceding // 
computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] begin name=write unroll=1 mpart=1 array_split=[Csub_block|As] C[c + wB * ty + tx] = Csub; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] end name=write unroll=1 mpart=1 array_split=[Csub_block|As] } #endif // #ifndef _MATRIXMUL_KERNEL_H_
97c8797d6aad319d9fab12f85720aa2d2a966c0d.cu
/* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <fcuda.h> #include "matrixMul.h" #include <string.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((DATATYPE*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) cutilBankChecker(((DATATYPE*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// #pragma FCUDA GRID x_dim=16 y_dim=16 #pragma FCUDA COREINFO num_cores=1 pipeline=no //#pragma FCUDA TBLK bdim_num=2 bdim_x=16 bdim_y=16 gdims=2 //#pragma FCUDA GRID x_dim=2 y_dim=2 concur=0 //#pragma FCUDA PORTMERGE remove_port_name=A port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=B port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=C port_id=0 __global__ void matrixMul( DATATYPE *C, DATATYPE *A, DATATYPE *B, int wA, int wB) { #pragma HLS INTERFACE ap_bus port=A depth=3840 #pragma HLS INTERFACE ap_bus port=B depth=6144 #pragma HLS INTERFACE ap_bus port=C depth=10240 // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; //#pragma FCUDA COMPUTE cores=[1] begin name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] DATATYPE Csub = 0; //#pragma FCUDA COMPUTE cores=[1] end name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] int a = 0, b = 0, k = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ DATATYPE As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ DATATYPE Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] begin name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] AS(ty, tx) = A[a + wA * ty + tx]; BS(ty, tx) = B[b + wB * ty + tx]; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] end name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix //#pragma FCUDA COMPUTE cores=[1] begin name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 lp1: for (k = 0; k < BLOCK_SIZE; ++k) Csub += AS(ty, k) * BS(k, tx); //#pragma FCUDA COMPUTE cores=[1] end name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration 
__syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] begin name=write unroll=1 mpart=1 array_split=[Csub_block|As] C[c + wB * ty + tx] = Csub; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] end name=write unroll=1 mpart=1 array_split=[Csub_block|As] } #endif // #ifndef _MATRIXMUL_KERNEL_H_
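// Standalone sketch of the shared-memory tiling idea the matrixMul kernel
// above implements, stripped of the FCUDA/HLS pragmas. TILE=16 and float are
// assumptions standing in for BLOCK_SIZE and DATATYPE from matrixMul.h, and
// the matrix size is kept a multiple of the tile so no bounds checks are needed.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define TILE 16

__global__ void tiled_matmul(float *C, const float *A, const float *B, int wA, int wB)
{
    __shared__ float As[TILE][TILE];
    __shared__ float Bs[TILE][TILE];

    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    float acc = 0.0f;

    for (int m = 0; m < wA; m += TILE) {
        As[threadIdx.y][threadIdx.x] = A[row * wA + m + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = B[(m + threadIdx.y) * wB + col];
        __syncthreads();
        for (int k = 0; k < TILE; ++k)
            acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
        __syncthreads();
    }
    C[row * wB + col] = acc;
}

int main()
{
    const int n = 64;                          // square matrices, multiple of TILE
    const size_t bytes = n * n * sizeof(float);
    float *hA = (float *)malloc(bytes), *hC = (float *)malloc(bytes);
    for (int i = 0; i < n * n; ++i) hA[i] = 1.0f;

    float *dA, *dB, *dC;
    cudaMalloc(&dA, bytes); cudaMalloc(&dB, bytes); cudaMalloc(&dC, bytes);
    cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hA, bytes, cudaMemcpyHostToDevice);

    dim3 threads(TILE, TILE);
    dim3 grid(n / TILE, n / TILE);
    tiled_matmul<<<grid, threads>>>(dC, dA, dB, n, n);

    cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost);
    printf("C[0] = %.1f (expected %d)\n", hC[0], n);   // all-ones inputs

    cudaFree(dA); cudaFree(dB); cudaFree(dC); free(hA); free(hC);
    return 0;
}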
1f435dbffd02079a96612f0902daf01f38b32552.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void UpdateExtNeuron(float *port_input_pt, float *port_value_pt, int n_node, int n_var, int n_port_var, int n_port) { int i_thread = threadIdx.x + blockIdx.x * blockDim.x; if (i_thread<n_node*n_port) { int i_port = i_thread%n_port; int i_node = i_thread/n_port; float *pip = port_input_pt + i_node*n_var + n_port_var*i_port; //printf("port %d node %d pip %f\n", i_port, i_node, *pip); port_value_pt[i_node*n_var + n_port_var*i_port] = *pip; *pip = 0.0; } }
1f435dbffd02079a96612f0902daf01f38b32552.cu
#include "includes.h" __global__ void UpdateExtNeuron(float *port_input_pt, float *port_value_pt, int n_node, int n_var, int n_port_var, int n_port) { int i_thread = threadIdx.x + blockIdx.x * blockDim.x; if (i_thread<n_node*n_port) { int i_port = i_thread%n_port; int i_node = i_thread/n_port; float *pip = port_input_pt + i_node*n_var + n_port_var*i_port; //printf("port %d node %d pip %f\n", i_port, i_node, *pip); port_value_pt[i_node*n_var + n_port_var*i_port] = *pip; *pip = 0.0; } }
a6c817e3b3b0916576e8dd2fe1b3230274b5f8b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <stdio.h> #include <errno.h> #include <unistd.h> #include <stdlib.h> #include <arpa/inet.h> #include <math.h> #include "cs_header.h" #include "cs_copy_box.h" #include "cs_dbg.h" #include "cs_cuda.h" #include "cs_helper.h" #include "cs_motion_detect_v4.h" #include "cs_analysis.h" #include "cs_mean_sd.h" #define CUDA_DBG // total is actually blk_in_y * blk_in_x __global__ void d_mean_sd_divide ( float *dp, struct cube *dcubep, int blk_in_x, int blk_in_y, int total ) { int i, ot_idx = blockIdx.x * blockDim.x + threadIdx.x; if ( ot_idx < total ) { i = get_blk_type_idx ( ot_idx, blk_in_x, blk_in_y ) ; i = dcubep[i].md_v4_record_length ; dp[ ot_idx ] /= i ; } } // total is actually blk_in_y * blk_in_x __global__ void d_mean_sd_sqrt ( float *dp, int total ) { int ot_idx = blockIdx.x * blockDim.x + threadIdx.x; if ( ot_idx < total ) dp[ ot_idx ] = sqrt( dp[ ot_idx ] ) ; } // mean and sd points to blk_in_x * blk_in_y * sizeof ( float ) each // total is actually blk_in_y * blk_in_x __global__ void d_mean_sd_sd1 ( float *odp, struct cube *dcubep, float *d_meanp, int blk_in_x, int blk_in_y, int max_in_blk, int blk_size, int total ) { int i, blk_idx, t_idx, ot_idx = blockIdx.x * blockDim.x + threadIdx.x; float *dp ; while ( ot_idx < total ) { t_idx = ot_idx ; blk_idx = t_idx / max_in_blk ; // blk idx i = get_blk_type_idx ( blk_idx, blk_in_x, blk_in_y ) ; i = dcubep[i].md_v4_record_length ; t_idx %= max_in_blk ; if ( t_idx < i ) { dp = odp + blk_idx * blk_size ; // dp points to beginning of block dp[ t_idx ] = (( dp[ t_idx ] - d_meanp[ blk_idx ]) * ( dp[ t_idx ] - d_meanp[ blk_idx ])) / ( i - 1 ) ; } ot_idx += CUDA_MAX_THREADS ; } } // mean and sd points to blk_in_x * blk_in_y * sizeof ( float ) each int h_mean_sd ( float *dp, float *tp, struct cube *hcubep, struct cube *d_cubep, int blk_in_x, int blk_in_y, int blk_size, float *d_meanp, float *d_sdp ) { int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ; int nBlocks, i ; int max_cnt ; struct cube cxyz[ CUBE_INFO_CNT ] ; memcpy ( cxyz, hcubep, sizeof ( *hcubep ) * CUBE_INFO_CNT ) ; #ifdef CUDA_OBS fprintf( stderr, "%s: dp %p tp %p blksize %d blk x/y %d %d\n", __func__, dp, tp, blk_size, blk_in_x, blk_in_y ) ; #endif max_cnt = 0 ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { #ifdef CUDA_OBS printf("%s :: cube before loop %d leng %d size %d \n", __func__, cxyz[i].md_v4_loopcnt, cxyz[i].md_v4_record_length, cxyz[i].size ) ; #endif cxyz[i].md_v4_loopcnt = 1 ; cxyz[i].md_v4_record_length = cxyz[i].size ; if ( cxyz[i].size > max_cnt ) max_cnt = cxyz[i].size ; #ifdef CUDA_OBS printf("%s :: cube after loop %d leng %d size %d \n", __func__, cxyz[i].md_v4_loopcnt, cxyz[i].md_v4_record_length, cxyz[i].size ) ; #endif } // save a copy from dp to tp i = blk_size * blk_in_x * blk_in_y ; h_do_copy_vec( dp, tp, i, blk_size, blk_size ) ; // get the sum h_do_l1_norm_step2_v4( tp, cxyz, d_cubep, blk_in_x, blk_in_y, blk_size, 0 ) ; // copy the sum to d_meanp h_do_copy_vec<float> ( tp, d_meanp, blk_in_x * blk_in_y, blk_size, 1 ) ; // divede the sum with size to get mean i = blk_in_y * blk_in_x ; h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ; hipLaunchKernelGGL(( d_mean_sd_divide) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, d_meanp, d_cubep, blk_in_x, blk_in_y, i ) ; hipDeviceSynchronize() ; // ok get the sd ... 
// save a copy from dp to tp i = blk_size * blk_in_x * blk_in_y ; h_do_copy_vec<float> ( dp, tp, i, blk_size, blk_size ) ; // do the ( x - u )^2/(n-1) i = max_cnt * blk_in_y * blk_in_x ; h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ; hipLaunchKernelGGL(( d_mean_sd_sd1), dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, tp, d_cubep, d_meanp, blk_in_x, blk_in_y, max_cnt, blk_size, i ) ; hipDeviceSynchronize() ; // do the sum again ... h_do_l1_norm_step2_v4( tp, cxyz, d_cubep, blk_in_x, blk_in_y, blk_size, 0 ) ; // copy the sum to sd h_do_copy_vec<float> ( tp, d_sdp, blk_in_x * blk_in_y, blk_size, 1 ) ; // do sqrt the to the sum's i = blk_in_y * blk_in_x ; h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ; hipLaunchKernelGGL(( d_mean_sd_sqrt) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0, d_sdp, i ) ; hipDeviceSynchronize() ; h_set_cube_config ( d_cubep, hcubep ) ; return ( 1 ) ; }
a6c817e3b3b0916576e8dd2fe1b3230274b5f8b3.cu
#include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <stdio.h> #include <errno.h> #include <unistd.h> #include <stdlib.h> #include <arpa/inet.h> #include <math.h> #include "cs_header.h" #include "cs_copy_box.h" #include "cs_dbg.h" #include "cs_cuda.h" #include "cs_helper.h" #include "cs_motion_detect_v4.h" #include "cs_analysis.h" #include "cs_mean_sd.h" #define CUDA_DBG // total is actually blk_in_y * blk_in_x __global__ void d_mean_sd_divide ( float *dp, struct cube *dcubep, int blk_in_x, int blk_in_y, int total ) { int i, ot_idx = blockIdx.x * blockDim.x + threadIdx.x; if ( ot_idx < total ) { i = get_blk_type_idx ( ot_idx, blk_in_x, blk_in_y ) ; i = dcubep[i].md_v4_record_length ; dp[ ot_idx ] /= i ; } } // total is actually blk_in_y * blk_in_x __global__ void d_mean_sd_sqrt ( float *dp, int total ) { int ot_idx = blockIdx.x * blockDim.x + threadIdx.x; if ( ot_idx < total ) dp[ ot_idx ] = sqrt( dp[ ot_idx ] ) ; } // mean and sd points to blk_in_x * blk_in_y * sizeof ( float ) each // total is actually blk_in_y * blk_in_x __global__ void d_mean_sd_sd1 ( float *odp, struct cube *dcubep, float *d_meanp, int blk_in_x, int blk_in_y, int max_in_blk, int blk_size, int total ) { int i, blk_idx, t_idx, ot_idx = blockIdx.x * blockDim.x + threadIdx.x; float *dp ; while ( ot_idx < total ) { t_idx = ot_idx ; blk_idx = t_idx / max_in_blk ; // blk idx i = get_blk_type_idx ( blk_idx, blk_in_x, blk_in_y ) ; i = dcubep[i].md_v4_record_length ; t_idx %= max_in_blk ; if ( t_idx < i ) { dp = odp + blk_idx * blk_size ; // dp points to beginning of block dp[ t_idx ] = (( dp[ t_idx ] - d_meanp[ blk_idx ]) * ( dp[ t_idx ] - d_meanp[ blk_idx ])) / ( i - 1 ) ; } ot_idx += CUDA_MAX_THREADS ; } } // mean and sd points to blk_in_x * blk_in_y * sizeof ( float ) each int h_mean_sd ( float *dp, float *tp, struct cube *hcubep, struct cube *d_cubep, int blk_in_x, int blk_in_y, int blk_size, float *d_meanp, float *d_sdp ) { int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ; int nBlocks, i ; int max_cnt ; struct cube cxyz[ CUBE_INFO_CNT ] ; memcpy ( cxyz, hcubep, sizeof ( *hcubep ) * CUBE_INFO_CNT ) ; #ifdef CUDA_OBS fprintf( stderr, "%s: dp %p tp %p blksize %d blk x/y %d %d\n", __func__, dp, tp, blk_size, blk_in_x, blk_in_y ) ; #endif max_cnt = 0 ; for ( i = 0 ; i < CUBE_INFO_CNT ; i++ ) { #ifdef CUDA_OBS printf("%s :: cube before loop %d leng %d size %d \n", __func__, cxyz[i].md_v4_loopcnt, cxyz[i].md_v4_record_length, cxyz[i].size ) ; #endif cxyz[i].md_v4_loopcnt = 1 ; cxyz[i].md_v4_record_length = cxyz[i].size ; if ( cxyz[i].size > max_cnt ) max_cnt = cxyz[i].size ; #ifdef CUDA_OBS printf("%s :: cube after loop %d leng %d size %d \n", __func__, cxyz[i].md_v4_loopcnt, cxyz[i].md_v4_record_length, cxyz[i].size ) ; #endif } // save a copy from dp to tp i = blk_size * blk_in_x * blk_in_y ; h_do_copy_vec( dp, tp, i, blk_size, blk_size ) ; // get the sum h_do_l1_norm_step2_v4( tp, cxyz, d_cubep, blk_in_x, blk_in_y, blk_size, 0 ) ; // copy the sum to d_meanp h_do_copy_vec<float> ( tp, d_meanp, blk_in_x * blk_in_y, blk_size, 1 ) ; // divede the sum with size to get mean i = blk_in_y * blk_in_x ; h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ; d_mean_sd_divide <<< nBlocks, nThreadsPerBlock >>> ( d_meanp, d_cubep, blk_in_x, blk_in_y, i ) ; cudaThreadSynchronize() ; // ok get the sd ... 
// save a copy from dp to tp i = blk_size * blk_in_x * blk_in_y ; h_do_copy_vec<float> ( dp, tp, i, blk_size, blk_size ) ; // do the ( x - u )^2/(n-1) i = max_cnt * blk_in_y * blk_in_x ; h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ; d_mean_sd_sd1<<< nBlocks, nThreadsPerBlock >>> ( tp, d_cubep, d_meanp, blk_in_x, blk_in_y, max_cnt, blk_size, i ) ; cudaThreadSynchronize() ; // do the sum again ... h_do_l1_norm_step2_v4( tp, cxyz, d_cubep, blk_in_x, blk_in_y, blk_size, 0 ) ; // copy the sum to sd h_do_copy_vec<float> ( tp, d_sdp, blk_in_x * blk_in_y, blk_size, 1 ) ; // do sqrt the to the sum's i = blk_in_y * blk_in_x ; h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ; d_mean_sd_sqrt <<< nBlocks, nThreadsPerBlock >>> ( d_sdp, i ) ; cudaThreadSynchronize() ; h_set_cube_config ( d_cubep, hcubep ) ; return ( 1 ) ; }
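// Hedged sketch of the two-pass statistic h_mean_sd computes above for each
// block: mean = sum/n, then sd = sqrt(sum((x - mean)^2) / (n - 1)). Here the
// same computation is expressed with Thrust on a single toy block; the real
// code keeps one mean/sd slot per (blk_in_x * blk_in_y) block instead.
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <vector>
#include <cmath>
#include <cstdio>

struct sq_dev {
    float mu;
    __host__ __device__ float operator()(float x) const { return (x - mu) * (x - mu); }
};

int main()
{
    std::vector<float> h = {1.f, 2.f, 3.f, 4.f};          // toy "block" of data
    thrust::device_vector<float> d(h.begin(), h.end());
    int n = (int)d.size();

    float mean = thrust::reduce(d.begin(), d.end(), 0.0f) / n;
    float ss   = thrust::transform_reduce(d.begin(), d.end(), sq_dev{mean},
                                          0.0f, thrust::plus<float>());
    float sd   = std::sqrt(ss / (n - 1));
    printf("mean %.3f  sd %.3f\n", mean, sd);             // 2.500 and ~1.291
    return 0;
}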
4bf484cbebd0efe2292548b0b0e05a4bf4ce4011.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/merge.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/dictionary/detail/merge.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/strings/detail/merge.cuh> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/merge.h> #include <thrust/pair.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <queue> #include <vector> namespace cudf { namespace detail { namespace { using detail::side; using index_type = detail::index_type; /** * @brief Merges the bits of two validity bitmasks. * * Merges the bits from two column_device_views into the destination validity buffer * according to `merged_indices` map such that bit `i` in `out_validity` * will be equal to bit `thrust::get<1>(merged_indices[i])` from `left_dcol` * if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise, * from `right_dcol`. * * `left_dcol` and `right_dcol` must not overlap. * * @tparam left_have_valids Indicates whether left_dcol mask is unallocated (hence, ALL_VALID) * @tparam right_have_valids Indicates whether right_dcol mask is unallocated (hence ALL_VALID) * @param[in] left_dcol The left column_device_view whose bits will be merged * @param[in] right_dcol The right column_device_view whose bits will be merged * @param[out] out_validity The output validity buffer after merging the left and right buffers * @param[in] num_destination_rows The number of rows in the out_validity buffer * @param[in] merged_indices The map that indicates the source of the input and index * to be copied to the output. 
Length must be equal to `num_destination_rows` */ template <bool left_have_valids, bool right_have_valids> __global__ void materialize_merged_bitmask_kernel( column_device_view left_dcol, column_device_view right_dcol, bitmask_type* out_validity, size_type const num_destination_rows, index_type const* const __restrict__ merged_indices) { size_type destination_row = threadIdx.x + blockIdx.x * blockDim.x; auto active_threads = __ballot_sync(0xffffffff, destination_row < num_destination_rows); while (destination_row < num_destination_rows) { auto const [src_side, src_row] = merged_indices[destination_row]; bool const from_left{src_side == side::LEFT}; bool source_bit_is_valid{true}; if (left_have_valids && from_left) { source_bit_is_valid = left_dcol.is_valid_nocheck(src_row); } else if (right_have_valids && !from_left) { source_bit_is_valid = right_dcol.is_valid_nocheck(src_row); } // Use ballot to find all valid bits in this warp and create the output // bitmask element bitmask_type const result_mask{__ballot_sync(active_threads, source_bit_is_valid)}; // Only one thread writes output if (0 == threadIdx.x % warpSize) { out_validity[word_index(destination_row)] = result_mask; } destination_row += blockDim.x * gridDim.x; active_threads = __ballot_sync(active_threads, destination_row < num_destination_rows); } } void materialize_bitmask(column_view const& left_col, column_view const& right_col, bitmask_type* out_validity, size_type num_elements, index_type const* merged_indices, rmm::cuda_stream_view stream) { constexpr size_type BLOCK_SIZE{256}; detail::grid_1d grid_config{num_elements, BLOCK_SIZE}; auto p_left_dcol = column_device_view::create(left_col, stream); auto p_right_dcol = column_device_view::create(right_col, stream); auto left_valid = *p_left_dcol; auto right_valid = *p_right_dcol; if (left_col.has_nulls()) { if (right_col.has_nulls()) { hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<true, true>) , dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream.value(), left_valid, right_valid, out_validity, num_elements, merged_indices); } else { hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<true, false>) , dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream.value(), left_valid, right_valid, out_validity, num_elements, merged_indices); } } else { if (right_col.has_nulls()) { hipLaunchKernelGGL(( materialize_merged_bitmask_kernel<false, true>) , dim3(grid_config.num_blocks), dim3(grid_config.num_threads_per_block), 0, stream.value(), left_valid, right_valid, out_validity, num_elements, merged_indices); } else { CUDF_FAIL("materialize_merged_bitmask_kernel<false, false>() should never be called."); } } CUDF_CHECK_CUDA(stream.value()); } struct side_index_generator { side _side; __device__ index_type operator()(size_type i) const noexcept { return index_type{_side, i}; } }; /** * @brief Generates the row indices and source side (left or right) in accordance with the index * columns. 
* * * @tparam index_type Indicates the type to be used to collect index and side information; * @param[in] left_table The left table_view to be merged * @param[in] right_table The right table_view to be merged * @param[in] column_order Sort order types of index columns * @param[in] null_precedence Array indicating the order of nulls with respect to non-nulls for the * index columns * @param[in] nullable Flag indicating if at least one of the table_view arguments has nulls * (defaults to true) * @param[in] stream CUDA stream used for device memory operations and kernel launches. * * @return A device_uvector of merged indices */ index_vector generate_merged_indices(table_view const& left_table, table_view const& right_table, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, bool nullable = true, rmm::cuda_stream_view stream = cudf::default_stream_value) { const size_type left_size = left_table.num_rows(); const size_type right_size = right_table.num_rows(); const size_type total_size = left_size + right_size; auto left_gen = side_index_generator{side::LEFT}; auto right_gen = side_index_generator{side::RIGHT}; auto left_begin = cudf::detail::make_counting_transform_iterator(0, left_gen); auto right_begin = cudf::detail::make_counting_transform_iterator(0, right_gen); index_vector merged_indices(total_size, stream); auto lhs_device_view = table_device_view::create(left_table, stream); auto rhs_device_view = table_device_view::create(right_table, stream); auto d_column_order = cudf::detail::make_device_uvector_async(column_order, stream); if (nullable) { auto d_null_precedence = cudf::detail::make_device_uvector_async(null_precedence, stream); auto ineq_op = detail::row_lexicographic_tagged_comparator<true>( *lhs_device_view, *rhs_device_view, d_column_order.data(), d_null_precedence.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } else { auto ineq_op = detail::row_lexicographic_tagged_comparator<false>( *lhs_device_view, *rhs_device_view, d_column_order.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } CUDF_CHECK_CUDA(stream.value()); return merged_indices; } /** * @brief Generate merged column given row-order of merged tables * (ordered according to indices of key_cols) and the 2 columns to merge. */ struct column_merger { explicit column_merger(index_vector const& row_order) : row_order_(row_order) {} template <typename Element, CUDF_ENABLE_IF(not is_rep_layout_compatible<Element>())> std::unique_ptr<column> operator()(column_view const&, column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("Unsupported type for merge."); } // column merger operator; // template <typename Element> std::enable_if_t<is_rep_layout_compatible<Element>(), std::unique_ptr<column>> operator()( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const { auto lsz = lcol.size(); auto merged_size = lsz + rcol.size(); auto merged_col = cudf::detail::allocate_like(lcol.has_nulls() ? lcol : rcol, merged_size, cudf::mask_allocation_policy::RETAIN, stream, mr); //"gather" data from lcol, rcol according to row_order_ "map" //(directly calling gather() won't work because // lcol, rcol indices overlap!) 
// cudf::mutable_column_view merged_view = merged_col->mutable_view(); // initialize null_mask to all valid: // // Note: this initialization in conjunction with // _conditionally_ calling materialize_bitmask() below covers // the case materialize_merged_bitmask_kernel<false, false>() // which won't be called anymore (because of the _condition_ // below) // cudf::detail::set_null_mask(merged_view.null_mask(), 0, merged_view.size(), true, stream); // set the null count: // merged_col->set_null_count(lcol.null_count() + rcol.null_count()); // to resolve view.data()'s types use: Element // auto const d_lcol = lcol.data<Element>(); auto const d_rcol = rcol.data<Element>(); // capture lcol, rcol // and "gather" into merged_view.data()[indx_merged] // from lcol or rcol, depending on side; // thrust::transform(rmm::exec_policy(stream), row_order_.begin(), row_order_.end(), merged_view.begin<Element>(), [d_lcol, d_rcol] __device__(index_type const& index_pair) { auto const [side, index] = index_pair; return side == side::LEFT ? d_lcol[index] : d_rcol[index]; }); // CAVEAT: conditional call below is erroneous without // set_null_mask() call (see TODO above): // if (lcol.has_nulls() || rcol.has_nulls()) { // resolve null mask: // materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return merged_col; } private: index_vector const& row_order_; }; // specialization for strings template <> std::unique_ptr<column> column_merger::operator()<cudf::string_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto column = strings::detail::merge<index_type>(strings_column_view(lcol), strings_column_view(rcol), row_order_.begin(), row_order_.end(), stream, mr); if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = column->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return column; } // specialization for dictionary template <> std::unique_ptr<column> column_merger::operator()<cudf::dictionary32>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto result = cudf::dictionary::detail::merge( cudf::dictionary_column_view(lcol), cudf::dictionary_column_view(rcol), row_order_, stream, mr); // set the validity mask if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = result->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return result; } // specialization for structs template <> std::unique_ptr<column> column_merger::operator()<cudf::struct_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { // merge each child. auto const lhs = structs_column_view{lcol}; auto const rhs = structs_column_view{rcol}; auto it = cudf::detail::make_counting_transform_iterator( 0, [&, merger = column_merger{row_order_}](size_type i) { return cudf::type_dispatcher<dispatch_storage_type>( lhs.child(i).type(), merger, lhs.get_sliced_child(i), rhs.get_sliced_child(i), stream, mr); }); auto merged_children = std::vector<std::unique_ptr<column>>(it, it + lhs.num_children()); auto const merged_size = lcol.size() + rcol.size(); // materialize the output buffer rmm::device_buffer validity = lcol.has_nulls() || rcol.has_nulls() ? 
create_null_mask(merged_size, mask_state::UNINITIALIZED, stream, mr) : rmm::device_buffer{}; if (lcol.has_nulls() || rcol.has_nulls()) { materialize_bitmask(lcol, rcol, static_cast<bitmask_type*>(validity.data()), merged_size, row_order_.data(), stream); } return make_structs_column(merged_size, std::move(merged_children), lcol.null_count() + rcol.null_count(), std::move(validity), stream, mr); } using table_ptr_type = std::unique_ptr<cudf::table>; table_ptr_type merge(cudf::table_view const& left_table, cudf::table_view const& right_table, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // collect index columns for lhs, rhs, resp. // cudf::table_view index_left_view{left_table.select(key_cols)}; cudf::table_view index_right_view{right_table.select(key_cols)}; bool const nullable = cudf::has_nulls(index_left_view) || cudf::has_nulls(index_right_view); // extract merged row order according to indices: // auto const merged_indices = generate_merged_indices( index_left_view, index_right_view, column_order, null_precedence, nullable); // create merged table: // auto const n_cols = left_table.num_columns(); std::vector<std::unique_ptr<column>> merged_cols; merged_cols.reserve(n_cols); column_merger merger{merged_indices}; transform(left_table.begin(), left_table.end(), right_table.begin(), std::back_inserter(merged_cols), [&](auto const& left_col, auto const& right_col) { return cudf::type_dispatcher<dispatch_storage_type>( left_col.type(), merger, left_col, right_col, stream, mr); }); return std::make_unique<cudf::table>(std::move(merged_cols)); } struct merge_queue_item { table_view view; table_ptr_type table; // Priority is a separate member to ensure that moving from an object // does not change its priority (which would ruin the queue invariant) cudf::size_type priority = 0; merge_queue_item(table_view const& view, table_ptr_type&& table) : view{view}, table{std::move(table)}, priority{-view.num_rows()} { } bool operator<(merge_queue_item const& other) const { return priority < other.priority; } }; // Helper function to ensure that moving out of the priority_queue is "atomic" template <typename T> T top_and_pop(std::priority_queue<T>& q) { auto moved = std::move(const_cast<T&>(q.top())); q.pop(); return moved; } } // anonymous namespace table_ptr_type merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (tables_to_merge.empty()) { return std::make_unique<cudf::table>(); } auto const& first_table = tables_to_merge.front(); auto const n_cols = first_table.num_columns(); CUDF_EXPECTS(std::all_of(tables_to_merge.cbegin(), tables_to_merge.cend(), [n_cols](auto const& tbl) { return n_cols == tbl.num_columns(); }), "Mismatched number of columns"); CUDF_EXPECTS( std::all_of(tables_to_merge.cbegin(), tables_to_merge.cend(), [&](auto const& tbl) { return cudf::have_same_types(first_table, tbl); }), "Mismatched column types"); CUDF_EXPECTS(!key_cols.empty(), "Empty key_cols"); CUDF_EXPECTS(key_cols.size() <= static_cast<size_t>(n_cols), "Too many values in key_cols"); CUDF_EXPECTS(key_cols.size() == column_order.size(), "Mismatched size between key_cols and column_order"); // This utility will ensure all corresponding 
dictionary columns have matching keys. // It will return any new dictionary columns created as well as updated table_views. auto matched = cudf::dictionary::detail::match_dictionaries( tables_to_merge, stream, rmm::mr::get_current_device_resource()); auto merge_tables = matched.second; // A queue of (table view, table) pairs std::priority_queue<merge_queue_item> merge_queue; // The table pointer is null if we do not own the table (input tables) std::for_each(merge_tables.begin(), merge_tables.end(), [&](auto const& table) { if (table.num_rows() > 0) merge_queue.emplace(table, table_ptr_type()); }); // If there is only one non-empty table_view, return its copy if (merge_queue.size() == 1) { return std::make_unique<cudf::table>(merge_queue.top().view, stream, mr); } // No inputs have rows, return a table with same columns as the first one if (merge_queue.empty()) { return empty_like(first_table); } // Pick the two smallest tables and merge them // Until there is only one table left in the queue while (merge_queue.size() > 1) { // To delete the intermediate table at the end of the block auto const left_table = top_and_pop(merge_queue); // Deallocated at the end of the block auto const right_table = top_and_pop(merge_queue); // Only use mr for the output table auto const& new_tbl_mr = merge_queue.empty() ? mr : rmm::mr::get_current_device_resource(); auto merged_table = merge(left_table.view, right_table.view, key_cols, column_order, null_precedence, stream, new_tbl_mr); auto const merged_table_view = merged_table->view(); merge_queue.emplace(merged_table_view, std::move(merged_table)); } return std::move(top_and_pop(merge_queue).table); } } // namespace detail std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::merge( tables_to_merge, key_cols, column_order, null_precedence, cudf::default_stream_value, mr); } } // namespace cudf
4bf484cbebd0efe2292548b0b0e05a4bf4ce4011.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/merge.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/dictionary/detail/merge.hpp> #include <cudf/dictionary/detail/update_keys.hpp> #include <cudf/strings/detail/merge.cuh> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/merge.h> #include <thrust/pair.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <queue> #include <vector> namespace cudf { namespace detail { namespace { using detail::side; using index_type = detail::index_type; /** * @brief Merges the bits of two validity bitmasks. * * Merges the bits from two column_device_views into the destination validity buffer * according to `merged_indices` map such that bit `i` in `out_validity` * will be equal to bit `thrust::get<1>(merged_indices[i])` from `left_dcol` * if `thrust::get<0>(merged_indices[i])` equals `side::LEFT`; otherwise, * from `right_dcol`. * * `left_dcol` and `right_dcol` must not overlap. * * @tparam left_have_valids Indicates whether left_dcol mask is unallocated (hence, ALL_VALID) * @tparam right_have_valids Indicates whether right_dcol mask is unallocated (hence ALL_VALID) * @param[in] left_dcol The left column_device_view whose bits will be merged * @param[in] right_dcol The right column_device_view whose bits will be merged * @param[out] out_validity The output validity buffer after merging the left and right buffers * @param[in] num_destination_rows The number of rows in the out_validity buffer * @param[in] merged_indices The map that indicates the source of the input and index * to be copied to the output. 
Length must be equal to `num_destination_rows` */ template <bool left_have_valids, bool right_have_valids> __global__ void materialize_merged_bitmask_kernel( column_device_view left_dcol, column_device_view right_dcol, bitmask_type* out_validity, size_type const num_destination_rows, index_type const* const __restrict__ merged_indices) { size_type destination_row = threadIdx.x + blockIdx.x * blockDim.x; auto active_threads = __ballot_sync(0xffffffff, destination_row < num_destination_rows); while (destination_row < num_destination_rows) { auto const [src_side, src_row] = merged_indices[destination_row]; bool const from_left{src_side == side::LEFT}; bool source_bit_is_valid{true}; if (left_have_valids && from_left) { source_bit_is_valid = left_dcol.is_valid_nocheck(src_row); } else if (right_have_valids && !from_left) { source_bit_is_valid = right_dcol.is_valid_nocheck(src_row); } // Use ballot to find all valid bits in this warp and create the output // bitmask element bitmask_type const result_mask{__ballot_sync(active_threads, source_bit_is_valid)}; // Only one thread writes output if (0 == threadIdx.x % warpSize) { out_validity[word_index(destination_row)] = result_mask; } destination_row += blockDim.x * gridDim.x; active_threads = __ballot_sync(active_threads, destination_row < num_destination_rows); } } void materialize_bitmask(column_view const& left_col, column_view const& right_col, bitmask_type* out_validity, size_type num_elements, index_type const* merged_indices, rmm::cuda_stream_view stream) { constexpr size_type BLOCK_SIZE{256}; detail::grid_1d grid_config{num_elements, BLOCK_SIZE}; auto p_left_dcol = column_device_view::create(left_col, stream); auto p_right_dcol = column_device_view::create(right_col, stream); auto left_valid = *p_left_dcol; auto right_valid = *p_right_dcol; if (left_col.has_nulls()) { if (right_col.has_nulls()) { materialize_merged_bitmask_kernel<true, true> <<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>( left_valid, right_valid, out_validity, num_elements, merged_indices); } else { materialize_merged_bitmask_kernel<true, false> <<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>( left_valid, right_valid, out_validity, num_elements, merged_indices); } } else { if (right_col.has_nulls()) { materialize_merged_bitmask_kernel<false, true> <<<grid_config.num_blocks, grid_config.num_threads_per_block, 0, stream.value()>>>( left_valid, right_valid, out_validity, num_elements, merged_indices); } else { CUDF_FAIL("materialize_merged_bitmask_kernel<false, false>() should never be called."); } } CUDF_CHECK_CUDA(stream.value()); } struct side_index_generator { side _side; __device__ index_type operator()(size_type i) const noexcept { return index_type{_side, i}; } }; /** * @brief Generates the row indices and source side (left or right) in accordance with the index * columns. * * * @tparam index_type Indicates the type to be used to collect index and side information; * @param[in] left_table The left table_view to be merged * @param[in] right_table The right table_view to be merged * @param[in] column_order Sort order types of index columns * @param[in] null_precedence Array indicating the order of nulls with respect to non-nulls for the * index columns * @param[in] nullable Flag indicating if at least one of the table_view arguments has nulls * (defaults to true) * @param[in] stream CUDA stream used for device memory operations and kernel launches. 
* * @return A device_uvector of merged indices */ index_vector generate_merged_indices(table_view const& left_table, table_view const& right_table, std::vector<order> const& column_order, std::vector<null_order> const& null_precedence, bool nullable = true, rmm::cuda_stream_view stream = cudf::default_stream_value) { const size_type left_size = left_table.num_rows(); const size_type right_size = right_table.num_rows(); const size_type total_size = left_size + right_size; auto left_gen = side_index_generator{side::LEFT}; auto right_gen = side_index_generator{side::RIGHT}; auto left_begin = cudf::detail::make_counting_transform_iterator(0, left_gen); auto right_begin = cudf::detail::make_counting_transform_iterator(0, right_gen); index_vector merged_indices(total_size, stream); auto lhs_device_view = table_device_view::create(left_table, stream); auto rhs_device_view = table_device_view::create(right_table, stream); auto d_column_order = cudf::detail::make_device_uvector_async(column_order, stream); if (nullable) { auto d_null_precedence = cudf::detail::make_device_uvector_async(null_precedence, stream); auto ineq_op = detail::row_lexicographic_tagged_comparator<true>( *lhs_device_view, *rhs_device_view, d_column_order.data(), d_null_precedence.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } else { auto ineq_op = detail::row_lexicographic_tagged_comparator<false>( *lhs_device_view, *rhs_device_view, d_column_order.data()); thrust::merge(rmm::exec_policy(stream), left_begin, left_begin + left_size, right_begin, right_begin + right_size, merged_indices.begin(), ineq_op); } CUDF_CHECK_CUDA(stream.value()); return merged_indices; } /** * @brief Generate merged column given row-order of merged tables * (ordered according to indices of key_cols) and the 2 columns to merge. */ struct column_merger { explicit column_merger(index_vector const& row_order) : row_order_(row_order) {} template <typename Element, CUDF_ENABLE_IF(not is_rep_layout_compatible<Element>())> std::unique_ptr<column> operator()(column_view const&, column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("Unsupported type for merge."); } // column merger operator; // template <typename Element> std::enable_if_t<is_rep_layout_compatible<Element>(), std::unique_ptr<column>> operator()( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource()) const { auto lsz = lcol.size(); auto merged_size = lsz + rcol.size(); auto merged_col = cudf::detail::allocate_like(lcol.has_nulls() ? lcol : rcol, merged_size, cudf::mask_allocation_policy::RETAIN, stream, mr); //"gather" data from lcol, rcol according to row_order_ "map" //(directly calling gather() won't work because // lcol, rcol indices overlap!) 
// cudf::mutable_column_view merged_view = merged_col->mutable_view(); // initialize null_mask to all valid: // // Note: this initialization in conjunction with // _conditionally_ calling materialize_bitmask() below covers // the case materialize_merged_bitmask_kernel<false, false>() // which won't be called anymore (because of the _condition_ // below) // cudf::detail::set_null_mask(merged_view.null_mask(), 0, merged_view.size(), true, stream); // set the null count: // merged_col->set_null_count(lcol.null_count() + rcol.null_count()); // to resolve view.data()'s types use: Element // auto const d_lcol = lcol.data<Element>(); auto const d_rcol = rcol.data<Element>(); // capture lcol, rcol // and "gather" into merged_view.data()[indx_merged] // from lcol or rcol, depending on side; // thrust::transform(rmm::exec_policy(stream), row_order_.begin(), row_order_.end(), merged_view.begin<Element>(), [d_lcol, d_rcol] __device__(index_type const& index_pair) { auto const [side, index] = index_pair; return side == side::LEFT ? d_lcol[index] : d_rcol[index]; }); // CAVEAT: conditional call below is erroneous without // set_null_mask() call (see TODO above): // if (lcol.has_nulls() || rcol.has_nulls()) { // resolve null mask: // materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return merged_col; } private: index_vector const& row_order_; }; // specialization for strings template <> std::unique_ptr<column> column_merger::operator()<cudf::string_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto column = strings::detail::merge<index_type>(strings_column_view(lcol), strings_column_view(rcol), row_order_.begin(), row_order_.end(), stream, mr); if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = column->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return column; } // specialization for dictionary template <> std::unique_ptr<column> column_merger::operator()<cudf::dictionary32>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { auto result = cudf::dictionary::detail::merge( cudf::dictionary_column_view(lcol), cudf::dictionary_column_view(rcol), row_order_, stream, mr); // set the validity mask if (lcol.has_nulls() || rcol.has_nulls()) { auto merged_view = result->mutable_view(); materialize_bitmask( lcol, rcol, merged_view.null_mask(), merged_view.size(), row_order_.data(), stream); } return result; } // specialization for structs template <> std::unique_ptr<column> column_merger::operator()<cudf::struct_view>( column_view const& lcol, column_view const& rcol, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { // merge each child. auto const lhs = structs_column_view{lcol}; auto const rhs = structs_column_view{rcol}; auto it = cudf::detail::make_counting_transform_iterator( 0, [&, merger = column_merger{row_order_}](size_type i) { return cudf::type_dispatcher<dispatch_storage_type>( lhs.child(i).type(), merger, lhs.get_sliced_child(i), rhs.get_sliced_child(i), stream, mr); }); auto merged_children = std::vector<std::unique_ptr<column>>(it, it + lhs.num_children()); auto const merged_size = lcol.size() + rcol.size(); // materialize the output buffer rmm::device_buffer validity = lcol.has_nulls() || rcol.has_nulls() ? 
create_null_mask(merged_size, mask_state::UNINITIALIZED, stream, mr) : rmm::device_buffer{}; if (lcol.has_nulls() || rcol.has_nulls()) { materialize_bitmask(lcol, rcol, static_cast<bitmask_type*>(validity.data()), merged_size, row_order_.data(), stream); } return make_structs_column(merged_size, std::move(merged_children), lcol.null_count() + rcol.null_count(), std::move(validity), stream, mr); } using table_ptr_type = std::unique_ptr<cudf::table>; table_ptr_type merge(cudf::table_view const& left_table, cudf::table_view const& right_table, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // collect index columns for lhs, rhs, resp. // cudf::table_view index_left_view{left_table.select(key_cols)}; cudf::table_view index_right_view{right_table.select(key_cols)}; bool const nullable = cudf::has_nulls(index_left_view) || cudf::has_nulls(index_right_view); // extract merged row order according to indices: // auto const merged_indices = generate_merged_indices( index_left_view, index_right_view, column_order, null_precedence, nullable); // create merged table: // auto const n_cols = left_table.num_columns(); std::vector<std::unique_ptr<column>> merged_cols; merged_cols.reserve(n_cols); column_merger merger{merged_indices}; transform(left_table.begin(), left_table.end(), right_table.begin(), std::back_inserter(merged_cols), [&](auto const& left_col, auto const& right_col) { return cudf::type_dispatcher<dispatch_storage_type>( left_col.type(), merger, left_col, right_col, stream, mr); }); return std::make_unique<cudf::table>(std::move(merged_cols)); } struct merge_queue_item { table_view view; table_ptr_type table; // Priority is a separate member to ensure that moving from an object // does not change its priority (which would ruin the queue invariant) cudf::size_type priority = 0; merge_queue_item(table_view const& view, table_ptr_type&& table) : view{view}, table{std::move(table)}, priority{-view.num_rows()} { } bool operator<(merge_queue_item const& other) const { return priority < other.priority; } }; // Helper function to ensure that moving out of the priority_queue is "atomic" template <typename T> T top_and_pop(std::priority_queue<T>& q) { auto moved = std::move(const_cast<T&>(q.top())); q.pop(); return moved; } } // anonymous namespace table_ptr_type merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (tables_to_merge.empty()) { return std::make_unique<cudf::table>(); } auto const& first_table = tables_to_merge.front(); auto const n_cols = first_table.num_columns(); CUDF_EXPECTS(std::all_of(tables_to_merge.cbegin(), tables_to_merge.cend(), [n_cols](auto const& tbl) { return n_cols == tbl.num_columns(); }), "Mismatched number of columns"); CUDF_EXPECTS( std::all_of(tables_to_merge.cbegin(), tables_to_merge.cend(), [&](auto const& tbl) { return cudf::have_same_types(first_table, tbl); }), "Mismatched column types"); CUDF_EXPECTS(!key_cols.empty(), "Empty key_cols"); CUDF_EXPECTS(key_cols.size() <= static_cast<size_t>(n_cols), "Too many values in key_cols"); CUDF_EXPECTS(key_cols.size() == column_order.size(), "Mismatched size between key_cols and column_order"); // This utility will ensure all corresponding 
dictionary columns have matching keys. // It will return any new dictionary columns created as well as updated table_views. auto matched = cudf::dictionary::detail::match_dictionaries( tables_to_merge, stream, rmm::mr::get_current_device_resource()); auto merge_tables = matched.second; // A queue of (table view, table) pairs std::priority_queue<merge_queue_item> merge_queue; // The table pointer is null if we do not own the table (input tables) std::for_each(merge_tables.begin(), merge_tables.end(), [&](auto const& table) { if (table.num_rows() > 0) merge_queue.emplace(table, table_ptr_type()); }); // If there is only one non-empty table_view, return its copy if (merge_queue.size() == 1) { return std::make_unique<cudf::table>(merge_queue.top().view, stream, mr); } // No inputs have rows, return a table with same columns as the first one if (merge_queue.empty()) { return empty_like(first_table); } // Pick the two smallest tables and merge them // Until there is only one table left in the queue while (merge_queue.size() > 1) { // To delete the intermediate table at the end of the block auto const left_table = top_and_pop(merge_queue); // Deallocated at the end of the block auto const right_table = top_and_pop(merge_queue); // Only use mr for the output table auto const& new_tbl_mr = merge_queue.empty() ? mr : rmm::mr::get_current_device_resource(); auto merged_table = merge(left_table.view, right_table.view, key_cols, column_order, null_precedence, stream, new_tbl_mr); auto const merged_table_view = merged_table->view(); merge_queue.emplace(merged_table_view, std::move(merged_table)); } return std::move(top_and_pop(merge_queue).table); } } // namespace detail std::unique_ptr<cudf::table> merge(std::vector<table_view> const& tables_to_merge, std::vector<cudf::size_type> const& key_cols, std::vector<cudf::order> const& column_order, std::vector<cudf::null_order> const& null_precedence, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::merge( tables_to_merge, key_cols, column_order, null_precedence, cudf::default_stream_value, mr); } } // namespace cudf
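The public cudf::merge entry point defined above repeatedly pops the two smallest non-empty tables off a priority queue and merges them pairwise until a single table remains. A minimal caller-side sketch follows; only the merge() signature is taken from the code above, while the header names, the helper function name merge_two_sorted, and the choice of key column are illustrative assumptions.

// Hypothetical usage sketch: merge two pre-sorted tables on column 0,
// ascending order, nulls ordered before non-nulls.
#include <cudf/merge.hpp>                       // assumed public header
#include <cudf/table/table.hpp>
#include <cudf/table/table_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <memory>
#include <vector>

std::unique_ptr<cudf::table> merge_two_sorted(cudf::table_view const& lhs,
                                              cudf::table_view const& rhs)
{
  std::vector<cudf::table_view> tables{lhs, rhs};
  std::vector<cudf::size_type> key_cols{0};                       // merge key: first column
  std::vector<cudf::order> column_order{cudf::order::ASCENDING};  // one entry per key column
  std::vector<cudf::null_order> null_precedence{cudf::null_order::BEFORE};

  // Matches the free function at the end of the file above; it dispatches to
  // detail::merge on the default stream with the given memory resource.
  return cudf::merge(tables, key_cols, column_order, null_precedence,
                     rmm::mr::get_current_device_resource());
}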
32909aa554319982fd658f87b0f07968f459bd61.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <math.h>
#include <iostream>

// Doubles each of the n elements of data in place; one thread per element.
__global__ void multiplyBy2(unsigned int *data, unsigned int n)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < n)
    {
        data[tid] = 2 * data[tid];
    }
}

int main()
{
    thrust::host_vector<unsigned int> h_tab;
    thrust::device_vector<unsigned int> d_tab;

    for (int i = 1; i <= 10; i++)
    {
        h_tab.push_back(i); // input data
    }

    d_tab = h_tab; // copy host -> device

    multiplyBy2<<<1, 10>>>(d_tab.data().get(), d_tab.size());

    h_tab = d_tab; // copy device -> host

    for (int i = 0; i < 10; i++)
    {
        std::cout << h_tab[i] << "\n";
    }

    return 0;
}
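The hipified file above keeps the triple-chevron launch syntax, which HIP compilers generally accept as-is. An alternative is the hipLaunchKernelGGL macro used throughout the larger translation later in this corpus; a sketch of the equivalent launch for the same kernel and arguments (parameter order: kernel, grid, block, dynamic shared memory bytes, stream, kernel args) is:

// Equivalent launch: 1 block of 10 threads, no dynamic shared memory, default stream.
hipLaunchKernelGGL(multiplyBy2, dim3(1), dim3(10), 0, 0,
                   d_tab.data().get(),
                   static_cast<unsigned int>(d_tab.size()));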
32909aa554319982fd658f87b0f07968f459bd61.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <math.h> #include <iostream> __global__ void multiplyBy2(unsigned int *data, unsigned int n) { unsigned int tid = threadIdx.x + blockIdx.x*blockDim.x; if (tid<n) { data[tid] = 2 * data[tid]; } } int main() { thrust::host_vector<unsigned int> h_tab; thrust::device_vector<unsigned int> d_tab; for (int i = 1; i <= 10; i++) { h_tab.push_back(i);//dane wejsciowe } d_tab = h_tab; //Kopiowanie host->device multiplyBy2 << <1, 10 >> >(d_tab.data().get(), d_tab.size()); h_tab = d_tab; //Kopiowanie device->host for (int i = 0; i < 10; i++) { std::cout << h_tab[i] << "\n"; } return 0; }
25f75d1c7760f0af39f58c271b9e95694488008c.hip
// !!! This is a file automatically generated by hipify!!! #include "../NativeOps.h" #include <hip/hip_runtime.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include <loops/reduce3.h> #include <loops/reduce.h> #include <loops/indexreduce.h> #include <loops/pairwise_transform.h> #include <loops/transform.h> #include <loops/scalar.h> #include <loops/broadcasting.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> #include <thread> #include <map> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/grid.h> #include <loops/aggregates.h> //#include <sys/time.h> #include <hiprand/hiprand.h> hipDeviceProp_t *deviceProperties; hipFuncAttributes *funcAttributes = new hipFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(hipStream_t stream, hipError_t status, void *data){ SyncInfo *sync = (SyncInfo *) data; printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jIndex)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jIndex n,hipFuncAttributes attributes, hipDeviceProp_t properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads, then number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, hipFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 
0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, hipFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (debug && verbose) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) { int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int warpSize = deviceProperties[deviceId].warpSize; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); // round num_threads to nearest warpSize num_threads -= num_threads % warpSize; num_threads = nd4j::math::nd4j_max<int>(1, num_threads); if (num_threads < warpSize && tadLength < warpSize) num_threads = tadLength; // since we use shared memory as fast memory for some cases - we need to count that in int memory_limit = getBaseMemorySize(xRank, funcAttr); int memory_floor = memory_limit; int effective_block_limit = countMP * blockThreshold; int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit); int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1); if (debug && verbose) printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize); // at 
this moment we've stored all required information for things. time to count in reduction multipliers int reduction_per_block = 0; bool found = false; if (reduction > 0) while (!found) { reduction_per_block = (num_threads * elementSize * reduction); if (memory_limit + reduction_per_block < desiredShared) { memory_limit += reduction_per_block; found = true; } else { if (num_threads > minThreads) { num_threads -= 32; } else { memory_limit += reduction_per_block; found = true; } } } // at this moment we know total memory used per block, and we also know per-mp limit. int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1); if (debug && verbose) printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block); // we don't want to spawn more blocks, that gpu can actually handle without queue //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // if (num_blocks > countMP) // num_blocks = num_blocks - (num_blocks % countMP); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } reduction_per_block = (num_threads * elementSize * reduction); memory_limit = memory_floor + reduction_per_block; } if (debug && verbose) printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP); return dim3(num_blocks,num_threads, memory_limit); } /* * This method returns kernel launch param for linear memory access */ dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, hipFuncAttributes funcAttr) { int xRank = shape::rank(xShapeInfo); int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo); int zRank = 0; int memory_limit = getBaseMemorySize(xRank, funcAttr); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); int xLength = shape::length(xShapeInfo); int effective_block_limit = countMP * blockThreshold; // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here int num_threads = xLength / effective_block_limit; if (num_threads < minThreads) num_threads = minThreads; num_threads = num_threads - (num_threads % 32); int memory_floor = memory_limit; int num_blocks = xLength / num_threads; num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } } if (xLength / num_threads > blockLimit) num_blocks *= 2; dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (debug && verbose) printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit); return launchDims; } /** * This method returns kernel launch params with TAD-based memory access * * @param deviceId * @param xShapeInfo * @param tadShapeInfo * @param funcAttr * @param dimensionLength * @param elementSize * @param reductionSize * @return */ dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, hipFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) { int tadLength = 0; int numTads = 0; if (tadShapeInfo != nullptr) { tadLength = shape::length(tadShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; if (tadLength == 1) { if (debug && verbose) printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo)); } } else{ // we have special case - reduction along all dimensions tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768); numTads = shape::length(xShapeInfo) / tadLength; } int xRank = shape::rank(xShapeInfo); int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo); dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize); if ((debug && verbose ) ) { //|| launchDims.x == 1 printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z); } return launchDims; } /** * Returns optimal launch parameters * given the extra pointers passed in. * The extra pointer should be * the host pointer for the shape information * associated with the data. * From there it is used to obtain the length * from which we can derive the optimal launch parameters. 
* */ template <typename T> dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, hipFuncAttributes attributes, hipDeviceProp_t properties) { int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties); if (debug && verbose) printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y)); return launchDims; } nd4j::buffer::Buffer<int> * createScalarBuffer(hipStream_t stream) { int *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<int> *scalarDimension; nd4j::buffer::Buffer<int> *scalarShapeInfo; std::thread::id threadId; public: ScalarShapeInformation(hipStream_t stream) { int *scalarDimensionBuff = (int *) malloc(sizeof(int)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } int *getShapeInfoHostPointer() { return scalarShapeInfo->data; } int * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } int * getDimensionHostPointer() { return scalarDimension->data; } int * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; hipStream_t streamRef; public: ScalarInfo(hipStream_t stream) { T *scalarResult = (T*)malloc(sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ int *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the result pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ int *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum, double *x, int *xShapeInfo, double *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D1 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = 
getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 2); hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); checkCudaErrors(hipStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D2 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 2); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); hipLaunchKernelGGL(( indexReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *deviceTADOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("D3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, double, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (debug) 
checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *y, int yStride, double *result, int resultStride, double *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("D4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[25]); hipLaunchKernelGGL(( pairWiseTransformStridedDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param xIndexes * @param yIndexes * @param resultIndexes */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("D5 opNum:[%i]\n", opNum); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); hipLaunchKernelGGL(( pairWiseTransformDoubleIndex) , dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D6 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, 
funcAttributes[23]); hipLaunchKernelGGL(( pairWiseTransformDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D7 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D8 opNum:[%i]\n", opNum); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); /** * We have separate kernels, optimized for different number of dimensions for reductions */ if (dimensionLength == 1) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2); // this macro builds bunch 
of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ double NativeOps::execReduceScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("D9 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("D10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ double NativeOps::execReduce3ScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo){ if (debug && verbose) printf("D11 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = 
reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); hipLaunchKernelGGL(( reduce3ScalarDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // since this method should return scalar value - we should block on this call checkCudaErrors(hipStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D12 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); hipLaunchKernelGGL(( reduce3Double), dim3(1),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xStride * @param result * @param resultStride * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int xStride, double *result, int resultStride, double scalar, double *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D13 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleStrided, double, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int 
*resultShapeInfo, double scalar, double *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("D14 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int *resultShapeInfo, double scalar, double *extraParams, Nd4jIndex n, int *xIndexes, int *resultIndexes){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("D15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]); hipLaunchKernelGGL(( scalarDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execSummaryStatsScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D16 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // this is blocking method since method should return scalar checkCudaErrors(hipStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsDouble( Nd4jPointer 
*extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo,bool biasCorrected) { if (debug && verbose) printf("D17 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8); // we have to limit grid size here, due to limited nature of reduction/allocation pointers launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D18 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], dimensionLength, sizeof(double), 8); // we're limiting maximum grid size for summaryStats ops launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceDouble), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *z, int zStride, double *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && 
verbose) printf("D19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, double, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *result, int *resultShapeInfo, double *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D20 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); int *maskedAllocPointer = allocPointer; // special pointer for special buffer for special ops double *specialPointer = reinterpret_cast<double *>(extraPointers[6]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; double * special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4); /** * ops between 38 and 41 are special ops: * SoftMax, LogSoftMax, SoftMaxDerivative, IsMax * On cuda we execute them as */ // simple trick to get workaround over reductions into scalar if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block /* * For vector cases of everything, but IsMax (41) we go for single-kernel calls */ int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(256, length); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(double) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials // we'll do some pointers mangling here, and execute kernels one by one int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// SoftMax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = 
{shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; // TODO: we could get rid of this one eventually hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream, dimension, maxDimension, maxShapeBuffer, shape[0]); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); // exp 3 execTransformDouble(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceDouble(tempPointers, 1, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastDouble(tempPointers, 3, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); // log 3 if (opNum == 40) execTransformDouble(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams); else if (opNum == 39) execTransformDouble(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { /** * In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call */ int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; hipLaunchKernelGGL(( fillIsMaxDouble), dim3(1), dim3(128), 0, *stream , result, shape::length(hostXShapeInfo), targetIdx); } else { int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<double *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute filler hipLaunchKernelGGL(( fillDimensionalIsMaxDouble), dim3(blockLimit), dim3(64), funcAttributes[37].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, 
dimensionLength, tadMaxOffsets ); checkCudaErrors(hipStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformDouble\n"); break; } } } } else { // for Im2Col & Col2Im we enforce higher dimensionality // TODO: investigate this on high-end gpus if (opNum == 37 || opNum == 36) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 768; } // Histogram op requires additional memory chunk // FIXME: make this one to use cache if (opNum == 48) { int length = shape::length(hostZShapeInfo); hipMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(double)); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (debug || opNum == 48) checkCudaErrors(hipStreamSynchronize(*stream)); // release Histogram memory if (opNum == 48) { hipFree((void *)maskedAllocPointer); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *result, int *resultShapeInfo, double *extraParams, int *xIndexes, int *resultIndexes) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("D21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]); hipLaunchKernelGGL(( transformDoubleIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ float NativeOps::execIndexReduceScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams){ if (debug && verbose) printf("F1 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 2); if (debug && verbose && launchDims.x == 1) printf("AF1 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x),dim3(launchDims.y), 
launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // once again - since we return scalar value in this method, we should block this kernel launch checkCudaErrors(hipStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execIndexReduceScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ if (debug && verbose) printf("H1 opNum:[%i]\n", opNum); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 2); if (debug && verbose && launchDims.x == 1) printf("AH1 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking for scalar output checkCudaErrors(hipStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 2); if (verbose && launchDims.x == 1) printf("AF2 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceFloat), dim3(launchDims.x), dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execIndexReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int 
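// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): the dimensional
// IndexReduce entry points compute one index per TAD (tensor-along-dimension)
// and write it into the element-typed result buffer. For a row-major
// rows x cols matrix reduced along dimension {1}, an IMax-style reference
// would be (actual iteration order is driven by deviceTADShapeInfo /
// deviceTADOffsets, which this sketch ignores):
//
//   void indexReduceMaxRows(const float *x, float *result, int rows, int cols) {
//       for (int r = 0; r < rows; r++) {
//           int best = 0;
//           for (int c = 1; c < cols; c++)
//               if (x[r * cols + c] > x[r * cols + best]) best = c;
//           result[r] = (float) best;   // index stored as the element type
//       }
//   }
// ---------------------------------------------------------------------------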
*dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 2); if (verbose && launchDims.x == 1) printf("AH2 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( indexReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ /* hipEvent_t start; hipEventCreateWithFlags(&start, hipEventDisableTiming); timespec tsX; timespec tsY; clock_gettime(CLOCK_REALTIME, &tsX); */ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *deviceTADOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, float, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) /* SyncInfo *info = new SyncInfo(); info->streamId = 32; info->callId = 1234567890; timespec ts1; timespec ts2; clock_gettime(CLOCK_REALTIME, &ts1); */ /* broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), y, yShapeInfo, shape::rank(hostYShapeInfo), result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ); */ /* clock_gettime(CLOCK_REALTIME, &ts2); // hipEventRecord(start, 0); // hipStreamAddCallback(*stream, syncCallback, (void*)info, 0); */ if (debug) 
checkCudaErrors(hipStreamSynchronize(*stream)); /* clock_gettime(CLOCK_REALTIME, &tsY); printf("Execution time: %i\n", (ts2.tv_nsec - ts1.tv_nsec)); printf("Overall time: %i\n", (tsY.tv_nsec - tsX.tv_nsec)); printf("Callback setup time: %i\n", (tsY.tv_nsec - ts2.tv_nsec)); printf("-------------------------------------\n"); */ } void NativeOps::execBroadcastHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *deviceTADOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("H3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, float16, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *y, int yStride, float *result, int resultStride, float *extraParams, Nd4jIndex n){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[11]); if (verbose && launchDims.x == 1) printf("AF4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); hipLaunchKernelGGL(( pairWiseTransformStridedFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); if (debug) { checkCudaErrors(hipStreamSynchronize(*stream)); } } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *y, int yStride, float16 *result, int resultStride, float16 *extraParams, Nd4jIndex n){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, 
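// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): a broadcast op pairs
// every TAD of x with the smaller operand y and applies a pairwise op, e.g.
// subtracting a length-cols vector from every row of a rows x cols matrix
// when dimension = {1}. Host-side reference (TAD bookkeeping omitted):
//
//   void broadcastSubRows(const float *x, const float *y, float *z,
//                         int rows, int cols) {
//       for (int r = 0; r < rows; r++)
//           for (int c = 0; c < cols; c++)
//               z[r * cols + c] = x[r * cols + c] - y[c];  // y broadcast over rows
//   }
// ---------------------------------------------------------------------------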
funcAttributes[11]); if (verbose && launchDims.x == 1) printf("AH4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); hipLaunchKernelGGL(( pairWiseTransformStridedHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param xIndexes * @param yIndexes * @param resultIndexes */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0); if (verbose && launchDims.x == 1) printf("AF5 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( pairWiseTransformFloatIndex), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float16), 0); if (verbose && launchDims.x == 1) printf("AH5 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( pairWiseTransformHalfIndex), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param 
result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F6 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]); if (verbose && launchDims.x == 1) { printf("AF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y); shape::printShapeInfoLinear(hostXShapeInfo); } hipLaunchKernelGGL(( pairWiseTransformFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H6 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]); if (verbose && launchDims.x == 1) { printf("HF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y); shape::printShapeInfoLinear(hostXShapeInfo); } hipLaunchKernelGGL(( pairWiseTransformHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F7 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 
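// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): pairwise transforms
// are elementwise, z[i] = op(x[i], y[i]); the strided entry points above walk
// all three buffers with explicit element strides. Host-side reference with
// addition as a stand-in op:
//
//   void pairwiseAddStrided(const float *x, int xStride,
//                           const float *y, int yStride,
//                           float *z, int zStride, Nd4jIndex n) {
//       for (Nd4jIndex i = 0; i < n; i++)
//           z[i * zStride] = x[i * xStride] + y[i * yStride];
//   }
// ---------------------------------------------------------------------------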
1); if (verbose && launchDims.x == 1) printf("AF7 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H7 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1); if (verbose && launchDims.x == 1) printf("AH7 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension,int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F8 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1); if (verbose && launchDims.x == 1) printf("AF8 opNum:[%i]\n", opNum); // we call different kernels optimized for different number of dimensions in TAD if (dimensionLength == 1) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer 
*extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension,int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H8 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1); if (verbose && launchDims.x == 1) printf("AH8 opNum:[%i]\n", opNum); // calling different kernels, depending on number of dimensions in TAD if (dimensionLength == 1) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ float NativeOps::execReduceScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F9 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]); if (verbose && launchDims.x == 1) printf("AF9 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking this one checkCudaErrors(hipStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduceScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H9 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); 
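// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): a dimensional reduce
// collapses every TAD to a single value; the 1D / 3D / XD kernel selection
// above only changes the indexing strategy, not the semantics. Sum along
// dimension {1} of a row-major rows x cols matrix:
//
//   void reduceSumRows(const float *x, float *result, int rows, int cols) {
//       for (int r = 0; r < rows; r++) {
//           float acc = 0.f;
//           for (int c = 0; c < cols; c++) acc += x[r * cols + c];
//           result[r] = acc;
//       }
//   }
// ---------------------------------------------------------------------------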
float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]); if (verbose && launchDims.x == 1) printf("AH9 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("F10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AF10 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execReduce3Half( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("H10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AH10 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * 
@param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ float NativeOps::execReduce3ScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("F11 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AF11 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduce3ScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("H11 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AH11 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z + 2048, *stream, opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int 
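// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): reduce3 ops fold two
// arrays into one value (dot product, cosine similarity, euclidean distance,
// ... depending on opNum). The scalar entry points above reduce the whole
// buffers at once; a dot-product-style host reference:
//
//   float reduce3Dot(const float *x, const float *y, Nd4jIndex n) {
//       float acc = 0.f;
//       for (Nd4jIndex i = 0; i < n; i++) acc += x[i] * y[i];
//       return acc;
//   }
// ---------------------------------------------------------------------------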
*>(extraPointers[0]);
    int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
    int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
    int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
    int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
    int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]);

    if (debug && verbose)
        printf("F12 opNum:[%i]\n", opNum);

    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);

    dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]);

    if (verbose && launchDims.x == 1)
        printf("AF12 opNum:[%i]\n", opNum);

    if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
        hipLaunchKernelGGL(( reduce3ScalarFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
                opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength,
                1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
    } else {
        hipLaunchKernelGGL(( reduce3Float), dim3(1), dim3(launchDims.y), launchDims.z, *stream,
                opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength,
                1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
    }

    if (debug)
        checkCudaErrors(hipStreamSynchronize(*stream));
}

void NativeOps::execReduce3Half(
        Nd4jPointer *extraPointers,
        int opNum,
        float16 *x,
        int *xShapeInfo,
        float16 *extraParams,
        float16 *y,
        int *yShapeInfo,
        float16 *result,
        int *resultShapeInfo,
        int *dimension,
        int dimensionLength){

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
    int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
    int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);
    int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]);
    int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]);

    if (debug && verbose)
        printf("H12 opNum:[%i]\n", opNum);

    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]);

    dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]);

    if (verbose && launchDims.x == 1)
        printf("AH12 opNum:[%i]\n", opNum);

    if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) {
        hipLaunchKernelGGL(( reduce3ScalarHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
                opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength,
                1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
    } else {
        hipLaunchKernelGGL(( reduce3Half), dim3(1), dim3(launchDims.y), launchDims.z, *stream,
                opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength,
                1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets);
    }

    if (debug)
        checkCudaErrors(hipStreamSynchronize(*stream));
}

/**
 *
 * @param opNum
 * @param x
 * @param xStride
 * @param result
 * @param resultStride
 * @param scalar
 * @param extraParams
 * @param n
 */
void NativeOps::execScalarFloat(
        Nd4jPointer *extraPointers,
        int opNum,
        float *x,
        int xStride,
        float *result,
        int resultStride,
        float scalar,
        float
*extraParams,
        Nd4jIndex n){

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);

    if (debug && verbose)
        printf("F13 opNum:[%i]\n", opNum);

    dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);

    if (verbose && launchDims.x == 1)
        printf("AF13 opNum:[%i]\n", opNum);

    // this macro builds bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(scalarSimpleStrided, float, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS))

    if (debug)
        checkCudaErrors(hipStreamSynchronize(*stream));
}

void NativeOps::execScalarHalf(
        Nd4jPointer *extraPointers,
        int opNum,
        float16 *x,
        int xStride,
        float16 *result,
        int resultStride,
        float scalar,
        float16 *extraParams,
        Nd4jIndex n){

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);

    if (debug && verbose)
        printf("H13 opNum:[%i]\n", opNum);

    dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]);

    if (verbose && launchDims.x == 1)
        printf("AH13 opNum:[%i]\n", opNum);

    // this macro builds bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS))

    if (debug)
        checkCudaErrors(hipStreamSynchronize(*stream));
}

/**
 *
 * @param opNum
 * @param x
 * @param xShapeInfo
 * @param result
 * @param resultShapeInfo
 * @param scalar
 * @param extraParams
 * @param n
 */
void NativeOps::execScalarFloat(
        Nd4jPointer *extraPointers,
        int opNum,
        float *x,
        int *xShapeInfo,
        float *result,
        int *resultShapeInfo,
        float scalar,
        float *extraParams){

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);

    Nd4jIndex n = shape::length(hostXShapeInfo);

    if (debug && verbose)
        printf("F14 opNum:[%i]\n", opNum);

    //dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]);

    int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);

    dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]);

    if (verbose && launchDims.x == 1)
        printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo));

    // this macro builds bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS))

    if (debug)
        checkCudaErrors(hipStreamSynchronize(*stream));
}

void NativeOps::execScalarHalf(
        Nd4jPointer *extraPointers,
        int opNum,
        float16 *x,
        int *xShapeInfo,
        float16 *result,
        int *resultShapeInfo,
        float scalarF,
        float16 *extraParams){

    hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);

    Nd4jIndex n = shape::length(hostXShapeInfo);

    if (debug && verbose)
        printf("H14 opNum:[%i]\n", opNum);

    int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);

    dim3 launchDims =
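// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): scalar ops apply
// z[i] = op(x[i], scalar) over the whole buffer; the strided overloads above
// use explicit element strides. Host-side reference with multiplication as a
// stand-in op:
//
//   void scalarMulStrided(const float *x, int xStride, float *z, int zStride,
//                         float scalar, Nd4jIndex n) {
//       for (Nd4jIndex i = 0; i < n; i++)
//           z[i * zStride] = x[i * xStride] * scalar;
//   }
// ---------------------------------------------------------------------------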
getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]); float16 scalar = (float16) scalarF; if (verbose && launchDims.x == 1) printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *result, int *resultShapeInfo, float scalar, float *extraParams, int *xIndexes, int *resultIndexes){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); if (debug && verbose) printf("F15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]); if (verbose && launchDims.x == 1) printf("AF15 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( scalarFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ float NativeOps::execSummaryStatsScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F16 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8); if (verbose && launchDims.x == 1) printf("AF16 opNum:[%i]\n", opNum); // we limit grid size for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execSummaryStatsScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = 
reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H16 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float16), 8); if (verbose && launchDims.x == 1) printf("AH16 opNum:[%i]\n", opNum); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking call checkCudaErrors(hipStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F17 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8); if (verbose && launchDims.x == 1) printf("AF17 opNum:[%i]\n", opNum); // limiting number of blocks in grid, to match buffer memory size launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H17 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = 
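// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): summary-stats ops
// compute moments such as variance / standard deviation, and the
// biasCorrected flag selects the sample (n - 1) versus population (n)
// denominator. Host-side variance reference (assumes n > 1 when corrected):
//
//   float varianceOf(const float *x, Nd4jIndex n, bool biasCorrected) {
//       double mean = 0.0, ss = 0.0;
//       for (Nd4jIndex i = 0; i < n; i++) mean += x[i];
//       mean /= (double) n;
//       for (Nd4jIndex i = 0; i < n; i++) { double d = x[i] - mean; ss += d * d; }
//       return (float) (ss / (double) (biasCorrected ? n - 1 : n));
//   }
// ---------------------------------------------------------------------------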
reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float16), 8); if (verbose && launchDims.x == 1) printf("AH17 opNum:[%i]\n", opNum); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F18 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(float), 8); if (verbose && launchDims.x == 1) printf("AF18 opNum:[%i]\n", opNum); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceFloat), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H18 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(float16), 
8); if (verbose && launchDims.x == 1) printf("AH18 opNum:[%i]\n", opNum); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); hipLaunchKernelGGL(( summaryStatsReduceHalf), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *z, int zStride, float *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("F19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (verbose && launchDims.x == 1) printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *z, int zStride, float16 *extraParams, Nd4jIndex n) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("H19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (verbose && launchDims.x == 1) printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float16, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("F20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops float 
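// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only): transform ops are
// unary and elementwise, z[i] = f(x[i]); the strided entry points above walk
// both buffers with the given strides. Host-side reference with exp as the
// stand-in op (assumes <cmath>):
//
//   void transformExpStrided(const float *x, int xStride,
//                            float *z, int zStride, Nd4jIndex n) {
//       for (Nd4jIndex i = 0; i < n; i++)
//           z[i * zStride] = expf(x[i * xStride]);
//   }
// ---------------------------------------------------------------------------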
*specialPointer = reinterpret_cast<float *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4); int *maskedAllocPointer = allocPointer; dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if (verbose && launchDims.x == 1) printf("AF20 opNum:[%i]\n", opNum); // simple trick to get workaround over reductions into scalar // that's special ops: SoftMax, SoftMaxDerivative, LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(float) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); // exp 3 execTransformFloat(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceFloat(tempPointers, 1, dx, xShapeInfo, extraParams, special, maxShapeBuffer, 
maxDimension, 1); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastFloat(tempPointers, 3, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformFloat(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams); else if (opNum == 39) execTransformFloat(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // if that's 1D input - we'll just go for single dim IMax op call + filler int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; hipLaunchKernelGGL(( fillIsMaxFloat), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx); checkCudaErrors(hipStreamSynchronize(*stream)); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<float *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute hipLaunchKernelGGL(( fillDimensionalIsMaxFloat), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(hipStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformFloat\n"); break; } } } } else { // we're enforcing larger grids for Col2Im & Im2Col // TODO: for high-end gpus we might use higher values here if (opNum == 37 || opNum == 36) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 384; } // histogram op requies additional memory chunk :( if (opNum == 48) { int length = shape::length(hostZShapeInfo); hipMalloc((void **) &maskedAllocPointer, length * launchDims.x * sizeof(float)); } DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (debug || opNum == 48) checkCudaErrors(hipStreamSynchronize(*stream)); // release memory chunk if (opNum == 48) { hipFree((void *) maskedAllocPointer); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execTransformHalf(Nd4jPointer 
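// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, comment only, assumption): the
// length * launchDims.x * sizeof(float) scratch buffer allocated for the
// histogram op (opNum 48) above is consistent with one private histogram of
// `length` bins per block that is merged afterwards; the kernel-side layout
// is not shown in this file, so treat this as a generic illustration only:
//
//   void mergeBlockHistograms(const float *perBlock, float *out,
//                             int bins, int blocks) {
//       for (int b = 0; b < bins; b++) {
//           float acc = 0.f;
//           for (int g = 0; g < blocks; g++) acc += perBlock[g * bins + b];
//           out[b] = acc;
//       }
//   }
// ---------------------------------------------------------------------------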
*extraPointers,int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("H20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *maskedAllocPointer = allocPointer; float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float16 * special = (float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if (verbose && launchDims.x == 1) printf("AH20 opNum:[%i]\n", opNum); // simple trick to get workaround over reductions into scalar // SoftMax, SoftMaxDerivative, LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(float16) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; // FIXME: fix this hipLaunchKernelGGL(( prepareShapeBuffer) , dim3(1), dim3(1), 128, *stream , dimension, maxDimension, maxShapeBuffer, shape[0]); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] 
= extraPointers[11]; // sub 1 execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); // exp 3 execTransformHalf(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceHalf(tempPointers, 1, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastHalf(tempPointers, 3, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (opNum == 40) { if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); execTransformHalf(tempPointers, 47, dx, xShapeInfo, dx, xShapeInfo, extraParams); } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformHalf(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams); else if (opNum == 39) execTransformHalf(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // 1D input, aka vector int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; hipLaunchKernelGGL(( fillIsMaxHalf), dim3(1), dim3(128), 1536, *stream , result, shape::length(hostXShapeInfo), targetIdx); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<float16 *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute hipLaunchKernelGGL(( fillDimensionalIsMaxHalf), dim3(blockLimit), dim3(64), funcAttributes[36].sharedSizeBytes, *stream, special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(hipStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformHalf\n"); break; } } } } else { // Im2Col & Col2Im enforced grids if (opNum == 37 || opNum == 36) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 384; } // Histogram op requires additional memory chunk if (opNum == 48) { int length = shape::length(hostZShapeInfo); hipMalloc((void **)&maskedAllocPointer, length * launchDims.x * 
sizeof(float16)); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (debug || opNum == 48) checkCudaErrors(hipStreamSynchronize(*stream)); // release that histogram memory chunk if (opNum == 48) { hipFree((void *)maskedAllocPointer); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *resultIndexes) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("F21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (verbose && launchDims.x == 1) printf("AF21 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( transformFloatIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *resultIndexes) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("H21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (verbose && launchDims.x == 1) printf("AH21 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( transformHalfIndexes), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } template <typename T> __device__ void flattenKernelGeneric(int dOffset, char order, T *result, int *resultShapeInfo, T *input, int *inputShapeInfo, int *allocationPointer) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int *zShape = shape::shapeOf(resultShapeInfo); int *zStride = shape::stride(resultShapeInfo); int *yShape = shape::shapeOf(inputShapeInfo); int *yStride = shape::stride(inputShapeInfo); char yOrder = shape::order(inputShapeInfo); int len = shape::length(inputShapeInfo); int resultEWS = shape::elementWiseStride(resultShapeInfo); int 
inputEWS = shape::elementWiseStride(inputShapeInfo); if (yOrder == order) { if (resultEWS >= 1 && inputEWS >= 1) { for (int i = tid; i < len; i+= gridDim.x * blockDim.x) { result[i * resultEWS + dOffset] = input[i * inputEWS]; } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } } } extern "C" __global__ void flattenKernelDouble(int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<double>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelFloat(int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelHalf(int offset, char order, float16 *result, int *resultShapeInfo, float16 *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float16>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flattenFloat( Nd4jPointer *extraPointers, int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (debug && verbose) printf("F22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (verbose && launchDims.x == 1) printf("AF222 opNum:[7]\n"); hipLaunchKernelGGL(( flattenKernelFloat), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::flattenHalf( Nd4jPointer *extraPointers, int offset, char order, float16 *result, int *resultShapeInfo, float16 *input, int *inputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (debug && 
verbose) printf("H22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (verbose && launchDims.x == 1) printf("AH222 opNum:[7]\n"); hipLaunchKernelGGL(( flattenKernelHalf), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flattenDouble( Nd4jPointer *extraPointers, int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (debug && verbose) printf("D30 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]); hipLaunchKernelGGL(( flattenKernelDouble), dim3(launchDims.x),dim3(launchDims.y), launchDims.z, *stream, offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::checkP2P() { int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; bool tempSupport = true; if (devCnt > 1) { for (int x = 0; x < devCnt; x++) { for (int y = 0; y < devCnt; y++) { if (x == y) continue; int canAccess = 0; hipSetDevice(x); hipDeviceCanAccessPeer(&canAccess, x , y); if (!canAccess) tempSupport = false; } } supportedP2P = tempSupport; hipSetDevice(curDevice); } else { // if we have only 1 device - we say that we support P2P, since all data will be on 1 device supportedP2P = true; } } void NativeOps::enableP2P(bool enable) { if (enable == allowedP2P) return; int curDevice = 0; hipGetDevice(&curDevice); int devCnt = 0; hipGetDeviceCount(&devCnt); if (curDevice < 0 && curDevice > devCnt) curDevice = 0; if (devCnt > 1) { for (int x = 0; x < devCnt; x++) { for (int y = 0; y < devCnt; y++) { if (x == y) continue; int canAccess = 0; hipSetDevice(x); hipDeviceCanAccessPeer(&canAccess, x , y); if (canAccess) { if (enable) { hipDeviceEnablePeerAccess(y, 0); } else { hipDeviceDisablePeerAccess(y); } } else { if (verbose) printf("Peer access [%i] -> [%i] isn't possible\n", x, y); } } } hipSetDevice(curDevice); } allowedP2P = enable; hipSetDevice(curDevice); } bool NativeOps::isP2PAvailable() { return supportedP2P; } void NativeOps::initializeDevicesAndFunctions() { int devCnt = 0; hipGetDeviceCount(&devCnt); deviceProperties = new hipDeviceProp_t[devCnt]; for (int i = 0; i < devCnt; i++) { hipSetDevice(i); hipGetDeviceProperties(&deviceProperties[i], i); hipDeviceSetLimit(hipLimitStackSize, 4096); } hipSetDevice(0); checkP2P(); // enabling p2p gpu access if it's supported if (supportedP2P && devCnt > 1) enableP2P(allowedP2P); hipFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes); //void (*transformFloatPointer1)(int opNum, float *dy,int 
*shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat; // FIXME hipFuncGetAttributes(&funcAttributes[1], transformFloatIndexes); //void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat; // FIXME hipFuncGetAttributes(&funcAttributes[2], transformFloatIndexes); hipFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat); hipFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes); // void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat; hipFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes); // void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat; hipFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes); hipFuncGetAttributes(&funcAttributes[7], reduce3Float); hipFuncGetAttributes(&funcAttributes[8], reduceSimpleGenericXD_0_float); // printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes); hipFuncGetAttributes(&funcAttributes[28], reduceSimpleGeneric1D_0_float); // 1D // printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes); hipFuncGetAttributes(&funcAttributes[29], reduceSimpleGeneric3D_0_float); // 6D // printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes); hipFuncGetAttributes(&funcAttributes[30], flattenKernelFloat); hipFuncGetAttributes(&funcAttributes[31], concatKernelFloat); hipFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat); hipFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex); hipFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat); hipFuncGetAttributes(&funcAttributes[12], broadcastSimple_0_float); hipFuncGetAttributes(&funcAttributes[13], indexReduceFloat); ///////////////////////////////////////// Doubles are separate, just in case of... 
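    // Note: the attributes cached above are consumed later through getBasicLaunchParams() /
    // getReduceLaunchParams(), which read sharedSizeBytes/numRegs to size kernel launches.
    // A representative use (mirroring flattenFloat earlier in this file) looks like:
    //
    //   dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]),
    //                                          shape::length(hostYShapeInfo), 2, funcAttributes[30]);
    //   hipLaunchKernelGGL((flattenKernelFloat), dim3(launchDims.x), dim3(launchDims.y),
    //                      launchDims.z, *stream, /* kernel arguments */ ...);
    //
    // so the indices recorded here must stay in sync with the indices used at those call sites.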
hipFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes); // void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME hipFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes); //void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME hipFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes); hipFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble); hipFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes); //void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble; hipFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes); //void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble; hipFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes); hipFuncGetAttributes(&funcAttributes[21], reduce3Double); hipFuncGetAttributes(&funcAttributes[22], reduceSimpleGenericXD_0_double); hipFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble); hipFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex); hipFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble); hipFuncGetAttributes(&funcAttributes[26], broadcastSimple_0_double); hipFuncGetAttributes(&funcAttributes[27], indexReduceDouble); hipFuncGetAttributes(&funcAttributes[32], reduceSimpleGeneric1D_0_double); // 1D hipFuncGetAttributes(&funcAttributes[33], reduceSimpleGeneric3D_0_double); // 6D hipFuncGetAttributes(&funcAttributes[34], flattenKernelDouble); hipFuncGetAttributes(&funcAttributes[35], concatKernelDouble); hipFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat); hipFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble); hipFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat); hipFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble); hipFuncGetAttributes(&funcAttributes[40], concatKernelVStackFloat); hipFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble); hipFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat); hipFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble); ///////////////////////// hipFuncGetAttributes(&funcAttributes[44], averagingKernelHalf); hipFuncGetAttributes(&funcAttributes[45], averagingKernelFloat); hipFuncGetAttributes(&funcAttributes[46], averagingKernelDouble); // hipFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float); hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16); hipFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double); } /** * This method acquires memory chunk of requested size on host side * * @param pointer pointer that'll be used for allocation * @param memorySize memory size, in bytes * @param flags optional parameter */ Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) { Nd4jPointer pointer; // hipHostMallocMapped |hipHostMallocPortable hipError_t res = hipHostMalloc((void **)&pointer, memorySize, hipHostMallocDefault); if (res != 0) pointer = 0L; return pointer; } /** * This method acquires memory chunk of requested 
size on specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's pointer to device_id, etc
 * @param flags optional parameter
 */
Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) {
    Nd4jPointer pointer;
    hipError_t res = hipMalloc((void **)&pointer, memorySize);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method releases previously allocated host memory space
 *
 * @param pointer pointer that'll be freed
 */
int NativeOps::freeHost(Nd4jPointer pointer) {
    hipError_t res = hipHostFree((void *) pointer);
    if (res != 0)
        pointer = 0L;
    return 1L;
}

/**
 * This method releases previously allocated memory space on device
 *
 * @param pointer pointer that'll be freed
 * @param ptrToDeviceId pointer to deviceId.
 */
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
    hipError_t res = hipFree((void *)pointer);
    if (res != 0)
        pointer = 0L;
    return 1L;
}

Nd4jPointer NativeOps::createContext() {
    return 0L;
}

Nd4jPointer NativeOps::createStream() {
    Nd4jPointer nativeStream = 0;
    hipError_t result = hipStreamCreate((hipStream_t *) &nativeStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return nativeStream;
}

Nd4jPointer NativeOps::createEvent() {
    Nd4jPointer nativeEvent = 0;
    hipError_t result = hipEventCreateWithFlags((hipEvent_t *) &nativeEvent, hipEventDisableTiming);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return nativeEvent;
}

int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
    hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event);
    hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream);
    hipError_t result = hipEventRecord(*pEvent, *pStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return 1;
}

int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
    int deviceId = getDeviceId(ptrToDeviceId);
    hipError_t result = hipSetDevice(deviceId);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else
        return 1;
}

Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;
    hipGetDevice(&orig);
    if (device >= 0 && device != orig) {
        hipSetDevice(device);
    }
    size_t memFree = 0;
    size_t memTotal = 0;
    hipMemGetInfo(&memFree, &memTotal);
    if (device >= 0 && device != orig) {
        hipSetDevice(orig);
    }
    return (Nd4jIndex) memFree;
}

Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;
    hipGetDevice(&orig);
    if (device >= 0 && device != orig) {
        hipSetDevice(device);
    }
    size_t memFree = 0;
    size_t memTotal = 0;
    hipMemGetInfo(&memFree, &memTotal);
    if (device >= 0 && device != orig) {
        hipSetDevice(orig);
    }
    return (Nd4jIndex) memTotal;
}

int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    return memcpyAsync(dst, src, size, flags, reserved);
}

int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved);
    hipMemcpyKind kind;

    if (debug)
        checkCudaErrors(hipStreamSynchronize(*pStream));

    switch (flags) {
        case 0: {
                kind = hipMemcpyHostToHost;
            }
            break;
        case 1: {
                kind = hipMemcpyHostToDevice;
            }
            break;
        case 2: {
                kind = hipMemcpyDeviceToHost;
            }
            break;
        case 3: {
                kind = hipMemcpyDeviceToDevice;
            }
            break;
        default: {
            printf("UNDEFINED MEMCPY!\n");
            break;
        }
    }
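    // Illustrative caller-side sketch (hedged; "nativeOps", "devBuf", "hostBuf" and "N" are
    // hypothetical names, not part of this translation unit). The flags argument selects the
    // copy direction decoded above, and the reserved argument carries the stream handle that
    // is reinterpreted as a hipStream_t here:
    //
    //   Nd4jPointer stream = nativeOps->createStream();
    //   nativeOps->memcpyAsync((Nd4jPointer) devBuf, (Nd4jPointer) hostBuf,
    //                          (Nd4jIndex) (N * sizeof(float)), 1 /* host -> device */, stream);
    //   nativeOps->streamSynchronize(stream);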
hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream); if (result != 0) { checkCudaErrors(result); printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, (int) result ); fflush(stdout); fflush(stderr); return 0L; } else return 1; } int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) { hipError_t result = hipMemset((void *) dst, value, (size_t) size); checkCudaErrors(result); if (result != 0) return 0L; else return 1; } int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved); hipError_t result = hipMemsetAsync((void *) dst, value, (size_t) size, *pStream); checkCudaErrors(result); if (result != 0) return 0L; else return 1; } int NativeOps::destroyEvent(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t result = hipEventDestroy(*pEvent); checkCudaErrors(result); if (result != 0) return 0L; else return 1; } int NativeOps::streamSynchronize(Nd4jPointer stream) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&stream); hipError_t result = hipStreamSynchronize(*pStream); checkCudaErrors(result); if (result != 0) return 0L; else return 1L; } int NativeOps::eventSynchronize(Nd4jPointer event) { hipEvent_t *pEvent = reinterpret_cast<hipEvent_t *>(&event); hipError_t result = hipEventSynchronize(*pEvent); checkCudaErrors(result); if (result != 0) return 0L; else return 1L; } int NativeOps::getAvailableDevices() { int devCnt = 0; hipGetDeviceCount(&devCnt); return devCnt; } void NativeOps::enableDebugMode(bool reallyEnable) { debug = reallyEnable; } void NativeOps::setGridLimit(int gridSize) { if (gridSize > 8192) gridSize = 8192; if (gridSize < 1) gridSize = 1; blockLimit = gridSize; } int NativeOps::ompGetMaxThreads() { return maxThreads; } int NativeOps::ompGetNumThreads() { return maxThreads; } void NativeOps::setOmpNumThreads(int threads) { if (threads > 1024) threads = 1024; if (threads < 32) threads = 32; maxThreads = threads; } void NativeOps::enableVerboseMode(bool reallyEnable) { verbose = reallyEnable; } int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].major; } int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].minor; } const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) { int device = getDeviceId(ptrToDeviceId); return deviceProperties[device].name; } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concatFloat( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && 
shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (debug && verbose) printf("Going scalar concat\n"); smem = funcAttributes[38].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelScalarFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (debug && verbose) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelVStackFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (debug && verbose) printf("Going HStack concat\n"); smem = funcAttributes[42].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelHStackFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (debug && verbose) printf("Going generic concat\n"); smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); int *devZOffsets = reinterpret_cast<int *>(extraPointers[11]); hipLaunchKernelGGL(( concatKernelFloat), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (debug && verbose) printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::concatHalf( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { 
isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (debug && verbose) printf("Going scalar concat\n"); smem = funcAttributes[38].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelScalarHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (debug && verbose) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelVStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (debug && verbose) printf("Going HStack concat\n"); smem = funcAttributes[42].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelHStackHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (debug && verbose) printf("Going generic concat\n"); smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); int *devZOffsets = reinterpret_cast<int *>(extraPointers[11]); hipLaunchKernelGGL(( concatKernelHalf), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (debug && verbose) printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(hipStreamSynchronize(*stream)); } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concatDouble( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (debug && verbose) printf("Going scalar concat\n"); smem = 
funcAttributes[39].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelScalarDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (debug && verbose) printf("Going VStack concat\n"); smem = funcAttributes[41].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelVStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (debug && verbose) printf("Going HStack concat\n"); smem = funcAttributes[43].sharedSizeBytes; hipLaunchKernelGGL(( concatKernelHStackDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (debug && verbose) printf("Going generic concat\n"); smem = nd4j::math::nd4j_max<int>(funcAttributes[35].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); int *devZOffsets = reinterpret_cast<int *>(extraPointers[11]); hipLaunchKernelGGL(( concatKernelDouble), dim3(128), dim3(128), smem, *stream, dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (debug && verbose) printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(hipStreamSynchronize(*stream)); } /** * This method saves */ void NativeOps::tadOnlyShapeInfo(int *xShapeInfo, int *dimension, int dimensionLength, int *target, int *offsets) { shape::TAD *tad = new shape::TAD(); tad->init(xShapeInfo, dimension, dimensionLength); //tad->setOutputBuffer(target); tad->createTadOnlyShapeInfo(); tad->createOffsets(); std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int)); std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(int)); /* shape::printShapeInfoLinear(hostXShapeInfo); shape::printShapeInfoLinear(tad->tadOnlyShapeInfo); shape::printShapeInfoLinear(target); */ delete tad; } int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) { hipStream_t *pStream = reinterpret_cast<hipStream_t *>(&reserved); hipMemcpyKind kind; if (debug) checkCudaErrors(hipStreamSynchronize(*pStream)); switch (flags) { case 0: { kind = hipMemcpyHostToHost; } break; case 1: { kind = hipMemcpyHostToDevice; } break; case 2: { kind = hipMemcpyDeviceToHost; } case 3: { kind = hipMemcpyDeviceToDevice; } break; } //hipError_t result = hipMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream); hipError_t result = hipMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream); checkCudaErrors(result); if (result != 0) { printf("Symbol failed on [%lu] -> [%lu], size: [%i], direction: [%i]\n", src, dst, size, flags ); return 0L; } else return 1; } Nd4jPointer NativeOps::getConstantSpace() { Nd4jPointer dConstAddr; hipError_t result = hipGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory); return dConstAddr; } void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, int 
*xShapeInfo, float16 *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, int *tadOffsets, int *zTadShapeInfo, int *zTadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipLaunchKernelGGL(( pullRowsKernelHalf), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, float *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, int *tadOffsets, int *zTadShapeInfo, int *zTadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipLaunchKernelGGL(( pullRowsKernelFloat), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, double *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, int *tadOffsets, int *zTadShapeInfo, int *zTadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipLaunchKernelGGL(( pullRowsKernelDouble), dim3(64), dim3(256), 1024, *stream, x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length, bool propagate) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); float16 **x = reinterpret_cast<float16 **>(dx); if (debug && verbose) printf("averageHalf called\n"); dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]); hipLaunchKernelGGL(( averagingKernelHalf), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length, bool propagate) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); float **x = reinterpret_cast<float **>(dx); if (debug && verbose) printf("averageFloat called\n"); dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]); hipLaunchKernelGGL(( averagingKernelFloat), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length, bool propagate) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); double **x = reinterpret_cast<double **>(dx); if (debug && verbose) printf("averageDouble called\n"); dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]); hipLaunchKernelGGL(( averagingKernelDouble), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, dz, n, length, propagate); checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); double **x = 
reinterpret_cast<double **>(dx); double **z = reinterpret_cast<double **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); int **tadOffset = reinterpret_cast<int **>(tadOffsets); hipLaunchKernelGGL(( shuffleKernelDouble), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); float **x = reinterpret_cast<float **>(dx); float **z = reinterpret_cast<float **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); int **tadOffset = reinterpret_cast<int **>(tadOffsets); hipLaunchKernelGGL(( shuffleKernelFloat), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); float16 **x = reinterpret_cast<float16 **>(dx); float16 **z = reinterpret_cast<float16 **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); int **tadOffset = reinterpret_cast<int **>(tadOffsets); hipLaunchKernelGGL(( shuffleKernelHalf), dim3(32), dim3(128), 1024, *stream, x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int xStride, float *dy, int yStride, float *dz, int zStride, float *extraA, float *extraB, float scalarA, float scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // metaPredicateStridedFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int xStride, double *dy, int yStride, double *dz, int zStride, double *extraA, double *extraB, double scalarA, double scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // metaPredicateStridedDouble<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (opTypeA == 2) { if (opTypeB == 0) { 
DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int xStride, float16 *dy, int yStride, float16 *dz, int zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // metaPredicateStridedHalf<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, int *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) { // no-op hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); /* metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) { */ // metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned); if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) { // no-op; hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { // no-op; hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); // we have to converf float -> fp16 prior to kernel call float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; if (opTypeA == 2) { if (opTypeB == 0) { 
DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) { // no-op; hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extras[1]); if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } bool NativeOps::isExperimentalEnabled() { return experimentalSupport; } void NativeOps::setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int NativeOps::getDevice() { int curDevice = -1; hipGetDevice(&curDevice); return curDevice; } void NativeOps::setElementThreshold(int num) { // this is no-op for CUDA } void NativeOps::setTADThreshold(int num) { // this is no-op for CUDA } void NativeOps::execScalarFloat(Nd4jPointer *extraPointers,int opNum, float *x, int *xShapeInfo, float *z, int *zShapeInfo, float *scalars, float *extraParams, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTadShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *tadOffsets = reinterpret_cast<int *>(extraPointers[11]); int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *tadOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]),hostXShapeInfo, hostTadShapeInfo, funcAttributes[47] ,dimensionLength, sizeof(float), 0); // this macro builds bunch of IF/ELSE selectors for kernel launc h DISPATCH_SIMPLE(scalarAlongDimension, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execScalarDouble(Nd4jPointer *extraPointers,int opNum, double *x, int *xShapeInfo, double *z, int *zShapeInfo, double *scalars, double *extraParams, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 1024); int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *tadOffsets = reinterpret_cast<int *>(extraPointers[11]); int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *tadOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarAlongDimension, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void 
NativeOps::execScalarHalf(Nd4jPointer *extraPointers,int opNum, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, float16 *scalars, float16 *extraParams, int *dimension, int dimensionLength) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(256, 256, 1024); int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *tadOffsets = reinterpret_cast<int *>(extraPointers[11]); int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *tadOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarAlongDimension, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers,int opNum, float **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float *realArguments, int numRealArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers,int opNum, double **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, double *realArguments, int numRealArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers,int opNum, float16 **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float16 *realArguments, int numRealArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numBlocks, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS)) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int 
numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) { // not implemented yet hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); int numBlocks = getDeviceId(extraPointers[2]); int numThreads = getDeviceId(extraPointers[3]); int shmem = getDeviceId(extraPointers[4]); dim3 launchDims = dim3(numAggregates, numThreads, shmem); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, int *zShapeBuffer, float *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (stateHost); Nd4jPointer state = buffer->getDevicePointer(); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, float, PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *y, int *yShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (stateHost); 
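    // stateHost wraps both the host- and device-side random state; the kernel below is
    // launched with the device pointer extracted from that wrapper on the next line.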
Nd4jPointer state = buffer->getDevicePointer(); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, float, PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)) ); nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (stateHost); Nd4jPointer state = buffer->getDevicePointer(); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, float, PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, int *zShapeBuffer, double *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, double, PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *y, int *yShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, double, PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double))); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, double, PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, int *zShapeBuffer, float16 *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomSingle, float16, PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *y, int *yShapeBuffer, float16 *z, int 
*zShapeBuffer, float16 *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomTriple, float16, PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) { hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16))); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(randomDouble, float16, PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS)) if (debug) checkCudaErrors(hipStreamSynchronize(*stream)); } Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) { unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); // we don't synchronize at random initialization, it's safe to go unsync here // hipStreamSynchronize(*stream); unsigned long long *ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer); nd4j::random::RandomBuffer *buffer = new nd4j::random::RandomBuffer(seed, bufferSize, (uint64_t *) ptrHost, (uint64_t *) ptrDev); buffer->propagateToDevice(buffer, *stream); checkCudaErrors(hipStreamSynchronize(*stream)); // we generate sequence in the host memory nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // and copy it to gpu hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream); return buffer; } void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrBuffer); // FIXME: it's bad thing, but we can't know in advance, which stream(s) where using this generator in practice hipDeviceSynchronize(); delete buffer; } void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipStreamSynchronize(*stream); uint64_t *ptrDev = buffer->getDeviceBuffer(); // update rng state buffer->setSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); // refresh buffer on host size nd4j::random::Xoroshiro128 generator(buffer); generator.refreshBuffer(); // copy back to gpu hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream); } void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) { nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *> (ptrRandom); hipStream_t *stream = reinterpret_cast<hipStream_t *>(&extraPointers[1]); hipStreamSynchronize(*stream); // update rng state buffer->reSeed(seed); buffer->setOffset(0); buffer->propagateToDevice(buffer, *stream); }
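
// Typical host-side ordering of the RNG entry points above (a minimal sketch only;
// `nativeOps`, `extraPointers`, `z`, `zShapeBuffer`, `bufferSize` and `ptrToBuffer` are
// assumed to be set up by the caller and are illustrative, not part of this file):
//
//   Nd4jPointer state = nativeOps.initRandom(extraPointers, 119L, bufferSize, ptrToBuffer);
//   nativeOps.execRandomFloat(extraPointers, opNum, state, z, zShapeBuffer, extraArguments);
//   nativeOps.reSeedBuffer(extraPointers, 123L, state);  // or refreshBuffer() to regenerate the host sequence
//   nativeOps.destroyRandom(state);                      // synchronizes the device before freeing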
25f75d1c7760f0af39f58c271b9e95694488008c.cu
#include "../NativeOps.h" #include <cuda.h> #include <cuda_launch_config.h> #include <buffer.h> #include <helpers/shape.h> #include <loops/reduce3.h> #include <loops/reduce.h> #include <loops/indexreduce.h> #include <loops/pairwise_transform.h> #include <loops/transform.h> #include <loops/scalar.h> #include <loops/broadcasting.h> #include <loops/summarystatsreduce.h> #include <loops/random.h> #include <thread> #include <map> #include <cuda.h> #include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <cuda_device_runtime_api.h> #include <pointercast.h> #include <stdio.h> #include <stdlib.h> #include <loops/type_conversions.h> #include <op_boilerplate.h> #include <loops/grid.h> #include <loops/aggregates.h> //#include <sys/time.h> #include <curand.h> cudaDeviceProp *deviceProperties; cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64]; int blockLimit = 128; int maxThreads = 512; bool allowedP2P = false; bool supportedP2P = false; #ifdef __EXPERIMENTAL__ bool experimentalSupport = true; #else bool experimentalSupport = false; #endif int minThreads = 32; __constant__ char deviceConstantMemory[49152]; typedef struct { long streamId; long callId; } __syncInfo; typedef __syncInfo SyncInfo; // this method isn't used, left here for legacy and caution purposes // TLDR: don't use this way, it sucks void CUDART_CB syncCallback(cudaStream_t stream, cudaError_t status, void *data){ SyncInfo *sync = (SyncInfo *) data; printf("Finished stream: [%i], kernel call: [%i]\n", sync->streamId, sync->callId); } // this method just does type conversion in fancy way int getDeviceId(Nd4jPointer ptrToDeviceId) { return (int)(Nd4jIndex)ptrToDeviceId; } template <typename T> dim3 getOptimalDimensions(Nd4jIndex n,cudaFuncAttributes attributes, cudaDeviceProp properties) { // we can combine the two to compute a block size int num_threads = block_size_with_maximum_potential_occupancy(attributes, properties); // no real sense launching more threads, then number of elements we have if (num_threads > n) num_threads = n; if (maxThreads > 0 && num_threads > maxThreads) num_threads = maxThreads; // compute the number of blocks of size num_threads to launch int num_blocks = n / num_threads; // check for partial block at the end if (num_blocks > blockLimit) num_blocks = blockLimit; if (num_blocks < 4 && n > 128) { num_blocks = 4; num_threads = n / num_blocks; } if (num_threads >= 768) { num_blocks = num_blocks * 2; num_threads = num_threads / 2; } if(n % num_threads && num_blocks < blockLimit) ++num_blocks; //(num_threads * sizeof(T)) + attributes.sharedSizeBytes); return dim3(num_blocks,num_threads, 3000); } int getBaseMemorySize(int xRank, cudaFuncAttributes funcAttr) { int memory_limit = 256; //funcAttr.sharedSizeBytes; // TODO: remove this later memory_limit += sizeof(UnifiedSharedMemory) + 32; // sizeof(shape::TAD) + (xRank * 4 * 4) /* if (xRank == 0) xRank = 2; memory_limit += (xRank * 2 + 4) * 3 * 4; // we reserve memory for xShape + T1/T2 shapes memory_limit += yRank == 0 ? 0 : (yRank * 2 + 4) * 4; memory_limit += zRank == 0 ? 
0 : (zRank * 2 + 4) * 4; memory_limit += (xRank * 4) * 6; memory_limit += MAX_RANK * 4; // special case, needed roughtly in one pase */ return memory_limit; } /* * Basic CUDA constants here: number of blocks per MP */ int getDeviceBlockThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; int blockThreshold = 8; if (ccMajor >= 5) blockThreshold = 32; else if (ccMajor == 3) blockThreshold = 16; else if (ccMajor < 3) blockThreshold = 8; return blockThreshold; } dim3 getBasicLaunchParams(int deviceId, long problemLength, int sharedMemoryPerThread, cudaFuncAttributes funcAttr) { int countMP = deviceProperties[deviceId].multiProcessorCount; int blockThreshold = getDeviceBlockThreshold(deviceId); int num_threads = problemLength / (countMP * blockThreshold); num_threads = nd4j::math::nd4j_min<int>(num_threads, maxThreads); num_threads = nd4j::math::nd4j_max<int>(num_threads, 64); num_threads = nd4j::math::nd4j_max<int>(num_threads, minThreads); int num_blocks = nd4j::math::nd4j_max<int>(problemLength / num_threads, 1); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); int memory_limit = (sharedMemoryPerThread * num_threads) + getBaseMemorySize(1, funcAttr); dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (debug && verbose) printf("Preliminary basic launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i]\n", num_blocks, num_threads, memory_limit); return launchDims; } /* * This message returns shared memory threshold value. default overflow ratio is 0.3 */ int getDeviceSharedThreshold(int deviceId) { int ccMinor = deviceProperties[deviceId].minor; int ccMajor = deviceProperties[deviceId].major; // please note threshold isn't multiple of 32, and that's NOT a mistake int shmemThreshold; if (ccMajor == 6 && ccMinor == 0) shmemThreshold = 65536; else if (ccMajor == 6 && ccMinor == 1) shmemThreshold = 49152; else if (ccMajor == 5 && ccMinor == 2) shmemThreshold = 98304; else if (ccMajor == 5) shmemThreshold = 65536; else if (ccMajor == 3 && ccMinor == 7) shmemThreshold = 114688; else shmemThreshold = 49152; return shmemThreshold / 0.3; } dim3 getBetterDimensions(int deviceId, int numTads, int tadLength, int xRank, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reduction) { int num_threads = nd4j::math::nd4j_min<int>(tadLength, maxThreads); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int warpSize = deviceProperties[deviceId].warpSize; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); // round num_threads to nearest warpSize num_threads -= num_threads % warpSize; num_threads = nd4j::math::nd4j_max<int>(1, num_threads); if (num_threads < warpSize && tadLength < warpSize) num_threads = tadLength; // since we use shared memory as fast memory for some cases - we need to count that in int memory_limit = getBaseMemorySize(xRank, funcAttr); int memory_floor = memory_limit; int effective_block_limit = countMP * blockThreshold; int num_blocks = numTads; //nd4j::math::nd4j_min<int>(numTads, effective_block_limit); int desiredShared = shmemThreshold / nd4j::math::nd4j_max<int>((num_blocks / countMP), 1); if (debug && verbose) printf("Launch context: numBlocks: [%i], numThreads: [%i], countMap: [%i], shmemThreshold: [%i], desiredShared: [%i], elementSize: [%i]\n", num_blocks, num_threads, countMP, shmemThreshold, desiredShared, elementSize); // at 
this moment we've stored all required information for things. time to count in reduction multipliers int reduction_per_block = 0; bool found = false; if (reduction > 0) while (!found) { reduction_per_block = (num_threads * elementSize * reduction); if (memory_limit + reduction_per_block < desiredShared) { memory_limit += reduction_per_block; found = true; } else { if (num_threads > minThreads) { num_threads -= 32; } else { memory_limit += reduction_per_block; found = true; } } } // at this moment we know total memory used per block, and we also know per-mp limit. int max_active_blocks = shmemThreshold / nd4j::math::nd4j_max<int>(memory_limit, 1); if (debug && verbose) printf("MAB: [%i], memory_floor: [%i], memory_limit: [%i], reductionPerBlock: [%i]\n", max_active_blocks, memory_floor, memory_limit, reduction_per_block); // we don't want to spawn more blocks, that gpu can actually handle without queue //num_blocks = nd4j::math::nd4j_min<int>(num_blocks, max_active_blocks); num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // if (num_blocks > countMP) // num_blocks = num_blocks - (num_blocks % countMP); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } reduction_per_block = (num_threads * elementSize * reduction); memory_limit = memory_floor + reduction_per_block; } if (debug && verbose) printf("Preliminary reduce launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], reduction_per_block: [%i], blocksPerMP: [%i]\n", num_blocks, num_threads, memory_limit, reduction_per_block, targetBlocksPerMP); return dim3(num_blocks,num_threads, memory_limit); } /* * This method returns kernel launch param for linear memory access */ dim3 getFlatLaunchParams(int deviceId, int *xShapeInfo, int *yShapeInfo, cudaFuncAttributes funcAttr) { int xRank = shape::rank(xShapeInfo); int yRank = yShapeInfo == nullptr ? 0 : shape::rank(yShapeInfo); int zRank = 0; int memory_limit = getBaseMemorySize(xRank, funcAttr); int countMP = deviceProperties[deviceId].multiProcessorCount; int regPerBlock = deviceProperties[deviceId].regsPerBlock; int blockThreshold = getDeviceBlockThreshold(deviceId); int shmemThreshold = getDeviceSharedThreshold(deviceId); int xLength = shape::length(xShapeInfo); int effective_block_limit = countMP * blockThreshold; // for flat calls we just want as much concurrent blocks, as possible, and we're not tied to TAD here int num_threads = xLength / effective_block_limit; if (num_threads < minThreads) num_threads = minThreads; num_threads = num_threads - (num_threads % 32); int memory_floor = memory_limit; int num_blocks = xLength / num_threads; num_blocks = nd4j::math::nd4j_min<int>(num_blocks, blockLimit); // num_blocks = nd4j::math::nd4j_min<int>(num_blocks, effective_block_limit); num_blocks = nd4j::math::nd4j_max<int>(num_blocks, 1); int targetBlocksPerMP = num_blocks / countMP; // now we know desired number of blocks wrt to shared memory. 
So, now we should take in account number of threads per SM if (targetBlocksPerMP * num_threads > 2048 && num_threads >= 128) { while (targetBlocksPerMP * num_threads > 2048) { if (num_threads <= minThreads) break; num_threads -= 32; } } if (xLength / num_threads > blockLimit) num_blocks *= 2; dim3 launchDims = dim3(num_blocks, num_threads, memory_limit); if (debug && verbose) printf("Preliminary scalar launch params: gridSize: [%i], blockSize: [%i], base shmem: [%i], blocksPerMP: [%i], problemLength: [%i], effectiveBlockLimit: [%i]\n", num_blocks, num_threads, memory_limit, targetBlocksPerMP, xLength, effective_block_limit); return launchDims; } /** * This method returns kernel launch params with TAD-based memory access * * @param deviceId * @param xShapeInfo * @param tadShapeInfo * @param funcAttr * @param dimensionLength * @param elementSize * @param reductionSize * @return */ dim3 getReduceLaunchParams(int deviceId, int *xShapeInfo, int *tadShapeInfo, cudaFuncAttributes funcAttr, int dimensionLength, int elementSize, int reductionSize) { int tadLength = 0; int numTads = 0; if (tadShapeInfo != nullptr) { tadLength = shape::length(tadShapeInfo); numTads = shape::length(xShapeInfo) / tadLength; if (tadLength == 1) { if (debug && verbose) printf("A xLength: [%i], zLength: [%i]\n", shape::length(xShapeInfo), shape::length(tadShapeInfo)); } } else{ // we have special case - reduction along all dimensions tadLength = nd4j::math::nd4j_min<int>(shape::length(xShapeInfo), 768); numTads = shape::length(xShapeInfo) / tadLength; } int xRank = shape::rank(xShapeInfo); int zRank = tadShapeInfo == nullptr ? 0 : shape::rank(tadShapeInfo); dim3 launchDims = getBetterDimensions(deviceId, numTads, tadLength, xRank, funcAttr, dimensionLength, elementSize, reductionSize); if ((debug && verbose ) ) { //|| launchDims.x == 1 printf("Reduce LaunchParams: xLength: [%i], numTads: [%i], tadLength: [%i], launchDims.x: [%i], launchDims.y: [%i], launchDims.z: [%i]\n", shape::length(xShapeInfo), numTads, tadLength, launchDims.x, launchDims.y, launchDims.z); } return launchDims; } /** * Returns optimal launch parameters * given the extra pointers passed in. * The extra pointer should be * the host pointer for the shape information * associated with the data. * From there it is used to obtain the length * from which we can derive the optimal launch parameters. 
* */ template <typename T> dim3 getOptimalLaunchParameters(Nd4jPointer *extraPointers, cudaFuncAttributes attributes, cudaDeviceProp properties) { int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); dim3 launchDims = getOptimalDimensions<T>(n,attributes, properties); if (debug && verbose) printf("Params: gridSize: [%i], blockSize: [%i], shMem: [%i], problemLength: [%i], totalThreads:[%i]\n", launchDims.x, launchDims.y, launchDims.z, n, (launchDims.x * launchDims.y)); return launchDims; } nd4j::buffer::Buffer<int> * createScalarBuffer(cudaStream_t stream) { int *scalarShapeInfo = shape::createScalarShapeInfo(); nd4j::buffer::Buffer<int> *buff = nd4j::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream); nd4j::buffer::copyDataToGpu(&buff, stream); return buff; } class ScalarShapeInformation { private: nd4j::buffer::Buffer<int> *scalarDimension; nd4j::buffer::Buffer<int> *scalarShapeInfo; std::thread::id threadId; public: ScalarShapeInformation(cudaStream_t stream) { int *scalarDimensionBuff = (int *) malloc(sizeof(int)); scalarDimensionBuff[0] = MAX_DIMENSION; scalarDimension = nd4j::buffer::createBuffer(scalarDimensionBuff,1, stream); scalarShapeInfo = createScalarBuffer(stream); threadId = std::this_thread::get_id(); } ~ScalarShapeInformation() { nd4j::buffer::freeBuffer(&scalarShapeInfo); nd4j::buffer::freeBuffer(&scalarDimension); } int *getShapeInfoHostPointer() { return scalarShapeInfo->data; } int * getShapeInfoGpuPointer() { return scalarShapeInfo->gData; } int * getDimensionHostPointer() { return scalarDimension->data; } int * getDimensionGpuPointer() { return scalarDimension->gData; } }; template <typename T> class ScalarInfo { nd4j::buffer::Buffer<T> *scalarData; ScalarShapeInformation *shapeInfo; T finalResult; cudaStream_t streamRef; public: ScalarInfo(cudaStream_t stream) { T *scalarResult = (T*)malloc(sizeof(T)); shapeInfo = new ScalarShapeInformation(stream); scalarData = nd4j::buffer::createBuffer(scalarResult,1, stream); streamRef = stream; nd4j::buffer::copyDataToGpu(&scalarData, stream); } T getFinalResultFromDevice() { nd4j::buffer::copyDataFromGpu(&scalarData, streamRef); return scalarData->data[0]; } /** * Get the device shape information * representing a scalar */ int *getDeviceShapeInfo() { return shapeInfo->getShapeInfoGpuPointer(); } /** * Get the result pointers */ T *getDevicePointer() { return scalarData->gData; } /** * Get the infinite dimension device pointer */ int *getDimensionDevicePointer() { return shapeInfo->getDimensionGpuPointer(); } ~ScalarInfo() { nd4j::buffer::freeBuffer(&scalarData); delete shapeInfo; } }; /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execIndexReduceScalarDouble(Nd4jPointer *extraPointers,int opNum, double *x, int *xShapeInfo, double *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D1 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = 
getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], 1, sizeof(double), 2); indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); checkCudaErrors(cudaStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D2 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[27], dimensionLength, sizeof(double), 2); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); indexReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastDouble(Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *deviceTADOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("D3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[26], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, double, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** 
* * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *y, int yStride, double *result, int resultStride, double *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("D4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[25]); pairWiseTransformStridedDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>> ( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param xIndexes * @param yIndexes * @param resultIndexes */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("D5 opNum:[%i]\n", opNum); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[24]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); pairWiseTransformDoubleIndex <<<launchDims.x, launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execPairwiseTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, double *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D6 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[23]); pairWiseTransformDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, 
extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D7 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D8 opNum:[%i]\n", opNum); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); /** * We have separate kernels, optimized for different number of dimensions for reductions */ if (dimensionLength == 1) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[32], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[33], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, double, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], dimensionLength, sizeof(double), 2); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, double, PARAMS(x, xShapeInfo, extraParams, result, 
resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ double NativeOps::execReduceScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("D9 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[22], 1, sizeof(double), 1); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, double, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("D10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ double NativeOps::execReduce3ScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo){ if (debug && verbose) printf("D11 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); int *yDeviceTADShapeInfo = reinterpret_cast<int 
*>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); reduce3ScalarDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // since this method should return scalar value - we should block on this call checkCudaErrors(cudaStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execReduce3Double( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *y, int *yShapeInfo, double *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D12 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[21]); reduce3Double<<<1,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xStride * @param result * @param resultStride * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int xStride, double *result, int resultStride, double scalar, double *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D13 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[20]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleStrided, double, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int *resultShapeInfo, double scalar, double *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = 
reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("D14 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[19]); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleShaped, double, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *result, int *resultShapeInfo, double scalar, double *extraParams, Nd4jIndex n, int *xIndexes, int *resultIndexes){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("D15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[18]); scalarDoubleIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ double NativeOps::execSummaryStatsScalarDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D16 opNum:[%i]\n", opNum); double *resultPointer = reinterpret_cast<double *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // this is blocking method since method should return scalar checkCudaErrors(cudaStreamSynchronize(*stream)); double result = resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo,bool biasCorrected) { if (debug && verbose) printf("D17 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); 
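    // extraPointers slot layout, as unpacked by the casts throughout this file:
    //   [0] host X shape info        [1] stream                   [2] device id
    //   [3] allocation pointer       [4] reduction buffer         [5] scalar result buffer
    //   [6] special buffer           [7] host Y shape info        [8] host Z shape info
    //   [9] host TAD shape info      [10] device TAD shape info   [11] device TAD offsets
    //   [12]/[13] device TAD shape info/offsets for the second (Y or Z) operand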
int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], 1, sizeof(double), 8); // we have to limit grid size here, due to limited nature of reduction/allocation pointers launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsDouble( Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *extraParams, double *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("D18 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[17], dimensionLength, sizeof(double), 8); // we're limiting maximum grid size for summaryStats ops launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceDouble<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int xStride, double *z, int zStride, double *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("D19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[16]); // this macro builds bunch 
of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, double, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformDouble( Nd4jPointer *extraPointers, int opNum, double *dx, int *xShapeInfo, double *result, int *resultShapeInfo, double *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); if (debug && verbose) printf("D20 opNum:[%i]\n", opNum); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]); int *maskedAllocPointer = allocPointer; // special pointer for special buffer for special ops double *specialPointer = reinterpret_cast<double *>(extraPointers[6]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; double * special = (double *) maxShapeBuffer + (MAX_RANK * 2 + 4); /** * ops between 38 and 41 are special ops: * SoftMax, LogSoftMax, SoftMaxDerivative, IsMax * On cuda we execute them as */ // simple trick to get workaround over reductions into scalar if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block /* * For vector cases of everything, but IsMax (41) we go for single-kernel calls */ int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(256, length); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(double) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials // we'll do some pointers mangling here, and execute kernels one by one int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// SoftMax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; // TODO: we could get rid of this one eventually prepareShapeBuffer <<<1, 1, 128, *stream>>> (dimension, maxDimension, maxShapeBuffer, 
shape[0]); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceDouble(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastDouble(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); // exp 3 execTransformDouble(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceDouble(tempPointers, 1, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastDouble(tempPointers, 3, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); // log 3 if (opNum == 40) execTransformDouble(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams); else if (opNum == 39) execTransformDouble(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { /** * In case of vector-input for IsMax, it just turns into IndexReduce call + further filler call */ int maxIdx = (int) execIndexReduceScalarDouble(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; fillIsMaxDouble<<< 1, 128, 0, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx); } else { int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<double *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceDouble(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute filler fillDimensionalIsMaxDouble<<<blockLimit, 64, funcAttributes[37].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(cudaStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformDouble\n"); break; } } } } else { // for Im2Col & Col2Im we enforce higher dimensionality // TODO: investigate this on high-end gpus if (opNum == 37 || opNum == 36) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 768; } // Histogram op requires additional memory chunk // FIXME: make 
        if (opNum == 48) {
            int length = shape::length(hostZShapeInfo);
            cudaMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(double));
        }

        // this macro builds bunch of IF/ELSE selectors for kernel launch
        DISPATCH_SIMPLE(transformShaped, double, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer), OPS_A(TRANSFORM_OPS))

        // we need guaranteed sync here, due to temp memory release
        if (debug || opNum == 48)
            checkCudaErrors(cudaStreamSynchronize(*stream));

        // release Histogram memory
        if (opNum == 48) {
            cudaFree((void *)maskedAllocPointer);
        }
    }

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

/**
 *
 * @param opNum
 * @param dx
 * @param xShapeInfo
 * @param result
 * @param resultShapeInfo
 * @param extraParams
 * @param n
 */
void NativeOps::execTransformDouble(
        Nd4jPointer *extraPointers,
        int opNum,
        double *dx,
        int *xShapeInfo,
        double *result,
        int *resultShapeInfo,
        double *extraParams,
        int *xIndexes,
        int *resultIndexes) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);

    if (debug && verbose)
        printf("D21 opNum:[%i]\n", opNum);

    int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);
    double *reductionPointer = reinterpret_cast<double *>(extraPointers[4]);

    dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[14]);

    transformDoubleIndexes<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
            opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer);

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

/**
 *
 * @param opNum
 * @param x
 * @param xShapeInfo
 * @param extraParams
 */
float NativeOps::execIndexReduceScalarFloat(
        Nd4jPointer *extraPointers,
        int opNum,
        float *x,
        int *xShapeInfo,
        float *extraParams) {
    if (debug && verbose)
        printf("F1 opNum:[%i]\n", opNum);

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]);
    int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]);
    int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
    int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
    int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]);

    float *resultPointer = reinterpret_cast<float *>(extraPointers[5]);
    int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]);
    float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]);

    dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float), 2);

    if (debug && verbose && launchDims.x == 1)
        printf("AF1 opNum:[%i]\n", opNum);

    indexReduceFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
            opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets);

    // once again - since we return scalar value in this method, we should block this kernel launch
    checkCudaErrors(cudaStreamSynchronize(*stream));

    float result = resultPointer[0];
    return result;
}

float NativeOps::execIndexReduceScalarHalf(
        Nd4jPointer *extraPointers,
        int
opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ if (debug && verbose) printf("H1 opNum:[%i]\n", opNum); cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], 1, sizeof(float16), 2); if (debug && verbose && launchDims.x == 1) printf("AH1 opNum:[%i]\n", opNum); indexReduceHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking for scalar output checkCudaErrors(cudaStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execIndexReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F2 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float), 2); if (verbose && launchDims.x == 1) printf("AF2 opNum:[%i]\n", opNum); indexReduceFloat<<<launchDims.x, launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execIndexReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H2 opNum:[%i]\n", opNum); int *allocationPointer = 
reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[13], dimensionLength, sizeof(float16), 2); if (verbose && launchDims.x == 1) printf("AH2 opNum:[%i]\n", opNum); indexReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execBroadcastFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ /* cudaEvent_t start; cudaEventCreateWithFlags(&start, cudaEventDisableTiming); timespec tsX; timespec tsY; clock_gettime(CLOCK_REALTIME, &tsX); */ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *deviceTADOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("F3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, float, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) /* SyncInfo *info = new SyncInfo(); info->streamId = 32; info->callId = 1234567890; timespec ts1; timespec ts2; clock_gettime(CLOCK_REALTIME, &ts1); */ /* broadcastFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), y, yShapeInfo, shape::rank(hostYShapeInfo), result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ); */ /* clock_gettime(CLOCK_REALTIME, &ts2); // cudaEventRecord(start, 0); // cudaStreamAddCallback(*stream, syncCallback, (void*)info, 0); */ if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); /* clock_gettime(CLOCK_REALTIME, &tsY); printf("Execution time: %i\n", (ts2.tv_nsec - ts1.tv_nsec)); printf("Overall time: %i\n", (tsY.tv_nsec - tsX.tv_nsec)); printf("Callback setup time: %i\n", (tsY.tv_nsec - ts2.tv_nsec)); printf("-------------------------------------\n"); */ } void NativeOps::execBroadcastHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = 
reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *deviceTADShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]); int *deviceTADOffsetsZ = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("H3 opNum:[%i]\n", opNum); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[12], 1, sizeof(float16), 0); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(broadcastSimple, float16, PARAMS(x, xShapeInfo, y, yShapeInfo, result, resultShapeInfo, dimension, dimensionLength, deviceTADShapeInfo, deviceTADOffsets, deviceTADShapeInfoZ, deviceTADOffsetsZ), OPS_A(BROADCAST_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param y * @param yStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *y, int yStride, float *result, int resultStride, float *extraParams, Nd4jIndex n){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[11]); if (verbose && launchDims.x == 1) printf("AF4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); pairWiseTransformStridedFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); if (debug) { checkCudaErrors(cudaStreamSynchronize(*stream)); } } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *y, int yStride, float16 *result, int resultStride, float16 *extraParams, Nd4jIndex n){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H4 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[11]); if (verbose && launchDims.x == 1) printf("AH4 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); pairWiseTransformStridedHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, n, dx, y, xStride, yStride, extraParams, result, resultStride, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n * @param xIndexes * @param yIndexes * 
@param resultIndexes */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float), 0); if (verbose && launchDims.x == 1) printf("AF5 opNum:[%i]\n", opNum); pairWiseTransformFloatIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *yIndexes, int *resultIndexes){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H5 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[10], 1, sizeof(float16), 0); if (verbose && launchDims.x == 1) printf("AH5 opNum:[%i]\n", opNum); pairWiseTransformHalfIndex<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), xIndexes, yIndexes, resultIndexes, allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execPairwiseTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, float *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F6 opNum:[%i]\n", opNum); int 
*allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]); if (verbose && launchDims.x == 1) { printf("AF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y); shape::printShapeInfoLinear(hostXShapeInfo); } pairWiseTransformFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execPairwiseTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H6 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[9]); if (verbose && launchDims.x == 1) { printf("HF6 opNum:[%i], launchDims.x: [%i], launchDims.y: [%i]\n", opNum, launchDims.x, launchDims.y); shape::printShapeInfoLinear(hostXShapeInfo); } pairWiseTransformHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>( opNum, dx, y, extraParams, result, xShapeInfo, shape::rank(hostXShapeInfo), yShapeInfo, shape::rank(hostYShapeInfo), resultShapeInfo, shape::rank(hostZShapeInfo), allocationPointer, deviceTADShapeInfo); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F7 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float), 1); if (verbose && launchDims.x == 1) printf("AF7 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int 
*>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H7 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], 1, sizeof(float16), 1); if (verbose && launchDims.x == 1) printf("AH7 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execReduceFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension,int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F8 opNum:[%i]\n", opNum); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float), 1); if (verbose && launchDims.x == 1) printf("AF8 opNum:[%i]\n", opNum); // we call different kernels optimized for different number of dimensions in TAD if (dimensionLength == 1) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduceHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension,int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H8 opNum:[%i]\n", opNum); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), 
hostXShapeInfo, hostTADShapeInfo, funcAttributes[8], dimensionLength, sizeof(float16), 1); if (verbose && launchDims.x == 1) printf("AH8 opNum:[%i]\n", opNum); // calling different kernels, depending on number of dimensions in TAD if (dimensionLength == 1) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric1D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else if (shape::rank(hostTADShapeInfo) <= 3) { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGeneric3D, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } else { // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceSimpleGenericXD, float16, PARAMS(x, xShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, reductionPointer, deviceTADShapeInfo, deviceTADOffsets), OPS_A(REDUCE_OPS)) } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @return */ float NativeOps::execReduceScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F9 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[8]); if (verbose && launchDims.x == 1) printf("AF9 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking this one checkCudaErrors(cudaStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduceScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H9 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 2, funcAttributes[8]); if (verbose && launchDims.x == 1) printf("AH9 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(reduceScalarSimple, float16, PARAMS(x, xShapeInfo, extraParams, resultPointer, nullptr, nullptr,1 , reductionPointer, deviceTADShapeInfo), OPS_A(REDUCE_OPS)) // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param 
extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("F10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AF10 opNum:[%i]\n", opNum); reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduce3Half( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("H10 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AH10 opNum:[%i]\n", opNum); reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo */ float NativeOps::execReduce3ScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("F11 opNum:[%i]\n", opNum); float *resultPointer = 
reinterpret_cast<float *>(extraPointers[5]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 32, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AF11 opNum:[%i]\n", opNum); reduce3ScalarFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execReduce3ScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("H11 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AH11 opNum:[%i]\n", opNum); reduce3ScalarHalf<<<launchDims.x,launchDims.y,launchDims.z + 2048, *stream>>>( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, resultPointer, nullptr, nullptr, 1, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParamsVals * @param y * @param yShapeInfo * @param result * @param resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execReduce3Float( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *y, int *yShapeInfo, float *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("F12 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 16, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AF12 opNum:[%i]\n", opNum); if 
(shape::isScalar(hostZShapeInfo) || dimension == nullptr) { reduce3ScalarFloat << < launchDims.x, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } else { reduce3Float << < 1, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execReduce3Half( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *y, int *yShapeInfo, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); int *yDeviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[12]); int *yDeviceTADOffsets = reinterpret_cast<int *>(extraPointers[13]); if (debug && verbose) printf("H12 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostXShapeInfo), 8, funcAttributes[7]); if (verbose && launchDims.x == 1) printf("AH12 opNum:[%i]\n", opNum); if (shape::isScalar(hostZShapeInfo) || dimension == nullptr) { reduce3ScalarHalf<< < launchDims.x, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } else { reduce3Half<< < 1, launchDims.y, launchDims.z, *stream >> > ( opNum, x, xShapeInfo, y, yShapeInfo, extraParams, result, resultShapeInfo, dimension, dimensionLength, 1, allocationPointer, deviceTADShapeInfo, deviceTADOffsets, yDeviceTADShapeInfo, yDeviceTADOffsets); } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xStride * @param result * @param resultStride * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int xStride, float *result, int resultStride, float scalar, float *extraParams, Nd4jIndex n){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); if (debug && verbose) printf("F13 opNum:[%i]\n", opNum); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]); if (verbose && launchDims.x == 1) printf("AF13 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleStrided, float, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void 
NativeOps::execScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int xStride, float16 *result, int resultStride, float scalar, float16 *extraParams, Nd4jIndex n){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); if (debug && verbose) printf("F13 opNum:[%i]\n", opNum); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[6]); if (verbose && launchDims.x == 1) printf("AF13 opNum:[%i]\n", opNum); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleStrided, float16, PARAMS(n, scalar, x, xStride, extraParams, result, resultStride, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n */ void NativeOps::execScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *result, int *resultShapeInfo, float scalar, float *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); Nd4jIndex n = shape::length(hostXShapeInfo); if (debug && verbose) printf("F14 opNum:[%i]\n", opNum); //dim3 launchDims = getOptimalLaunchParameters<float>(&extraPointers[0], funcAttributes[5], deviceProperties[getDeviceId(extraPointers[2])]); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]); if (verbose && launchDims.x == 1) printf("AF14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleShaped, float, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *result, int *resultShapeInfo, float scalarF, float16 *extraParams){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); Nd4jIndex n = shape::length(hostXShapeInfo); if (debug && verbose) printf("H14 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[5]); float16 scalar = (float16) scalarF; if (verbose && launchDims.x == 1) printf("AH14 opNum:[%i], xLength:[%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(scalarSimpleShaped, float16, PARAMS(scalar, x, xShapeInfo, extraParams, result, resultShapeInfo, allocPointer), OPS_A(SCALAR_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param result * @param resultShapeInfo * @param scalar * @param extraParams * @param n * @param xIndexes * @param resultIndexes */ void NativeOps::execScalarFloat( 
Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *result, int *resultShapeInfo, float scalar, float *extraParams, int *xIndexes, int *resultIndexes){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); Nd4jIndex n = shape::length(hostXShapeInfo); if (debug && verbose) printf("F15 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[4]); if (verbose && launchDims.x == 1) printf("AF15 opNum:[%i]\n", opNum); scalarFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, n, scalar, x, extraParams, result, resultIndexes, allocPointer); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams */ float NativeOps::execSummaryStatsScalarFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("F16 opNum:[%i]\n", opNum); float *resultPointer = reinterpret_cast<float *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8); if (verbose && launchDims.x == 1) printf("AF16 opNum:[%i]\n", opNum); // we limit grid size for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = resultPointer[0]; return result; } float NativeOps::execSummaryStatsScalarHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); if (debug && verbose) printf("H16 opNum:[%i]\n", opNum); float16 *resultPointer = reinterpret_cast<float16 *>(extraPointers[5]); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float16), 8); if (verbose && launchDims.x == 1) printf("AH16 opNum:[%i]\n", opNum); launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( 
opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, resultPointer, nullptr, 0, nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); // blocking call checkCudaErrors(cudaStreamSynchronize(*stream)); float result = (float) resultPointer[0]; return result; } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param resultShapeInfo */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F17 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float), 8); if (verbose && launchDims.x == 1) printf("AF17 opNum:[%i]\n", opNum); // limiting number of blocks in grid, to match buffer memory size launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H17 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], 1, sizeof(float16), 8); if (verbose && launchDims.x == 1) printf("AH17 opNum:[%i]\n", opNum); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), nullptr, 1, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param x * @param xShapeInfo * @param extraParams * @param result * @param 
resultShapeInfo * @param dimension * @param dimensionLength */ void NativeOps::execSummaryStatsFloat( Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *extraParams, float *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("F18 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(float), 8); if (verbose && launchDims.x == 1) printf("AF18 opNum:[%i]\n", opNum); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceFloat<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execSummaryStatsHalf( Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *extraParams, float16 *result, int *resultShapeInfo, int *dimension, int dimensionLength,bool biasCorrected){ cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); int *hostTADShapeInfo = reinterpret_cast<int *>(extraPointers[9]); int *deviceTADShapeInfo = reinterpret_cast<int *>(extraPointers[10]); int *deviceTADOffsets = reinterpret_cast<int *>(extraPointers[11]); if (debug && verbose) printf("H18 opNum:[%i]\n", opNum); int *allocationPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTADShapeInfo, funcAttributes[3], dimensionLength, sizeof(float16), 8); if (verbose && launchDims.x == 1) printf("AH18 opNum:[%i]\n", opNum); // as everywhere else, we limit maximal number of blocks for SummaryStats calls launchDims.x = nd4j::math::nd4j_min<int>(512, launchDims.x); summaryStatsReduceHalf<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), dimension, dimensionLength, 1,biasCorrected, allocationPointer, reductionPointer, deviceTADShapeInfo, deviceTADOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xStride * @param result * @param resultStride * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int xStride, float *z, int zStride, float *extraParams, Nd4jIndex n) { cudaStream_t *stream = 
reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("F19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (verbose && launchDims.x == 1) printf("AF19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int xStride, float16 *z, int zStride, float16 *extraParams, Nd4jIndex n) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("H19 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[2]); if (verbose && launchDims.x == 1) printf("AH19 opNum:[%i], xLength: [%i]\n", opNum, shape::length(hostXShapeInfo)); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformStrided, float16, PARAMS(n, dx, xStride, extraParams, z, zStride, allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat(Nd4jPointer *extraPointers,int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("F20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); // special pointer for special buffer for special ops float *specialPointer = reinterpret_cast<float *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float * special = (float *) maxShapeBuffer + (MAX_RANK * 2 + 4); int *maskedAllocPointer = allocPointer; dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if (verbose && launchDims.x == 1) printf("AF20 opNum:[%i]\n", opNum); // simple trick to get workaround over reductions into scalar // that's special ops: SoftMax, SoftMaxDerivative, LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * 
sizeof(float) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceFloat(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastFloat(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); // exp 3 execTransformFloat(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceFloat(tempPointers, 1, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastFloat(tempPointers, 3, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformFloat(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams); else if (opNum == 39) execTransformFloat(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // if that's 1D input - 
we'll just go for single dim IMax op call + filler int maxIdx = (int) execIndexReduceScalarFloat(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; fillIsMaxFloat<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx); checkCudaErrors(cudaStreamSynchronize(*stream)); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<float *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceFloat(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute fillDimensionalIsMaxFloat<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(cudaStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformFloat\n"); break; } } } } else { // we're enforcing larger grids for Col2Im & Im2Col // TODO: for high-end gpus we might use higher values here if (opNum == 37 || opNum == 36) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 384; } // histogram op requies additional memory chunk :( if (opNum == 48) { int length = shape::length(hostZShapeInfo); cudaMalloc((void **) &maskedAllocPointer, length * launchDims.x * sizeof(float)); } DISPATCH_SIMPLE(transformShaped, float, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (debug || opNum == 48) checkCudaErrors(cudaStreamSynchronize(*stream)); // release memory chunk if (opNum == 48) { cudaFree((void *) maskedAllocPointer); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execTransformHalf(Nd4jPointer *extraPointers,int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); int *hostZShapeInfo = reinterpret_cast<int *>(extraPointers[8]); if (debug && verbose) printf("H20 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); int *maskedAllocPointer = allocPointer; float16 *specialPointer = reinterpret_cast<float16 *>(extraPointers[6]); int *dimension = (int *) specialPointer; int *maxDimension = dimension + 1; int *maxShapeBuffer = (int *) maxDimension + 1; float16 * special = (float16 *) maxShapeBuffer + (MAX_RANK * 2 + 4); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostZShapeInfo, funcAttributes[1]); if 
(verbose && launchDims.x == 1) printf("AH20 opNum:[%i]\n", opNum); // simple trick to get workaround over reductions into scalar // SoftMax, SoftMaxDerivative, LogSoftMax, IsMax if (opNum >= 38 && opNum <= 41) { if (shape::isVector(hostXShapeInfo) && opNum != 41) { // if that's vector, we just go directly to op in 1 block int length = shape::length(hostXShapeInfo); int block = nd4j::math::nd4j_min<int>(length, 256); launchDims.x = 1; launchDims.y = block; launchDims.z += (block * sizeof(float16) * 4); // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), allocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) } else { // going for blockwise specials int *shape = shape::shapeOf(hostXShapeInfo); switch (opNum) { case 40: // LogSoftMax case 39: // SoftMax Derivative case 38: {// softmax Nd4jPointer tempPointers[16]; tempPointers[0] = extraPointers[0]; tempPointers[1] = extraPointers[1]; tempPointers[2] = extraPointers[2]; tempPointers[3] = extraPointers[3]; tempPointers[4] = extraPointers[4]; tempPointers[5] = extraPointers[5]; tempPointers[6] = extraPointers[6]; tempPointers[7] = extraPointers[7]; tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[12]; tempPointers[13] = extraPointers[13]; tempPointers[14] = extraPointers[14]; tempPointers[15] = extraPointers[15]; int maxShape[2] = {shape::shapeOf(hostXShapeInfo)[0], 1}; int *hostMaxShapeBuffer = shape::shapeBuffer(2, maxShape); tempPointers[7] = (Nd4jPointer) hostMaxShapeBuffer; tempPointers[8] = (Nd4jPointer) hostMaxShapeBuffer; // FIXME: fix this prepareShapeBuffer <<< 1, 1, 128, *stream >>> (dimension, maxDimension, maxShapeBuffer, shape[0]); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); //shape::printShapeInfo(maxShapeBuffer); tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; // max 3 execReduceHalf(tempPointers, 3, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // sub 1 execBroadcastHalf(tempPointers, 1, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); // exp 3 execTransformHalf(extraPointers, 3, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = tempPointers[7]; tempPointers[9] = extraPointers[12]; tempPointers[10] = extraPointers[13]; tempPointers[11] = extraPointers[14]; //sum 1 execReduceHalf(tempPointers, 1, dx, xShapeInfo, extraParams, special, maxShapeBuffer, maxDimension, 1); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); tempPointers[8] = extraPointers[8]; tempPointers[9] = extraPointers[9]; tempPointers[10] = extraPointers[10]; tempPointers[11] = extraPointers[11]; tempPointers[12] = extraPointers[10]; tempPointers[13] = extraPointers[11]; // divide 3 execBroadcastHalf(tempPointers, 3, dx, xShapeInfo, special, maxShapeBuffer, dx, xShapeInfo, dimension, 1); if (opNum == 40) { if (debug) 
checkCudaErrors(cudaStreamSynchronize(*stream)); execTransformHalf(tempPointers, 47, dx, xShapeInfo, dx, xShapeInfo, extraParams); } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); // log 3 if (opNum == 40) execTransformHalf(extraPointers, 5, dx, xShapeInfo, dx, xShapeInfo, extraParams); else if (opNum == 39) execTransformHalf(extraPointers, 42, dx, xShapeInfo, dx, xShapeInfo, extraParams); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); delete hostMaxShapeBuffer; break; } case 41: { // IsMax along all dimensions bool scalarCheat = false; if (extraParams == nullptr) { scalarCheat = true; } if (scalarCheat) { // 1D input, aka vector int maxIdx = (int) execIndexReduceScalarHalf(extraPointers, 0, dx, xShapeInfo, extraParams); int targetIdx = 0; if (shape::order(hostXShapeInfo) == 'c' || shape::order(hostXShapeInfo) == 'f' && maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1] >= shape::length(hostXShapeInfo)) targetIdx = maxIdx; else targetIdx = maxIdx * shape::stride(hostXShapeInfo)[shape::rank(hostXShapeInfo) - 1]; fillIsMaxHalf<<< 1, 128, 1536, *stream >>>(result, shape::length(hostXShapeInfo), targetIdx); } else { // going for dimension-based IsMax int *tadMaxShapeInfo = reinterpret_cast<int *> (extraPointers[10]); int *tadMaxOffsets = reinterpret_cast<int *> (extraPointers[11]); int *dimension = reinterpret_cast<int *> (extraPointers[15]); special = reinterpret_cast<float16 *>(extraPointers[17]); int dimensionLength = getDeviceId(extraPointers[18]); // we call for IMax on specified dimension execIndexReduceHalf(extraPointers, 0, dx, xShapeInfo, extraParams, special, hostYShapeInfo, dimension, dimensionLength); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); // at this point, all IMax indexes are gathered, and we execute fillDimensionalIsMaxHalf<<<blockLimit, 64, funcAttributes[36].sharedSizeBytes, *stream>>>(special, hostYShapeInfo, result, resultShapeInfo, tadMaxShapeInfo, dimension, dimensionLength, tadMaxOffsets ); checkCudaErrors(cudaStreamSynchronize(*stream)); } break; } default: { printf("Bad case for transformHalf\n"); break; } } } } else { // Im2Col & Col2Im enforced grids if (opNum == 37 || opNum == 36) { launchDims.x = 512; launchDims.y = 512; launchDims.z += 384; } // Histogram op requires additional memory chunk if (opNum == 48) { int length = shape::length(hostZShapeInfo); cudaMalloc((void **)&maskedAllocPointer, length * launchDims.x * sizeof(float16)); } // this macro builds bunch of IF/ELSE selectors for kernel launch DISPATCH_SIMPLE(transformShaped, float16, PARAMS(dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultShapeInfo, shape::rank(hostZShapeInfo), maskedAllocPointer, reductionPointer), OPS_A(TRANSFORM_OPS)) // we need guaranteed sync here, due to temp memory release if (debug || opNum == 48) checkCudaErrors(cudaStreamSynchronize(*stream)); // release that histogram memory chunk if (opNum == 48) { cudaFree((void *)maskedAllocPointer); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * * @param opNum * @param dx * @param xShapeInfo * @param result * @param resultShapeInfo * @param extraParams * @param n */ void NativeOps::execTransformFloat( Nd4jPointer *extraPointers, int opNum, float *dx, int *xShapeInfo, float *result, int *resultShapeInfo, float *extraParams, int *xIndexes, int *resultIndexes) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) 
printf("F21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float *reductionPointer = reinterpret_cast<float *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (verbose && launchDims.x == 1) printf("AF21 opNum:[%i]\n", opNum); transformFloatIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execTransformHalf( Nd4jPointer *extraPointers, int opNum, float16 *dx, int *xShapeInfo, float16 *result, int *resultShapeInfo, float16 *extraParams, int *xIndexes, int *resultIndexes) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); if (debug && verbose) printf("H21 opNum:[%i]\n", opNum); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); float16 *reductionPointer = reinterpret_cast<float16 *>(extraPointers[4]); dim3 launchDims = getFlatLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, nullptr, funcAttributes[0]); if (verbose && launchDims.x == 1) printf("AH21 opNum:[%i]\n", opNum); transformHalfIndexes<<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, dx, xShapeInfo, shape::rank(hostXShapeInfo), extraParams, result, resultIndexes, allocPointer, reductionPointer); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } template <typename T> __device__ void flattenKernelGeneric(int dOffset, char order, T *result, int *resultShapeInfo, T *input, int *inputShapeInfo, int *allocationPointer) { __shared__ UnifiedSharedMemory *manager; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; manager = new(shmem) UnifiedSharedMemory((int *) shmem); manager->init(sizeof(UnifiedSharedMemory), 4, 4, sizeof(shape::TAD), 2); } __syncthreads(); int tid = blockIdx.x * blockDim.x + threadIdx.x; int *zShape = shape::shapeOf(resultShapeInfo); int *zStride = shape::stride(resultShapeInfo); int *yShape = shape::shapeOf(inputShapeInfo); int *yStride = shape::stride(inputShapeInfo); char yOrder = shape::order(inputShapeInfo); int len = shape::length(inputShapeInfo); int resultEWS = shape::elementWiseStride(resultShapeInfo); int inputEWS = shape::elementWiseStride(inputShapeInfo); if (yOrder == order) { if (resultEWS >= 1 && inputEWS >= 1) { for (int i = tid; i < len; i+= gridDim.x * blockDim.x) { result[i * resultEWS + dOffset] = input[i * inputEWS]; } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i + dOffset] = input[offset]; } } } } else { int rank = shape::rank(inputShapeInfo); int coord[MAX_RANK]; if(order == 'f') { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2sub(rank,yShape,i,coord); int offset = shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } else { for(int i = tid; i < len; i+= gridDim.x * blockDim.x) { shape::ind2subC(rank,yShape,i,coord); int offset = 
shape::getOffset(0,yShape,yStride,coord,rank); result[i+dOffset] = input[offset]; } } } } extern "C" __global__ void flattenKernelDouble(int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<double>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelFloat(int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } extern "C" __global__ void flattenKernelHalf(int offset, char order, float16 *result, int *resultShapeInfo, float16 *input, int *inputShapeInfo, int *allocationPointer) { flattenKernelGeneric<float16>( offset, order, result, resultShapeInfo, input, inputShapeInfo, allocationPointer); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flattenFloat( Nd4jPointer *extraPointers, int offset, char order, float *result, int *resultShapeInfo, float *input, int *inputShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (debug && verbose) printf("F22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (verbose && launchDims.x == 1) printf("AF222 opNum:[7]\n"); flattenKernelFloat<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::flattenHalf( Nd4jPointer *extraPointers, int offset, char order, float16 *result, int *resultShapeInfo, float16 *input, int *inputShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int *>(extraPointers[7]); if (debug && verbose) printf("H22 opNum:[7]\n"); int *allocPointer = reinterpret_cast<int *>(extraPointers[3]); dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[30]); if (verbose && launchDims.x == 1) printf("AH222 opNum:[7]\n"); flattenKernelHalf<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * Append an input array * to the end of a flat array * in a particular order * @param offset the offset of the array to start at * @param order the order * @param result the result array * @param resultShapeInfo the shape info for te array * @param input the input for the array * @param inputShapeInfo the shape information for that array */ void NativeOps::flattenDouble( Nd4jPointer *extraPointers, int offset, char order, double *result, int *resultShapeInfo, double *input, int *inputShapeInfo) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostYShapeInfo = reinterpret_cast<int 
*>(extraPointers[7]);

    if (debug && verbose)
        printf("D30 opNum:[7]\n");

    int *allocPointer = reinterpret_cast<int *>(extraPointers[3]);

    dim3 launchDims = getBasicLaunchParams(getDeviceId(extraPointers[2]), shape::length(hostYShapeInfo), 2, funcAttributes[34]);

    flattenKernelDouble<<<launchDims.x,launchDims.y, launchDims.z, *stream>>>(offset, order, result, resultShapeInfo, input, inputShapeInfo, allocPointer);

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::checkP2P() {
    int curDevice = 0;
    cudaGetDevice(&curDevice);

    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);

    // NOTE: the original condition used &&, which can never be true; || is the intended sanity check
    if (curDevice < 0 || curDevice > devCnt)
        curDevice = 0;

    bool tempSupport = true;

    if (devCnt > 1) {
        for (int x = 0; x < devCnt; x++) {
            for (int y = 0; y < devCnt; y++) {
                if (x == y)
                    continue;

                int canAccess = 0;
                cudaSetDevice(x);
                cudaDeviceCanAccessPeer(&canAccess, x , y);

                if (!canAccess)
                    tempSupport = false;
            }
        }

        supportedP2P = tempSupport;

        cudaSetDevice(curDevice);
    } else {
        // if we have only 1 device - we say that we support P2P, since all data will be on 1 device
        supportedP2P = true;
    }
}

void NativeOps::enableP2P(bool enable) {
    if (enable == allowedP2P)
        return;

    int curDevice = 0;
    cudaGetDevice(&curDevice);

    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);

    // NOTE: same fix as in checkP2P(): || instead of the original (always-false) &&
    if (curDevice < 0 || curDevice > devCnt)
        curDevice = 0;

    if (devCnt > 1) {
        for (int x = 0; x < devCnt; x++) {
            for (int y = 0; y < devCnt; y++) {
                if (x == y)
                    continue;

                int canAccess = 0;
                cudaSetDevice(x);
                cudaDeviceCanAccessPeer(&canAccess, x , y);

                if (canAccess) {
                    if (enable) {
                        cudaDeviceEnablePeerAccess(y, 0);
                    } else {
                        cudaDeviceDisablePeerAccess(y);
                    }
                } else {
                    if (verbose) printf("Peer access [%i] -> [%i] isn't possible\n", x, y);
                }
            }
        }

        cudaSetDevice(curDevice);
    }

    allowedP2P = enable;

    cudaSetDevice(curDevice);
}

bool NativeOps::isP2PAvailable() {
    return supportedP2P;
}

void NativeOps::initializeDevicesAndFunctions() {
    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);
    deviceProperties = new cudaDeviceProp[devCnt];
    for (int i = 0; i < devCnt; i++) {
        cudaSetDevice(i);
        cudaGetDeviceProperties(&deviceProperties[i], i);

        cudaDeviceSetLimit(cudaLimitStackSize, 4096);
    }

    cudaSetDevice(0);

    checkP2P();

    // enabling p2p gpu access if it's supported
    if (supportedP2P && devCnt > 1)
        enableP2P(allowedP2P);

    cudaFuncGetAttributes(&funcAttributes[0], (void *)transformFloatIndexes);

    //void (*transformFloatPointer1)(int opNum, float *dy,int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocationPointer, float *reductionPointer) = transformFloat; // FIXME
    cudaFuncGetAttributes(&funcAttributes[1], transformFloatIndexes);

    //void (*transformFloatPointer2)(int opNum, Nd4jIndex n, float *dy, int incy, float *params, float *result,int resultStride, int *allocationPointer, float *reductionPointer) = transformFloat; // FIXME
    cudaFuncGetAttributes(&funcAttributes[2], transformFloatIndexes);

    cudaFuncGetAttributes(&funcAttributes[3], (void *)summaryStatsReduceFloat);

    cudaFuncGetAttributes(&funcAttributes[4], (void *)scalarFloatIndexes);

    // void (*scalarFloatPointer1)(int opNum, float dx,float *dy, int *shapeInfo, int xRank, float *params, float *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarFloat;
    cudaFuncGetAttributes(&funcAttributes[5], scalarFloatIndexes);

    // void (*scalarFloatPointer2)(int opNum, Nd4jIndex n,float dx, float *dy, int incy, float *params, float *result,int resultStride, int *allocPointer) = scalarFloat;
    cudaFuncGetAttributes(&funcAttributes[6], scalarFloatIndexes);

    cudaFuncGetAttributes(&funcAttributes[7],
reduce3Float); cudaFuncGetAttributes(&funcAttributes[8], reduceSimpleGenericXD_0_float); // printf("reduceFloat regs: [%i], static shmem: [%i]\n", funcAttributes[8].numRegs, funcAttributes[8].sharedSizeBytes); cudaFuncGetAttributes(&funcAttributes[28], reduceSimpleGeneric1D_0_float); // 1D // printf("reduceFloat1D regs: [%i], static shmem: [%i]\n", funcAttributes[28].numRegs, funcAttributes[28].sharedSizeBytes); cudaFuncGetAttributes(&funcAttributes[29], reduceSimpleGeneric3D_0_float); // 6D // printf("reduceFloat6D regs: [%i], static shmem: [%i]\n", funcAttributes[29].numRegs, funcAttributes[29].sharedSizeBytes); cudaFuncGetAttributes(&funcAttributes[30], flattenKernelFloat); cudaFuncGetAttributes(&funcAttributes[31], concatKernelFloat); cudaFuncGetAttributes(&funcAttributes[9], pairWiseTransformFloat); cudaFuncGetAttributes(&funcAttributes[10], pairWiseTransformFloatIndex); cudaFuncGetAttributes(&funcAttributes[11], pairWiseTransformStridedFloat); cudaFuncGetAttributes(&funcAttributes[12], broadcastSimple_0_float); cudaFuncGetAttributes(&funcAttributes[13], indexReduceFloat); ///////////////////////////////////////// Doubles are separate, just in case of... cudaFuncGetAttributes(&funcAttributes[14], transformDoubleIndexes); // void (*transformDoublePointer1)(int opNum, double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME cudaFuncGetAttributes(&funcAttributes[15], transformDoubleIndexes); //void (*transformDoublePointer2)(int opNum, Nd4jIndex n, double *dy, int incy, double *params, double *result,int resultStride, int *allocationPointer, double *reductionPointer) = transformDouble; // FIXME cudaFuncGetAttributes(&funcAttributes[16], transformDoubleIndexes); cudaFuncGetAttributes(&funcAttributes[17], summaryStatsReduceDouble); cudaFuncGetAttributes(&funcAttributes[18], scalarDoubleIndexes); //void (*scalarDoublePointer1)(int opNum, double dx,double *dy, int *shapeInfo, int xRank, double *params, double *result,int *resultShapeInfo, int zRank, int *allocPointer) = scalarDouble; cudaFuncGetAttributes(&funcAttributes[19], scalarDoubleIndexes); //void (*scalarDoublePointer2)(int opNum, Nd4jIndex n,double dx, double *dy, int incy, double *params, double *result,int resultStride, int *allocPointer) = scalarDouble; cudaFuncGetAttributes(&funcAttributes[20], scalarDoubleIndexes); cudaFuncGetAttributes(&funcAttributes[21], reduce3Double); cudaFuncGetAttributes(&funcAttributes[22], reduceSimpleGenericXD_0_double); cudaFuncGetAttributes(&funcAttributes[23], pairWiseTransformDouble); cudaFuncGetAttributes(&funcAttributes[24], pairWiseTransformDoubleIndex); cudaFuncGetAttributes(&funcAttributes[25], pairWiseTransformStridedDouble); cudaFuncGetAttributes(&funcAttributes[26], broadcastSimple_0_double); cudaFuncGetAttributes(&funcAttributes[27], indexReduceDouble); cudaFuncGetAttributes(&funcAttributes[32], reduceSimpleGeneric1D_0_double); // 1D cudaFuncGetAttributes(&funcAttributes[33], reduceSimpleGeneric3D_0_double); // 6D cudaFuncGetAttributes(&funcAttributes[34], flattenKernelDouble); cudaFuncGetAttributes(&funcAttributes[35], concatKernelDouble); cudaFuncGetAttributes(&funcAttributes[36], fillDimensionalIsMaxFloat); cudaFuncGetAttributes(&funcAttributes[37], fillDimensionalIsMaxDouble); cudaFuncGetAttributes(&funcAttributes[38], concatKernelScalarFloat); cudaFuncGetAttributes(&funcAttributes[39], concatKernelScalarDouble); cudaFuncGetAttributes(&funcAttributes[40], 
concatKernelVStackFloat);

    cudaFuncGetAttributes(&funcAttributes[41], concatKernelVStackDouble);

    cudaFuncGetAttributes(&funcAttributes[42], concatKernelHStackFloat);

    cudaFuncGetAttributes(&funcAttributes[43], concatKernelHStackDouble);

    /////////////////////////

    cudaFuncGetAttributes(&funcAttributes[44], averagingKernelHalf);
    cudaFuncGetAttributes(&funcAttributes[45], averagingKernelFloat);
    cudaFuncGetAttributes(&funcAttributes[46], averagingKernelDouble);

    //
    cudaFuncGetAttributes(&funcAttributes[47], scalarAlongDimension_0_float);
    cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_float16);
    cudaFuncGetAttributes(&funcAttributes[48], scalarAlongDimension_0_double); // NOTE: reuses slot 48 and overwrites the float16 attributes; a distinct slot was probably intended
}

/**
 * This method acquires memory chunk of requested size on host side
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param flags optional parameter
 */
Nd4jPointer NativeOps::mallocHost(Nd4jIndex memorySize, int flags) {
    Nd4jPointer pointer;
    // cudaHostAllocMapped |cudaHostAllocPortable
    cudaError_t res = cudaHostAlloc((void **)&pointer, memorySize, cudaHostAllocDefault);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method acquires memory chunk of requested size on specified device
 *
 * @param pointer pointer that'll be used for allocation
 * @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's pointer to device_id, etc
 * @param flags optional parameter
 */
Nd4jPointer NativeOps::mallocDevice(Nd4jIndex memorySize, Nd4jPointer ptrToDeviceId, int flags) {
    Nd4jPointer pointer;
    cudaError_t res = cudaMalloc((void **)&pointer, memorySize);
    if (res != 0)
        pointer = 0L;
    return pointer;
}

/**
 * This method releases previously allocated host memory space
 *
 * @param pointer pointer that'll be freed
 */
int NativeOps::freeHost(Nd4jPointer pointer) {
    cudaError_t res = cudaFreeHost((void *) pointer);
    if (res != 0)
        pointer = 0L;
    return 1L;
}

/**
 * This method releases previously allocated memory space on device
 *
 * @param pointer pointer that'll be freed
 * @param ptrToDeviceId pointer to deviceId.
 */
int NativeOps::freeDevice(Nd4jPointer pointer, Nd4jPointer ptrToDeviceId) {
    cudaError_t res = cudaFree((void *)pointer);
    if (res != 0)
        pointer = 0L;
    return 1L;
}
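// Illustrative sketch, added for documentation only - not part of the original NativeOps API.
// It shows the intended pairing of the allocation helpers above; the function name and the idea
// of passing the device id opaquely through ptrToDeviceId are assumptions of this example.
static void exampleAllocationRoundTrip(NativeOps *ops, Nd4jIndex bytes, Nd4jPointer ptrToDeviceId) {
    Nd4jPointer host   = ops->mallocHost(bytes, 0);                    // pinned host buffer (cudaHostAlloc)
    Nd4jPointer device = ops->mallocDevice(bytes, ptrToDeviceId, 0);   // device buffer (cudaMalloc)

    if (host != 0L && device != 0L) {
        // ... fill the host buffer, then copy it with memcpy()/memcpyAsync() (flags: 1 == host -> device, see memcpyAsync below) ...
    }

    // NOTE: both free helpers currently return 1L unconditionally
    ops->freeDevice(device, ptrToDeviceId);
    ops->freeHost(host);
}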
Nd4jPointer NativeOps::createContext() {
    return 0L;
}

Nd4jPointer NativeOps::createStream() {
    Nd4jPointer nativeStream = 0;
    cudaError_t result = cudaStreamCreate((cudaStream_t *) &nativeStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return nativeStream;
}

Nd4jPointer NativeOps::createEvent() {
    Nd4jPointer nativeEvent= 0;
    cudaError_t result = cudaEventCreateWithFlags((cudaEvent_t *) &nativeEvent, cudaEventDisableTiming);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return nativeEvent;
}

int NativeOps::registerEvent(Nd4jPointer event, Nd4jPointer stream) {
    cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);

    cudaError_t result = cudaEventRecord(*pEvent, *pStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return 1;
}

int NativeOps::setDevice(Nd4jPointer ptrToDeviceId) {
    int deviceId = getDeviceId(ptrToDeviceId);
    cudaError_t result = cudaSetDevice(deviceId);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return 1;
}

Nd4jIndex NativeOps::getDeviceFreeMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;

    cudaGetDevice(&orig);

    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;

    cudaMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }

    return (Nd4jIndex) memFree;
}

Nd4jIndex NativeOps::getDeviceTotalMemory(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    int orig = -1;

    cudaGetDevice(&orig);

    if (device >= 0 && device != orig) {
        cudaSetDevice(device);
    }

    size_t memFree = 0;
    size_t memTotal = 0;

    cudaMemGetInfo(&memFree, &memTotal);

    if (device >= 0 && device != orig) {
        cudaSetDevice(orig);
    }

    return (Nd4jIndex) memTotal;
}

int NativeOps::memcpy(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    return memcpyAsync(dst, src, size, flags, reserved);
}

int NativeOps::memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);

    cudaMemcpyKind kind;

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*pStream));

    switch (flags) {
        case 0: {
            // host -> host
            kind = cudaMemcpyHostToHost;
        } break;
        case 1: {
            // host -> device
            kind = cudaMemcpyHostToDevice;
        } break;
        case 2: {
            // device -> host
            kind = cudaMemcpyDeviceToHost;
        } break;   // NOTE: this break was missing originally, so device->host copies fell through to device->device
        case 3: {
            // device -> device
            kind = cudaMemcpyDeviceToDevice;
        } break;
        default: {
            printf("UNDEFINED MEMCPY!\n");
            break;
        }
    }

    cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    if (result != 0) {
        checkCudaErrors(result);
        printf("Failed on [%lu] -> [%lu], size: [%i], direction: [%i], result: [%i]\n", src, dst, size, flags, (int) result );
        fflush(stdout);
        fflush(stderr);
        return 0L;
    }
    else return 1;
}

int NativeOps::memset(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    cudaError_t result = cudaMemset((void *) dst, value, (size_t) size);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return 1;
}

int NativeOps::memsetAsync(Nd4jPointer dst, int value, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);

    cudaError_t result = cudaMemsetAsync((void *) dst, value, (size_t) size, *pStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return 1;
}
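// Illustrative sketch, added for documentation only - not part of the original file.
// It shows the intended lifecycle of the stream/event helpers defined above and below;
// the helper name is an assumption of this example, and error handling is omitted
// (each call returns 0L on failure).
static void exampleStreamEventLifecycle(NativeOps *ops) {
    Nd4jPointer stream = ops->createStream();   // wraps cudaStreamCreate
    Nd4jPointer event  = ops->createEvent();    // cudaEventCreateWithFlags(..., cudaEventDisableTiming)

    // ... enqueue work on the stream, e.g. memcpyAsync()/memsetAsync() with `reserved` = stream ...

    ops->registerEvent(event, stream);          // cudaEventRecord on that stream
    ops->eventSynchronize(event);               // block until the recorded work is done

    ops->destroyEvent(event);
    ops->streamSynchronize(stream);
}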
int NativeOps::destroyEvent(Nd4jPointer event) {
    cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
    cudaError_t result = cudaEventDestroy(*pEvent);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return 1;
}

int NativeOps::streamSynchronize(Nd4jPointer stream) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&stream);
    cudaError_t result = cudaStreamSynchronize(*pStream);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return 1L;
}

int NativeOps::eventSynchronize(Nd4jPointer event) {
    cudaEvent_t *pEvent = reinterpret_cast<cudaEvent_t *>(&event);
    cudaError_t result = cudaEventSynchronize(*pEvent);
    checkCudaErrors(result);
    if (result != 0)
        return 0L;
    else return 1L;
}

int NativeOps::getAvailableDevices() {
    int devCnt = 0;
    cudaGetDeviceCount(&devCnt);
    return devCnt;
}

void NativeOps::enableDebugMode(bool reallyEnable) {
    debug = reallyEnable;
}

void NativeOps::setGridLimit(int gridSize) {
    if (gridSize > 8192)
        gridSize = 8192;

    if (gridSize < 1)
        gridSize = 1;

    blockLimit = gridSize;
}

int NativeOps::ompGetMaxThreads() {
    return maxThreads;
}

int NativeOps::ompGetNumThreads() {
    return maxThreads;
}

void NativeOps::setOmpNumThreads(int threads) {
    if (threads > 1024)
        threads = 1024;

    if (threads < 32)
        threads = 32;

    maxThreads = threads;
}

void NativeOps::enableVerboseMode(bool reallyEnable) {
    verbose = reallyEnable;
}

int NativeOps::getDeviceMajor(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    return deviceProperties[device].major;
}

int NativeOps::getDeviceMinor(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    return deviceProperties[device].minor;
}

const char * NativeOps::getDeviceName(Nd4jPointer ptrToDeviceId) {
    int device = getDeviceId(ptrToDeviceId);
    return deviceProperties[device].name;
}

/**
 * Concatenate multi array of the same shape together
 * along a particular dimension
 */
void NativeOps::concatFloat(
        Nd4jPointer *extraPointers,
        int dimension,
        int numArrays,
        Nd4jPointer *data,
        Nd4jPointer *inputShapeInfo,
        float *result,
        int *resultShapeInfo,
        Nd4jPointer *tadPointers,
        Nd4jPointer *offsetPointers) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]);

    // numArrays will be used as number of TADs, so each block process 1 input
    int smem = 0;
    bool isVstack = false;
    bool isScalar = true;
    bool isHstack = false;

    for (int i = 0; i < numArrays; i++) {
        if (!shape::isScalar(hostShapePointers[i])) {
            isScalar = false;
            break;
        }
    }

    if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) {
        isVstack = true;
        for (int i = 0; i < numArrays; i++) {
            if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') {
                isVstack = false;
                break;
            }
        }
    }

    if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) {
        isHstack = true;
        for (int i = 0; i < numArrays; i++) {
            if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) {
                isHstack = false;
                break;
            }
        }
    }

    if (isScalar) {
        if (debug && verbose)
            printf("Going scalar concat\n");

        smem = funcAttributes[38].sharedSizeBytes;
        concatKernelScalarFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *)
offsetPointers[0]); } else if (isVstack) { if (debug && verbose) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; concatKernelVStackFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (debug && verbose) printf("Going HStack concat\n"); smem = funcAttributes[42].sharedSizeBytes; concatKernelHStackFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (debug && verbose) printf("Going generic concat\n"); smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); int *devZOffsets = reinterpret_cast<int *>(extraPointers[11]); concatKernelFloat<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (debug && verbose) printf("sharedMemory requested for concatFloat: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::concatHalf( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (debug && verbose) printf("Going scalar concat\n"); smem = funcAttributes[38].sharedSizeBytes; concatKernelScalarHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (debug && verbose) printf("Going VStack concat\n"); smem = funcAttributes[40].sharedSizeBytes; concatKernelVStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (debug && verbose) printf("Going HStack concat\n"); smem = 
funcAttributes[42].sharedSizeBytes; concatKernelHStackHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (debug && verbose) printf("Going generic concat\n"); smem = nd4j::math::nd4j_max<int>(funcAttributes[31].sharedSizeBytes + 768, 1280); int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]); int *devZOffsets = reinterpret_cast<int *>(extraPointers[11]); concatKernelHalf<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets); } if (debug && verbose) printf("sharedMemory requested for concatHalf: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs); checkCudaErrors(cudaStreamSynchronize(*stream)); } /** * Concatneate multi array of the same shape together * along a particular dimension */ void NativeOps::concatDouble( Nd4jPointer *extraPointers, int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]); int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]); int **hostShapePointers = reinterpret_cast<int **>(extraPointers[9]); // numArrays will be used as number of TADs, so each block process 1 input int smem = 0; bool isVstack = false; bool isScalar = true; bool isHstack = false; for (int i = 0; i < numArrays; i++) { if (!shape::isScalar(hostShapePointers[i])) { isScalar = false; break; } } if (!isScalar && dimension == 0 && shape::rank(hostXShapeInfo) == 2 && shape::order(hostXShapeInfo) == 'c' ) { isVstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0 || shape::order(hostShapePointers[i]) != 'c') { isVstack = false; break; } } } if (!isScalar && !isVstack && dimension == 1 && shape::isVector(hostXShapeInfo)) { isHstack = true; for (int i = 0; i < numArrays; i++) { if (!shape::isVector(hostShapePointers[i]) || shape::elementWiseStride(hostShapePointers[i]) <= 0) { isHstack = false; break; } } } if (isScalar) { if (debug && verbose) printf("Going scalar concat\n"); smem = funcAttributes[39].sharedSizeBytes; concatKernelScalarDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isVstack) { if (debug && verbose) printf("Going VStack concat\n"); smem = funcAttributes[41].sharedSizeBytes; concatKernelVStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else if (isHstack) { if (debug && verbose) printf("Going HStack concat\n"); smem = funcAttributes[43].sharedSizeBytes; concatKernelHStackDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0]); } else { if (debug && verbose) printf("Going generic concat\n"); smem = 
nd4j::math::nd4j_max<int>(funcAttributes[35].sharedSizeBytes + 768, 1280);

        int *devZTadShape = reinterpret_cast<int *>(extraPointers[10]);
        int *devZOffsets = reinterpret_cast<int *>(extraPointers[11]);

        concatKernelDouble<<< 128, 128, smem, *stream>>> (dimension, numArrays, (Nd4jPointer *) data[0], (Nd4jPointer *) inputShapeInfo[0], result, resultShapeInfo, (Nd4jPointer *) tadPointers[0], (Nd4jPointer *) offsetPointers[0], devZTadShape, devZOffsets);
    }

    if (debug && verbose)
        printf("sharedMemory requested for concatDouble: [%i], registers: [%i]\n", smem, funcAttributes[31].numRegs);

    checkCudaErrors(cudaStreamSynchronize(*stream));
}

/**
 * This method saves the TAD-only shape info and offsets for the given dimensions
 * into the provided host buffers.
 */
void NativeOps::tadOnlyShapeInfo(int *xShapeInfo, int *dimension, int dimensionLength, int *target, int *offsets) {
    shape::TAD *tad = new shape::TAD();
    tad->init(xShapeInfo, dimension, dimensionLength);
    //tad->setOutputBuffer(target);
    tad->createTadOnlyShapeInfo();
    tad->createOffsets();

    std::memcpy((void *) target, tad->tadOnlyShapeInfo, (tad->tadOnlyShapeInfo[0] * 2 + 4) * sizeof(int));
    std::memcpy((void *) offsets, tad->tadOffsets, tad->numTads * sizeof(int));
/*
    shape::printShapeInfoLinear(hostXShapeInfo);
    shape::printShapeInfoLinear(tad->tadOnlyShapeInfo);
    shape::printShapeInfoLinear(target);
*/
    delete tad;
}

int NativeOps::memcpyConstantAsync(Nd4jIndex dst, Nd4jPointer src, Nd4jIndex size, int flags, Nd4jPointer reserved) {
    cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(&reserved);

    cudaMemcpyKind kind;

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*pStream));

    switch (flags) {
        case 0: {
            // host -> host
            kind = cudaMemcpyHostToHost;
        } break;
        case 1: {
            // host -> device
            kind = cudaMemcpyHostToDevice;
        } break;
        case 2: {
            // device -> host
            kind = cudaMemcpyDeviceToHost;
        } break;   // NOTE: this break was missing originally, so flag 2 fell through to device->device
        case 3: {
            // device -> device
            kind = cudaMemcpyDeviceToDevice;
        } break;
    }
    //cudaError_t result = cudaMemcpyAsync((void *) dst, (const void *) src, (size_t) size, kind, *pStream);
    cudaError_t result = cudaMemcpyToSymbolAsync(deviceConstantMemory, (const void *) src, size, dst, kind, *pStream);
    checkCudaErrors(result);
    if (result != 0) {
        printf("Symbol failed on [%lu] -> [%lu], size: [%i], direction: [%i]\n", src, dst, size, flags );
        return 0L;
    }
    else return 1;
}

Nd4jPointer NativeOps::getConstantSpace() {
    Nd4jPointer dConstAddr;
    cudaError_t result = cudaGetSymbolAddress((void **)&dConstAddr, deviceConstantMemory);

    return dConstAddr;
}

void NativeOps::pullRowsHalf(Nd4jPointer *extraPointers, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, int *tadOffsets, int *zTadShapeInfo, int *zTadOffsets) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    pullRowsKernelHalf<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::pullRowsFloat(Nd4jPointer *extraPointers, float *x, int *xShapeInfo, float *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, int *tadOffsets, int *zTadShapeInfo, int *zTadOffsets) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    pullRowsKernelFloat<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::pullRowsDouble(Nd4jPointer *extraPointers, double *x, int *xShapeInfo, double *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, int *tadOffsets, int *zTadShapeInfo, int *zTadOffsets) {
    cudaStream_t *stream
= reinterpret_cast<cudaStream_t *>(&extraPointers[1]); pullRowsKernelDouble<<<64, 256, 1024, *stream>>>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::averageHalf(Nd4jPointer *extras, Nd4jPointer *dx, float16 *dz, int n, Nd4jIndex length, bool propagate) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); float16 **x = reinterpret_cast<float16 **>(dx); if (debug && verbose) printf("averageHalf called\n"); dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float16), funcAttributes[44]); averagingKernelHalf<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::averageFloat(Nd4jPointer *extras, Nd4jPointer *dx, float *dz, int n, Nd4jIndex length, bool propagate) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); float **x = reinterpret_cast<float **>(dx); if (debug && verbose) printf("averageFloat called\n"); dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(float), funcAttributes[45]); averagingKernelFloat<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::averageDouble(Nd4jPointer *extras, Nd4jPointer *dx, double *dz, int n, Nd4jIndex length, bool propagate) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); double **x = reinterpret_cast<double **>(dx); if (debug && verbose) printf("averageDouble called\n"); dim3 launchDims = getBasicLaunchParams(getDeviceId(extras[2]), length, sizeof(double), funcAttributes[46]); averagingKernelDouble<<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, dz, n, length, propagate); checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::shuffleDouble(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); double **x = reinterpret_cast<double **>(dx); double **z = reinterpret_cast<double **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); int **tadOffset = reinterpret_cast<int **>(tadOffsets); shuffleKernelDouble<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::shuffleFloat(Nd4jPointer *extras, Nd4jPointer *dx, Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); float **x = reinterpret_cast<float **>(dx); float **z = reinterpret_cast<float **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); int **tadOffset = reinterpret_cast<int **>(tadOffsets); shuffleKernelFloat<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::shuffleHalf(Nd4jPointer *extras, Nd4jPointer *dx, 
Nd4jPointer *xShapeInfo, Nd4jPointer *dz, Nd4jPointer *zShapeInfo, int N, int *shuffleMap, Nd4jPointer *tadShapeInfo, Nd4jPointer *tadOffsets) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); float16 **x = reinterpret_cast<float16 **>(dx); float16 **z = reinterpret_cast<float16 **>(dz); int **xShape = reinterpret_cast<int **>(xShapeInfo); int **zShape = reinterpret_cast<int **>(zShapeInfo); int **tadOnlyShapeInfo = reinterpret_cast<int **>(tadShapeInfo); int **tadOffset = reinterpret_cast<int **>(tadOffsets); shuffleKernelHalf<<<32, 128, 1024, *stream>>>(x, xShape, z, zShape, N, shuffleMap, tadOnlyShapeInfo, tadOffset); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int xStride, float *dy, int yStride, float *dz, int zStride, float *extraA, float *extraB, float scalarA, float scalarB) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // metaPredicateStridedFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int xStride, double *dy, int yStride, double *dz, int zStride, double *extraA, double *extraB, double scalarA, double scalarB) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // metaPredicateStridedDouble<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateStridedHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int xStride, float16 *dy, int yStride, float16 *dz, int zStride, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // metaPredicateStridedHalf<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalarA, scalarB); float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseStrided_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xStride, dy, yStride, dz, zStride, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateReduceFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int 
*zShapeInfo, int *dimension, int dimensionLength, int *tadShapeInfo, int *tadOffsets, float *extraA, float *extraB, float scalarA, float scalarB, bool scalarReturned) { // no-op cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); /* metaPredicateReduceFloat(const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, int *tadShapeInfo, int *tadOffsets, float *reductionBuffer, float *extraA, float *extraB, float scalarA, float scalarB) { */ // metaPredicateReduceFloat<<<256, 256, 1024, *stream>>>(opTypeA, opNumA, opTypeB, opNumB, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, dimension, dimensionLength, tadShapeInfo, tadOffsets, nullptr, extraA, extraB, scalarA, scalarB, scalarReturned); if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeDouble(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, double *dx, int *xShapeInfo, double *dy, int *yShapeInfo, double *dz, int *zShapeInfo, double *extraA, double *extraB, double scalarA, double scalarB) { // no-op; cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), double, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeHalf(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float16 *dx, int *xShapeInfo, float16 *dy, int *yShapeInfo, float16 *dz, int *zShapeInfo, float16 *extraA, float16 *extraB, float scalarA, float scalarB) { // no-op; cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); // we have to converf float -> fp16 prior to kernel call float16 scalA = (float16) scalarA; float16 scalB = (float16) scalarB; if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalA, scalB), float16, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } void NativeOps::execMetaPredicateShapeFloat(Nd4jPointer *extras, const int opTypeA, const int opNumA, const int opTypeB, const int opNumB, long N, float *dx, int *xShapeInfo, float *dy, int *yShapeInfo, float *dz, int *zShapeInfo, float *extraA, float *extraB, float scalarA, float scalarB) { // no-op; cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extras[1]); if (opTypeA == 2) { if (opTypeB == 0) { DISPATCH_METAOP(invertedMetaPairwiseShaped_Pairwise_Scalar, PARAMS(opTypeA, opTypeB, N, dx, xShapeInfo, dy, yShapeInfo, dz, zShapeInfo, extraA, extraB, scalarA, scalarB), float, OPS_A(PAIRWISE_TRANSFORM_OPS), OPS_B(SCALAR_OPS)); } } if (debug) checkCudaErrors(cudaStreamSynchronize(*stream)); } bool NativeOps::isExperimentalEnabled() { return experimentalSupport; } void NativeOps::setOmpMinThreads(int threads) { minThreads = nd4j::math::nd4j_max<int>(32, threads); minThreads = nd4j::math::nd4j_min<int>(maxThreads, minThreads); } int NativeOps::getDevice() { int curDevice = -1; cudaGetDevice(&curDevice); return curDevice; } void NativeOps::setElementThreshold(int num) { // this is no-op for CUDA } void 
NativeOps::setTADThreshold(int num) {
    // this is no-op for CUDA
}

void NativeOps::execScalarFloat(Nd4jPointer *extraPointers, int opNum, float *x, int *xShapeInfo, float *z, int *zShapeInfo, float *scalars, float *extraParams, int *dimension, int dimensionLength) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int *hostXShapeInfo = reinterpret_cast<int *>(extraPointers[0]);
    int *hostTadShapeInfo = reinterpret_cast<int *>(extraPointers[9]);
    int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
    int *tadOffsets = reinterpret_cast<int *>(extraPointers[11]);
    int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
    int *tadOffsetsZ = reinterpret_cast<int *>(extraPointers[13]);

    dim3 launchDims = getReduceLaunchParams(getDeviceId(extraPointers[2]), hostXShapeInfo, hostTadShapeInfo, funcAttributes[47], dimensionLength, sizeof(float), 0);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(scalarAlongDimension, float, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execScalarDouble(Nd4jPointer *extraPointers, int opNum, double *x, int *xShapeInfo, double *z, int *zShapeInfo, double *scalars, double *extraParams, int *dimension, int dimensionLength) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(256, 256, 1024);

    int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
    int *tadOffsets = reinterpret_cast<int *>(extraPointers[11]);
    int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
    int *tadOffsetsZ = reinterpret_cast<int *>(extraPointers[13]);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(scalarAlongDimension, double, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execScalarHalf(Nd4jPointer *extraPointers, int opNum, float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, float16 *scalars, float16 *extraParams, int *dimension, int dimensionLength) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(256, 256, 1024);

    int *tadShapeInfo = reinterpret_cast<int *>(extraPointers[10]);
    int *tadOffsets = reinterpret_cast<int *>(extraPointers[11]);
    int *tadShapeInfoZ = reinterpret_cast<int *>(extraPointers[12]);
    int *tadOffsetsZ = reinterpret_cast<int *>(extraPointers[13]);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(scalarAlongDimension, float16, PARAMS(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), OPS_A(SCALAR_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execAggregateFloat(Nd4jPointer *extraPointers, int opNum, float **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float *realArguments, int numRealArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numBlocks, numThreads, shmem);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(aggregateSimple, float, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))

    checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execAggregateDouble(Nd4jPointer *extraPointers, int opNum, double **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, double *realArguments, int numRealArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numBlocks, numThreads, shmem);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(aggregateSimple, double, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))

    checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execAggregateHalf(Nd4jPointer *extraPointers, int opNum, float16 **arguments, int numArguments, int **shapes, int numShapes, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, float16 *realArguments, int numRealArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numBlocks, numThreads, shmem);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(aggregateSimple, float16, PARAMS(arguments, numArguments, shapes, numShapes, indexArguments, numIndexArguments, intArrays, numIntArrays, realArguments, numRealArguments), OPS_A(AGGREGATE_OPS))

    checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execAggregateBatchFloat(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
    // not implemented yet
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numAggregates, numThreads, shmem);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(aggregateBatchSimple, float, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execAggregateBatchDouble(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
    // not implemented yet
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numAggregates, numThreads, shmem);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(aggregateBatchSimple, double, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execAggregateBatchHalf(Nd4jPointer *extraPointers, int numAggregates, int opNum, int maxArgs, int maxShapes, int maxIntArrays, int maxIntArraySize, int maxIdx, int maxReals, void *ptrToArguments) {
    // not implemented yet
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    int numBlocks = getDeviceId(extraPointers[2]);
    int numThreads = getDeviceId(extraPointers[3]);
    int shmem = getDeviceId(extraPointers[4]);

    dim3 launchDims = dim3(numAggregates, numThreads, shmem);

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(aggregateBatchSimple, float16, PARAMS(numAggregates, opNum, maxArgs, maxShapes, maxIntArrays, maxIntArraySize, maxIdx, maxReals, ptrToArguments), OPS_A(AGGREGATE_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *z, int *zShapeBuffer, float *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)));

    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(stateHost);
    Nd4jPointer state = buffer->getDevicePointer();

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomSingle, float, PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *y, int *yShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)));

    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(stateHost);
    Nd4jPointer state = buffer->getDevicePointer();

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomTriple, float, PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomFloat(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost, float *x, int *xShapeBuffer, float *z, int *zShapeBuffer, float *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float)));

    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(stateHost);
    Nd4jPointer state = buffer->getDevicePointer();

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomDouble, float, PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *z, int *zShapeBuffer, double *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomSingle, double, PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *y, int *yShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomTriple, double, PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomDouble(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, double *x, int *xShapeBuffer, double *z, int *zShapeBuffer, double *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(double)));

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomDouble, double, PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomSingle, float16, PARAMS(state, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *y, int *yShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomTriple, float16, PARAMS(state, x, xShapeBuffer, y, yShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

void NativeOps::execRandomHalf(Nd4jPointer *extraPointers, int opNum, Nd4jPointer state, float16 *x, int *xShapeBuffer, float16 *z, int *zShapeBuffer, float16 *extraArguments) {
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    dim3 launchDims = dim3(512, 512, sizeof(nd4j::random::RandomBuffer) + (560 * sizeof(float16)));

    // this macro builds a bunch of IF/ELSE selectors for kernel launch
    DISPATCH_SIMPLE(randomDouble, float16, PARAMS(state, x, xShapeBuffer, z, zShapeBuffer, extraArguments), OPS_A(RANDOM_OPS))

    if (debug)
        checkCudaErrors(cudaStreamSynchronize(*stream));
}

Nd4jPointer NativeOps::initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
    unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);

    // we don't synchronize at random initialization, it's safe to go unsync here
    // cudaStreamSynchronize(*stream);

    unsigned long long *ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
    nd4j::random::RandomBuffer *buffer = new nd4j::random::RandomBuffer(seed, bufferSize, (uint64_t *) ptrHost, (uint64_t *) ptrDev);
    buffer->propagateToDevice(buffer, *stream);

    checkCudaErrors(cudaStreamSynchronize(*stream));

    // we generate the sequence in host memory
    nd4j::random::Xoroshiro128 generator(buffer);
    generator.refreshBuffer();

    // and copy it to the gpu
    cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);

    return buffer;
}

void NativeOps::destroyRandom(Nd4jPointer ptrBuffer) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(ptrBuffer);

    // FIXME: it's a bad thing, but we can't know in advance which stream(s) were using this generator in practice
    cudaDeviceSynchronize();

    delete buffer;
}

void NativeOps::refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(ptrRandom);

    unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    cudaStreamSynchronize(*stream);

    uint64_t *ptrDev = buffer->getDeviceBuffer();

    // update rng state
    buffer->setSeed(seed);
    buffer->setOffset(0);
    buffer->propagateToDevice(buffer, *stream);

    // refresh buffer on the host side
    nd4j::random::Xoroshiro128 generator(buffer);
    generator.refreshBuffer();

    // copy back to gpu
    cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}

void NativeOps::reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
    nd4j::random::RandomBuffer *buffer = reinterpret_cast<nd4j::random::RandomBuffer *>(ptrRandom);

    cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(&extraPointers[1]);
    cudaStreamSynchronize(*stream);

    // update rng state
    buffer->reSeed(seed);
    buffer->setOffset(0);
    buffer->propagateToDevice(buffer, *stream);
}
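The random-op entry points above share one lifecycle: initRandom() wraps a host/device buffer pair in a RandomBuffer and uploads a host-generated sequence, the execRandom*() overloads consume it through DISPATCH_SIMPLE, refreshBuffer()/reSeedBuffer() reset its state, and destroyRandom() releases it after a full device sync. A minimal host-side sketch of that lifecycle is given below; the helper name, the pre-allocated device buffer, and the opNum/extraArguments values are illustrative assumptions, not part of NativeOps.

// Illustrative sketch only. Assumes extraPointers[0] is the pinned host state buffer and
// extraPointers[1] the CUDA stream, mirroring the casts in initRandom()/execRandomFloat() above.
void runRandomOpExample(NativeOps &ops, Nd4jPointer *extraPointers,
                        Nd4jPointer deviceStateBuffer,   // pre-allocated, bufferSize * 8 bytes
                        float *dZ, int *dZShapeInfo) {
    const long seed       = 119L;
    const long bufferSize = 100000L;                     // 64-bit words in the state buffer

    // build the RandomBuffer and upload the host-generated sequence
    Nd4jPointer state = ops.initRandom(extraPointers, seed, bufferSize, deviceStateBuffer);

    // launch one random op; opNum selects the distribution from RANDOM_OPS (assumed value)
    float extraArguments[2] = {0.0f, 1.0f};
    ops.execRandomFloat(extraPointers, 0, state, dZ, dZShapeInfo, extraArguments);

    // reset the generator state for further draws, then release it
    ops.reSeedBuffer(extraPointers, seed + 1, state);
    ops.destroyRandom(state);
}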
269e255f6d02a3eb2a2758053e002ebe6e81ef09.hip
// !!! This is a file automatically generated by hipify!!!
#include "Test.h"

void testPyramidLevels(int w, int h, float **d_iPyRef, float **d_iPyCrr, float **d_dPyRef, float **d_dPyCrr, float *d_res, int showLvl, bool showCur, int showType) {
    // calculate width and height of the image to show (level dependent)
    size_t n_w = w;
    size_t n_h = h;
    for (int i = 0; i < showLvl; i++) {
        n_w = (n_w+1)/2;
        n_h = (n_h+1)/2;
    }

    // initialize image output for testing
    cv::Mat pyLvlOut(n_h, n_w, CV_32FC1);

    // select image from pyramids depending on args and show it
    switch (showType) {
        case 0: {
            if (showCur) { hipMemcpy((void*)pyLvlOut.data, d_iPyCrr[showLvl], n_w*n_h*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK; }
            else         { hipMemcpy((void*)pyLvlOut.data, d_iPyRef[showLvl], n_w*n_h*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK; }
            break;
        }
        case 1: {
            if (showCur) { hipMemcpy((void*)pyLvlOut.data, d_dPyCrr[showLvl], n_w*n_h*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK; }
            else         { hipMemcpy((void*)pyLvlOut.data, d_dPyRef[showLvl], n_w*n_h*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK; }
            break;
        }
        case 2: {
            hipMemcpy((void*)pyLvlOut.data, d_res, n_w*n_h*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
            break;
        }
        default: {
            break;
        }
    }

    showImage("PyramidLvl", pyLvlOut, w+110, 100);
    cv::waitKey(0);
    // cv::imwrite("residual.png",pyLvlOut*255.f);
}

void testJacobian(float *d_J, int lvl, int w, int h) {
    for (int i = 0; i < lvl; i++) {
        w = (w+1)/2;
        h = (h+1)/2;
    }

    float *j1 = new float[w*h];
    float *j2 = new float[w*h];
    float *j3 = new float[w*h];
    float *j4 = new float[w*h];
    float *j5 = new float[w*h];
    float *j6 = new float[w*h];
    float *j  = new float[w*h*6];

    hipMemcpy(j, d_J, w*h*6*sizeof(float), hipMemcpyDeviceToHost); CUDA_CHECK;
    hipDeviceSynchronize();

    for (int i = 0; i < w*h; i++) {
        j1[i] = j[i*6];
        j2[i] = j[i*6+1];
        j3[i] = j[i*6+2];
        j4[i] = j[i*6+3];
        j5[i] = j[i*6+4];
        j6[i] = j[i*6+5];
    }

    cv::Mat pyLvlOut1(h, w, CV_32FC1);
    cv::Mat pyLvlOut2(h, w, CV_32FC1);
    cv::Mat pyLvlOut3(h, w, CV_32FC1);
    cv::Mat pyLvlOut4(h, w, CV_32FC1);
    cv::Mat pyLvlOut5(h, w, CV_32FC1);
    cv::Mat pyLvlOut6(h, w, CV_32FC1);

    convert_layered_to_mat(pyLvlOut1, j1);
    convert_layered_to_mat(pyLvlOut2, j2);
    convert_layered_to_mat(pyLvlOut3, j3);
    convert_layered_to_mat(pyLvlOut4, j4);
    convert_layered_to_mat(pyLvlOut5, j5);
    convert_layered_to_mat(pyLvlOut6, j6);

    showImage("Jacobian lvl 5", pyLvlOut6, 1680-w, h+63);
    showImage("Jacobian lvl 4", pyLvlOut5, 1680-w, 24);
    showImage("Jacobian lvl 3", pyLvlOut4, 840-w/2, h+63);
    showImage("Jacobian lvl 2", pyLvlOut3, 840-w/2, 24);
    showImage("Jacobian lvl 1", pyLvlOut2, 65, h+63);
    showImage("Jacobian lvl 0", pyLvlOut1, 65, 24);
    cv::waitKey(0);

    delete[] j1;
    delete[] j2;
    delete[] j3;
    delete[] j4;
    delete[] j5;
    delete[] j6;
}
269e255f6d02a3eb2a2758053e002ebe6e81ef09.cu
#include "Test.h" void testPyramidLevels(int w, int h, float **d_iPyRef, float **d_iPyCrr, float **d_dPyRef, float **d_dPyCrr, float *d_res, int showLvl, bool showCur, int showType) { // calculate width and height of the image to show (level dependent) size_t n_w = w; size_t n_h = h; for (int i = 0; i<showLvl; i++) { n_w = (n_w+1)/2; n_h = (n_h+1)/2; } // initialize image output for testing cv::Mat pyLvlOut(n_h, n_w, CV_32FC1); // select image from pyramids depending on args and show it switch (showType) { case 0: { if (showCur) {cudaMemcpy((void*)pyLvlOut.data, d_iPyCrr[showLvl], n_w*n_h*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;} else {cudaMemcpy((void*)pyLvlOut.data, d_iPyRef[showLvl], n_w*n_h*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;} break; } case 1: { if (showCur) {cudaMemcpy((void*)pyLvlOut.data, d_dPyCrr[showLvl], n_w*n_h*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;} else {cudaMemcpy((void*)pyLvlOut.data, d_dPyRef[showLvl], n_w*n_h*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;} break; } case 2: { cudaMemcpy((void*)pyLvlOut.data, d_res, n_w*n_h*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK; break; } default: { break; } } showImage("PyramidLvl", pyLvlOut, w+110, 100); cv::waitKey(0); // cv::imwrite("residual.png",pyLvlOut*255.f); } void testJacobian(float *d_J, int lvl, int w, int h) { for (int i=0; i<lvl; i++) { w = (w+1)/2; h = (h+1)/2; } float *j1 = new float[w*h]; float *j2 = new float[w*h]; float *j3 = new float[w*h]; float *j4 = new float[w*h]; float *j5 = new float[w*h]; float *j6 = new float[w*h]; float *j = new float[w*h*6]; cudaMemcpy(j, d_J, w*h*6*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK; cudaDeviceSynchronize(); for (int i=0; i<w*h; i++) { j1[i] = j[i*6]; j2[i] = j[i*6+1]; j3[i] = j[i*6+2]; j4[i] = j[i*6+3]; j5[i] = j[i*6+4]; j6[i] = j[i*6+5]; } cv::Mat pyLvlOut1(h, w, CV_32FC1); cv::Mat pyLvlOut2(h, w, CV_32FC1); cv::Mat pyLvlOut3(h, w, CV_32FC1); cv::Mat pyLvlOut4(h, w, CV_32FC1); cv::Mat pyLvlOut5(h, w, CV_32FC1); cv::Mat pyLvlOut6(h, w, CV_32FC1); convert_layered_to_mat(pyLvlOut1, j1); convert_layered_to_mat(pyLvlOut2, j2); convert_layered_to_mat(pyLvlOut3, j3); convert_layered_to_mat(pyLvlOut4, j4); convert_layered_to_mat(pyLvlOut5, j5); convert_layered_to_mat(pyLvlOut6, j6); showImage("Jacobian lvl 5", pyLvlOut6, 1680-w, h+63); showImage("Jacobian lvl 4", pyLvlOut5, 1680-w, 24); showImage("Jacobian lvl 3", pyLvlOut4, 840-w/2, h+63); showImage("Jacobian lvl 2", pyLvlOut3, 840-w/2, 24); showImage("Jacobian lvl 1", pyLvlOut2, 65, h+63); showImage("Jacobian lvl 0", pyLvlOut1, 65, 24); cv::waitKey(0); delete[] j1; delete[] j2; delete[] j3; delete[] j4; delete[] j5; delete[] j6; }
409c48c2bafbfb3b06702f09125a1eb0cf4e2c8a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <PointersManager.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void concatCuda(const int numOfArrs, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) { __shared__ int arrIdx, blocksPerArr; __shared__ T *x, *z; __shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLen, arrLenPerBlock, start, end; if (threadIdx.x == 0) { blocksPerArr = (gridDim.x + numOfArrs - 1) / numOfArrs; // ceil arrIdx = blockIdx.x / blocksPerArr; x = reinterpret_cast<T*>(reinterpret_cast<void**>(pVx)[arrIdx]); z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[arrIdx]); xShapeInfo = reinterpret_cast<Nd4jLong**>(pxShapeInfo)[arrIdx]; zShapeInfo = reinterpret_cast<Nd4jLong**>(pzShapeInfo)[arrIdx]; arrLen = shape::length(xShapeInfo); arrLenPerBlock = (arrLen + blocksPerArr - 1) / blocksPerArr; // ceil start = (blockIdx.x % blocksPerArr) * arrLenPerBlock; end = (start + arrLenPerBlock) > arrLen ? 
arrLen : (start + arrLenPerBlock); } __syncthreads(); for (Nd4jLong i = start + threadIdx.x; i < end; i += blockDim.x) z[shape::getIndexOffset(i, zShapeInfo, arrLen)] = x[shape::getIndexOffset(i, xShapeInfo, arrLen)]; } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void concatCudaLauncher(const int numOfArrs, const hipStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) { hipLaunchKernelGGL(( concatCuda<T>), dim3(512), dim3(256), 1024, *stream, numOfArrs, pVx, pxShapeInfo, pVz, pzShapeInfo); } BUILD_SINGLE_TEMPLATE(template void concatCudaLauncher, (const int numOfArrs, const hipStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// // x - input, y - paddings, z - output template<typename X, typename Y> __global__ static void padCuda(const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void *vPadVal) { const X padVal = *reinterpret_cast<const X*>(vPadVal); const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank, rankMinusOne; __shared__ Nd4jLong zLen, yLen, totalThreads, *coords, *xShape, *zShape, *xStride, *zStride, shift1, shift2, yStride0; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)); zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)); xStride = shape::stride(const_cast<Nd4jLong*>(xShapeInfo)); zStride = shape::stride(const_cast<Nd4jLong*>(zShapeInfo)); yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0]; rank = shape::rank(xShapeInfo); zLen = shape::length(zShapeInfo); yLen = 2 * rank; rankMinusOne = rank - 1; totalThreads = gridDim.x * blockDim.x; shift1 = mode == 1 ? 0 : 1; // REFLECT : SYMMETRIC shift2 = mode == 1 ? 
2 : 1; // REFLECT : SYMMETRIC } __syncthreads(); auto xzCoord = coords + threadIdx.x * rank; // we use xzCoord storage both for x and z arrays const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if(mode == 0) { // CONSTANT case for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(rank, zShape, i, zLen, xzCoord); const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank); bool within = true; for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)]; if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) {within = false; break;} else {xzCoord[j] = xzCoord[j] - left;} } if(within) z[zOffset] = x[shape::getOffset(0, xShape, xStride, xzCoord, rank)]; else z[zOffset] = padVal; } } else { // REFLECT and SYMMETRIC cases for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(rank, zShape, i, zLen, xzCoord); const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank); for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)]; // are ready to fill middle (within input dimension range) if(xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1; // means fill from left else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2; // means fill from right } const auto xOffset = shape::getOffset(0, xShape, xStride, xzCoord, rank); z[zOffset] = x[xOffset]; } } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* padVal) { hipLaunchKernelGGL(( padCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal); } BUILD_DOUBLE_TEMPLATE(template void padCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* vPadVal), LIBND4J_TYPES, INTEGER_TYPES); /////////////////////////////////////////////////////////////////// void pad(nd4j::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) { PointersManager manager(context, "pad"); NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue}); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128; const auto xType = input.dataType(); const auto yType = paddings.dataType(); BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.getSpecialBuffer(), input.getSpecialShapeInfo(), paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), padValue.getSpecialBuffer()), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue}); manager.synchronize(); } 
/////////////////////////////////////////////////////////////////// template<typename T> __global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len, totalThreads; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len); const Nd4jLong index = x[xOffset]; const auto zOffset = shape::getIndexOffset(index, zShapeInfo, len); z[zOffset] = i; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { hipLaunchKernelGGL(( invertPermutationCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo); } BUILD_SINGLE_TEMPLATE(template void invertPermutationCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo), LIBND4J_TYPES); //////////////////////////////////////////////////////////////////////// void invertPermutation(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "invertPermutation"); NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ T* sharedMem; __shared__ int xRank, zRank; // xRank = zRank + 2 __shared__ Nd4jLong xLen, zLen, *coordsMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<T*>(shmem); coordsMem = reinterpret_cast<Nd4jLong*>(shmem + blockDim.x * sizeof(T)); xRank = shape::rank(xShapeInfo); zRank = shape::rank(zShapeInfo); xLen = shape::length(xShapeInfo); zLen = shape::length(zShapeInfo); // corresponds to number of matrices } __syncthreads(); Nd4jLong* coords = coordsMem + threadIdx.x * xRank; for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), m, zLen, coords); const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank); sharedMem[threadIdx.x] = 0; for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) { coords[zRank] = coords[zRank + 1] = i; const auto xOffset = 
shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank); sharedMem[threadIdx.x] += x[xOffset]; } __syncthreads(); // aggregate sum for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[zOffset] = *sharedMem; } } /////////////////////////////////////////////////////////////////// template<typename T> static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint diagLen) { hipLaunchKernelGGL(( traceCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diagLen); } BUILD_SINGLE_TEMPLATE(template void traceCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void trace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) { PointersManager manager(context, "trace"); const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * (sizeof(Nd4jLong) * input.rankOf() + input.sizeOfT()) + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int rank, areSameOffsets; // xRank = zRank __shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen = zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); rank = shape::rank(xShapeInfo); len = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { shape::index2coords(rank, zShapeInfo + 1, i, len, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col z[zOffset] = 0; else z[zOffset] = x[areSameOffsets ? 
zOffset : shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { hipLaunchKernelGGL(( triuBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diag); } BUILD_SINGLE_TEMPLATE(template void triuBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void triuBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) { const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * gradO.rankOf() + 128; PointersManager manager(context, "triuBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int xRank, zRank; // xRank >= zRank __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads, *sharedMem; // xLen >= zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); xRank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); numOfXOffsets = shape::length(xShapeInfo) / zLen; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto memBuff = sharedMem + threadIdx.x * 2 * xRank; auto xOffsets = globMem + tid * numOfXOffsets; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const auto zOffset = shape::getIndexOffset(i, zShapeInfo, zLen); shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff); z[zOffset] = x[xOffsets[0]]; // first offset for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets z[zOffset] += x[xOffsets[j]]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { hipLaunchKernelGGL(( tileBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, globMem); } BUILD_SINGLE_TEMPLATE(template void tileBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* 
vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem), FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// void tileBP(nd4j::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) { NDArray memBuff('c', gradO.getShapeAsVector(), nd4j::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 2 * gradO.rankOf() + 128; PointersManager manager(context, "tileBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff}); BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfInd; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX); const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { hipLaunchKernelGGL(( scatterUpdateCuda<T>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfInd, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(nd4j::LaunchContext* context, NDArray& input, NDArray& updates, const std::vector<int>* intArgs) { const int opCode = (*intArgs)[0]; const int numOfDims = (*intArgs)[1]; const int numOfInd = (*intArgs)[2 + numOfDims]; std::vector<int> tadDimensions(numOfDims); for (int e = 2; e < 2 + numOfDims; e++) tadDimensions[e-2] = (*intArgs)[e]; auto packX = 
ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), tadDimensions); auto packY = ConstantTadHelper::getInstance()->tadForDimensions(updates.getShapeInfo(), tadDimensions); NDArray indices(const_cast<int*>(intArgs->data()) + numOfDims + 3, 'c', {numOfInd}, nd4j::DataType::INT32, context); PointersManager manager(context, "scatterUpdate"); NDArray::prepareSpecialUse({&input}, {&input, &updates, &indices}); BUILD_SINGLE_SELECTOR(input.dataType(), scatterUpdateCudaLauncher, (context->getCudaStream(), opCode, numOfInd, input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), updates.specialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), reinterpret_cast<int*>(indices.getSpecialBuffer())), LIBND4J_TYPES); NDArray::registerSpecialUse({&input}, {&input, &updates, &indices}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// // x - input, y - indices, z - output template<typename X, typename Y> __global__ static void gatherNDCuda(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int xRank, yRank, zRank, maxRank, yLastDim; __shared__ Nd4jLong zLen, totalThreads, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); xRank = shape::rank(xShapeInfo); yRank = shape::rank(yShapeInfo); zRank = shape::rank(zShapeInfo); maxRank = nd4j::math::nd4j_max<int>(yRank, nd4j::math::nd4j_max<int>(xRank, zRank)); zLen = shape::length(zShapeInfo); yLastDim = yShapeInfo[yRank]; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coord = sharedMem + threadIdx.x * maxRank; Nd4jLong *zCoordStart, *xCoordStart; if(yLastDim == xRank) { zCoordStart = coord; xCoordStart = coord; } if(zRank >= xRank) { zCoordStart = coord; xCoordStart = coord + zRank - xRank; } else { zCoordStart = coord + xRank - zRank; xCoordStart = coord; } const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(zRank, zShapeInfo + 1, i, zLen, zCoordStart); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + zRank + 1, zCoordStart, zRank); // last y coordinate int coordToRestore; if(yLastDim != xRank) coordToRestore = static_cast<int>(zCoordStart[yRank - 1]); zCoordStart[yRank - 1] = 0; // last y coordinate const auto yOffset = shape::getOffset(0, yShapeInfo + 1, yShapeInfo + yRank + 1, zCoordStart, yRank); //restore z coordinate if(yLastDim != xRank) zCoordStart[yRank - 1] = coordToRestore; // construct coordinates for x for(uint j = 0; j < yLastDim; ++j) xCoordStart[j] = y[yOffset + j * yShapeInfo[2 * yRank]]; // last stride const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + xRank + 1, xCoordStart, xRank); z[zOffset] = x[xOffset]; } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void gatherNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { hipLaunchKernelGGL(( gatherNDCuda<X,Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } 
BUILD_DOUBLE_TEMPLATE(template void gatherNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo), LIBND4J_TYPES, INTEGER_TYPES); /////////////////////////////////////////////////////////////////// void gatherND(nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) { const int maxRank = nd4j::math::nd4j_max<int>(indices.rankOf(), nd4j::math::nd4j_max<int>(input.rankOf(), output.rankOf())); const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * maxRank + 128; const auto xType = input.dataType(); const auto yType = indices.dataType(); PointersManager manager(context, "gatherND"); NDArray::prepareSpecialUse({&output}, {&input, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, gatherNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), indices.getSpecialBuffer(), indices.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&output}, {&input, &indices}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// // x - input, y - gradO, z - gradI template<typename X, typename Z> __global__ static void clipByNormBPWholeArrCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= shape::length(zShapeInfo)) return; const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Z*>(vy); auto z = reinterpret_cast<Z*>(vz); auto reducBuff = reinterpret_cast<Z*>(vreducBuff); uint* count = reinterpret_cast<uint*>(vreducBuff) + 16384; __shared__ Z* shMem; __shared__ Nd4jLong len; __shared__ bool amIinLastBlock; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shMem = reinterpret_cast<Z*>(shmem); len = shape::length(zShapeInfo); // xLen = yLen = zLen } __syncthreads(); // fill shared memory with array elements const auto xVal = x[shape::getIndexOffset(tid, xShapeInfo, len)]; const auto yVal = y[shape::getIndexOffset(tid, yShapeInfo, len)]; shMem[2*threadIdx.x] = static_cast<Z>(xVal * xVal); // for norm shMem[2*threadIdx.x + 1] = static_cast<Z>(xVal * yVal); // for input * gradO __syncthreads(); // accumulate sum per block for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads && tid + activeThreads < len) { shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)]; shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1]; } __syncthreads(); } // store accumulated sums in reduction buffer (reducBuff) if (threadIdx.x == 0) { reducBuff[2*blockIdx.x] = shMem[0]; reducBuff[2*blockIdx.x + 1] = shMem[1]; __threadfence(); amIinLastBlock = gridDim.x == 1 || (atomicInc(count, gridDim.x) == gridDim.x - 1); } __syncthreads(); // shared memory of last block is used for final summation of values stored in reduction buffer if (amIinLastBlock) { for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { shMem[2*threadIdx.x] = (i == threadIdx.x ) ? 
reducBuff[2*i] : reducBuff[2*i] + shMem[2*threadIdx.x]; shMem[2*threadIdx.x + 1] = (i == threadIdx.x ) ? reducBuff[2*i + 1] : reducBuff[2*i + 1] + shMem[2*threadIdx.x + 1]; } __syncthreads(); // accumulate sum for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < gridDim.x) { shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)]; shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1]; } __syncthreads(); } if (threadIdx.x == 0) { reducBuff[0] = math::nd4j_sqrt<Z,Z>(shMem[0]); reducBuff[1] = shMem[1]; count = 0; } } } ////////////////////////////////////////////////////////////////////////// // x - input, y - gradO, z - gradI template<typename X, typename Z> __global__ static void clipByNormBPCalcGradCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const Nd4jLong len = shape::length(zShapeInfo); // xLen = yLen = zLen if(tid >= len) return; const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Z*>(vy); auto z = reinterpret_cast<Z*>(vz); __shared__ Z norm, sumOfProd; if (threadIdx.x == 0) { norm = reinterpret_cast<Z*>(vreducBuff)[0]; sumOfProd = reinterpret_cast<Z*>(vreducBuff)[1]; } __syncthreads(); const auto yOffset = shape::getIndexOffset(tid, yShapeInfo, len); const auto zOffset = shape::getIndexOffset(tid, zShapeInfo, len); if(norm > clipNormVal) { const auto xOffset = shape::getIndexOffset(tid, xShapeInfo, len); const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm) z[zOffset] = clipNormVal * (factor1 * y[yOffset] - factor2 * sumOfProd * x[xOffset]); } else { z[zOffset] = y[yOffset]; } } ////////////////////////////////////////////////////////////////////////// // x - input, y - gradO, z - gradI template<typename X, typename Z> __global__ static void clipByNormBPTadsCuda(const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const void* vy, const Nd4jLong* yTadShapeInfo, const Nd4jLong* yTadOffsets, void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const Z clipNormVal) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Z*>(vy); auto z = reinterpret_cast<Z*>(vz); __shared__ Z* shMem; __shared__ Nd4jLong tadLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shMem = reinterpret_cast<Z*>(shmem); tadLen = shape::length(zTadShapeInfo); // xTadLen = yTadLen = zTadLen } __syncthreads(); const auto* xTad = x + xTadOffsets[blockIdx.x]; const auto* yTad = y + yTadOffsets[blockIdx.x]; auto* zTad = z + zTadOffsets[blockIdx.x]; // *** FIRST STAGE - ACCUMULATE REQUIRED SUMS *** // Z norm = 0; Z sumOfProd = 0; for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen); const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen); shMem[2*threadIdx.x] = static_cast<Z>(xTad[xOffset] * xTad[xOffset]); // for norm shMem[2*threadIdx.x + 1] = static_cast<Z>(xTad[xOffset] * yTad[yOffset]); // for input * gradO __syncthreads(); // accumulate sum per block for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads && i + activeThreads < tadLen) { shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + 
activeThreads)]; shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1]; } __syncthreads(); } norm += shMem[0]; sumOfProd += shMem[1]; } // *** SECOND STAGE - GRADIENT CALCULATION *** // norm = math::nd4j_sqrt<Z,Z>(norm); for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) { const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen); const auto zOffset = shape::getIndexOffset(i, zTadShapeInfo, tadLen); if(norm > clipNormVal) { const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen); const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm) zTad[zOffset] = clipNormVal * (factor1 * yTad[yOffset] - factor2 * sumOfProd * xTad[xOffset]); } else { zTad[zOffset] = yTad[yOffset]; } } } ////////////////////////////////////////////////////////////////////////// template<typename X, typename Z> static void clipByNormBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, const void* vy, const Nd4jLong* yShapeInfo, const Nd4jLong* yTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, void* vreducBuff, const double clipNormVal) { if(xTadOffsets == nullptr) { // means whole array hipLaunchKernelGGL(( clipByNormBPWholeArrCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal)); hipLaunchKernelGGL(( clipByNormBPCalcGradCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal)); } else // means tads using hipLaunchKernelGGL(( clipByNormBPTadsCuda<X,Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, xTadOffsets, vy, yShapeInfo, yTadOffsets, vz, zShapeInfo, zTadOffsets, static_cast<Z>(clipNormVal)); } BUILD_DOUBLE_TEMPLATE(template void clipByNormBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const Nd4jLong* xTadOffsets, const void *vy, const Nd4jLong *yShapeInfo, const Nd4jLong* yTadOffsets, void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, void* vreducBuff, const double clipNormVal), LIBND4J_TYPES, FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// void clipByNormBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) { PointersManager manager(context, "clipByNormBP"); const double clipNormVal = clipNorm.e<double>(0); const auto xType = input.dataType(); const auto zType = gradI.dataType(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int sharedMem = threadsPerBlock * 2 * input.sizeOfT() + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); if(dimensions.empty() || dimensions.size() == input.rankOf()) { // means whole array const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), nullptr, gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), nullptr, gradI.getSpecialBuffer(), 
gradI.getSpecialShapeInfo(), nullptr, context->getReductionPointer(), clipNormVal), LIBND4J_TYPES, FLOAT_TYPES); } else { // means tads using auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions); auto packY = ConstantTadHelper::getInstance()->tadForDimensions(gradO.getShapeInfo(), dimensions); auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), dimensions); const int blocksPerGrid = packX.numberOfTads(); BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradO.getSpecialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), gradI.getSpecialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), nullptr, clipNormVal), LIBND4J_TYPES, FLOAT_TYPES); } NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } template <typename T> static __global__ void swapShuffleKernel(T* input, Nd4jLong* shape, Nd4jLong firstDim, Nd4jLong len, nd4j::graph::RandomGenerator* rng) { auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; if (i != r) { T e0 = input[shape::getIndexOffset(i, shape, len)]; T e1 = input[shape::getIndexOffset(r, shape, len)]; //math::nd4j_swap<T>(input(i), input(r)); input[shape::getIndexOffset(i, shape, len)] = e1; input[shape::getIndexOffset(r, shape, len)] = e0; } } } template <typename T> static __global__ void fillShuffleKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong firstDim, Nd4jLong len, int* indices, nd4j::graph::RandomGenerator* rng) { // PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold()) auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; output[shape::getIndexOffset(i, outputShape, len)] = input[shape::getIndexOffset(indices[r], inputShape, len)]; if(i != r) { output[shape::getIndexOffset(r, outputShape, len)] = input[shape::getIndexOffset(indices[i], inputShape, len)]; // output.p(r, input.e<T>(indices[i])); // math::nd4j_swap<int>(indices[i], indices[r]); atomicExch(&indices[i], indices[r]); } } } ////////////////////////////////////////////////////////////////////////// template <typename T> void randomShuffle_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) { // check edge cases first int temp; const int firstDim = input.sizeAt(0); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input}); if(input.lengthOf() == 1 || firstDim == 1) { if(!isInplace) output.assign(input); } else if (input.isVector() || shape::isLikeVector(input.getShapeInfo(), temp)) { // apply Fisher-Yates shuffle nd4j::graph::RandomGenerator* dRandom = nullptr; hipMalloc(&dRandom, sizeof(nd4j::graph::RandomGenerator)); hipMemcpy(dRandom, &rng, sizeof(nd4j::graph::RandomGenerator), hipMemcpyHostToDevice); T* inputBuf = reinterpret_cast<T*>(input.specialBuffer()); if(isInplace) { hipLaunchKernelGGL(( swapShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, input.specialShapeInfo(), firstDim, input.lengthOf(), dRandom); } else { std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); 
hipMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), hipMemcpyDeviceToDevice); //output.p<T>(Nd4jLong(0), input.e<T>(0)); PointersManager pointersManager(context, "helper::randomShuffle_"); int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int))); T* outputBuf = reinterpret_cast<T*>(output.specialBuffer()); hipLaunchKernelGGL(( fillShuffleKernel<T>), dim3(128), dim3(256), 1024, *stream, inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, input.lengthOf(), indicesDev, dRandom); pointersManager.synchronize(); } // rng.rewindH(firstDim - 1); hipFree(dRandom); } else { // evaluate sub-arrays list of input array through all dimensions excluding first one std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0}); auto subArrsListIn = input.allTensorsAlongDimension(dimensions); // apply Fisher-Yates shuffle if(isInplace) { PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->elementwiseThreshold()) for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; if(i != r) subArrsListIn->at(i)->swapUnsafe(*subArrsListIn->at(r)); } } else { // evaluate sub-arrays list of output array through all dimensions excluding first one auto subArrsListOut = output.allTensorsAlongDimension(dimensions); std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); bool isZeroShuffled = false; PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold()) for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; subArrsListOut->at(i)->assign(subArrsListIn->at(indices[r])); if(r == 0) isZeroShuffled = true; if(i != r) { subArrsListOut->at(r)->assign(subArrsListIn->at(indices[i])); math::nd4j_swap<int>(indices[i], indices[r]); } } if(!isZeroShuffled) subArrsListOut->at(0)->assign(subArrsListIn->at(0)); delete subArrsListOut; } rng.rewindH(firstDim-1); delete subArrsListIn; } NDArray::registerSpecialUse({&output}, {&input}); } void randomShuffle(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) { BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES); ////////////////////////////////////////////////////////////////////////// void eye(nd4j::LaunchContext * context, NDArray& output) { output.setIdentity(); } ////////////////////////////////////////////////////////////////////////// template <typename T, typename Z> static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<Z*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T mVal = -DataTypeUtils::max<T>(); Z mIdx(0); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); auto val = x[shape::getIndexOffset(e, xShape, length)];; if (mVal < val) mIdx = static_cast<Z>(e); } __syncthreads(); output[shape::getIndexOffset(e, outputShape, length)] = mIdx; } } template <typename T, typename Z> static void 
mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeMaxIndex"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); hipLaunchKernelGGL(( global_mergeMaxIndex_<T,Z>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INTEGER_TYPES); } BUILD_DOUBLE_TEMPLATE(template void mergeMaxIndex_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T mVal = -DataTypeUtils::max<T>(); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); auto val = x[shape::getIndexOffset(e, xShape, length)];; if (mVal < val) mVal = val; } __syncthreads(); output[shape::getIndexOffset(e, outputShape, length)] = mVal; } } template<typename T> static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeMax"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); hipLaunchKernelGGL(( global_mergeMax_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } BUILD_SINGLE_TEMPLATE(template void mergeMax_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES); void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void 
*voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T sum(0.0f); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); sum += x[shape::getIndexOffset(e, xShape, length)]; } output[shape::getIndexOffset(e, outputShape, length)] = sum / numArrays; } } template<typename T> static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeAvg"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); hipLaunchKernelGGL(( global_mergeAvg_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } BUILD_SINGLE_TEMPLATE(template void mergeAvg_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES); void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), LIBND4J_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T sum(0.0f); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); sum += x[shape::getIndexOffset(e, xShape, length)]; } output[shape::getIndexOffset(e, outputShape, length)] = sum; } } template<typename T> static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeAdd"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); hipLaunchKernelGGL(( global_mergeAdd_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), 
LIBND4J_TYPES); void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), LIBND4J_TYPES); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void clipByNormInplaceKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) { for (int arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) { __shared__ T* z; __shared__ Nd4jLong len; if (threadIdx.x == 0) { len = shape::length(shape); z = inputBuffer + inputOffsets[arr]; } __syncthreads(); for (int j = threadIdx.x; j < len; j+= blockDim.x) { auto xIndex = shape::getIndexOffset(j, shape, len); if(norm2Buf[arr] > clipNorm) z[xIndex] *= clipNorm / norm2Buf[arr]; // case with ews = 1 and ordering is 'c' } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void clipByNormKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* outputBuffer, Nd4jLong* outputShape, Nd4jLong* outputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) { for (Nd4jLong arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) { __shared__ T* x, *z; __shared__ Nd4jLong lenX, lenZ; __shared__ T norm2; if (threadIdx.x == 0) { lenX = shape::length(shape); x = inputBuffer + inputOffsets[arr]; z = outputBuffer + outputOffsets[arr]; lenZ = shape::length(outputShape); norm2 = norm2Buf[shape::getIndexOffset(arr, norm2shape, numOfSubArrs)]; //printf("%d: %lf (vs %lf) %lld %lld\n", arr, norm2, clipNorm, lenX, lenZ); } __syncthreads(); for (Nd4jLong j = threadIdx.x; j < lenZ; j+= blockDim.x) { auto xIndex = shape::getIndexOffset(j, shape, lenX); auto zIndex = shape::getIndexOffset(j, outputShape, lenZ); if(norm2 > clipNorm) { z[zIndex] = x[xIndex] * clipNorm / norm2; // case with ews = 1 and ordering is 'c' } else { z[zIndex] = x[xIndex]; } //printf("%lld: %lf %lf\n", j, z[zIndex], x[xIndex]); } __syncthreads(); } } ////////////////////////////////////////////////////////////////////////// template<typename T> static void clipByNorm_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, NDArray const& clipNormA, const bool isInplace) { const int rank = input.rankOf(); auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions); clipNormA.syncToHost(); //norm2.printBuffer("Norm2"); T const clipNorm = clipNormA.e<T>(0); //clipNormA.printBuffer("ClipNorm"); auto stream = context->getCudaStream(); if (isInplace) { if(norm2.lengthOf() == 1) { norm2.syncToHost(); T norm2Val = norm2.e<T>(0); if(norm2Val > clipNorm) input *= clipNorm / norm2Val; } else { std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions); const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions); //auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimsToExclude); T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer()); T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer()); hipLaunchKernelGGL(( clipByNormInplaceKernel<T>), dim3(256), dim3(512), 1024, *stream, numOfSubArrs, inputBuffer, packX.specialShapeInfo(), 
packX.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm); } } else { if(norm2.lengthOf() == 1) { norm2.syncToHost(); T norm2Val = norm2.e<T>(0); if(norm2Val > clipNorm) output.assign( input * (clipNorm / norm2Val)); else output.assign( input ); } else { std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions); const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimensions); T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer()); T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer()); T* outputBuffer = reinterpret_cast<T*>(output.specialBuffer()); hipLaunchKernelGGL(( clipByNormKernel<T>), dim3(256), dim3(512), 1024, *stream, numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), outputBuffer, packZ.specialShapeInfo(), packZ.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm); } } } void clipByNorm(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) { BUILD_SINGLE_SELECTOR(output.dataType(), clipByNorm_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByNorm_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES); template <typename T> static void clipByGlobalNorm_(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) { } void clipByGlobalNorm(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) { BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// template<typename T> static void clipByAveraged_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) { auto cn = clipNorm.e<T>(0); if (dimensions.size() == 0) { // all-reduce T n2 = input.reduceNumber(reduce::Norm2).e<T>(0) / input.lengthOf(); if (n2 <= cn) { if (!isInplace) output.assign(input); } else { const T factor = cn / n2; //auto lambda = LAMBDA_T(_x, factor) { return _x * factor; }; //input.applyLambda<T>(lambda, &output); output.assign(input * factor); } } else { // along dimension auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions, false); if (!isInplace) output.assign(input); auto tads = output.allTensorsAlongDimension(dimensions); auto outTads = output.allTensorsAlongDimension(dimensions); // TODO: make this CUDA-compliant somehow for (int e = 0; e < tads->size(); e++) { T n2 = norm2.e<T>(e) / tads->at(e)->lengthOf(); const T factor = cn / n2; if (n2 > cn) { //auto lambda = LAMBDA_T(_x, factor) {return _x * factor;}; 
tads->at(e)->applyScalar(scalar::Multiply, factor, outTads->at(e));//applyLambda<T>(lambda, &output); } } delete tads; delete outTads; } } void clipByAveraged(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) { BUILD_SINGLE_SELECTOR(input.dataType(), clipByAveraged_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByAveraged_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES); /* if (d1 > params[1]) return params[1]; else if (d1 < params[0]) return params[0]; else return d1; */ template <typename T> static void __global__ clipByValueKernel(void* input, Nd4jLong* inputShape, void* output, Nd4jLong* outputShape, double leftBound, double rightBound) { __shared__ T* outputBuf; __shared__ T* inputBuf; __shared__ Nd4jLong length; __shared__ bool linearBuffers; if (threadIdx.x == 0) { outputBuf = reinterpret_cast<T *>(output); inputBuf = reinterpret_cast<T *>(input); length = shape::length(inputShape); linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1; } __syncthreads(); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { if (linearBuffers) { if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound; else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound; else outputBuf[e] = inputBuf[e]; } else { auto inputOffset = shape::getIndexOffset(e, inputShape, length); auto outputOffset = shape::getIndexOffset(e, outputShape, length); if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound; else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound; else outputBuf[outputOffset] = inputBuf[outputOffset]; } } } template <typename T> static void clipByValue_(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) { auto stream = context->getCudaStream(); if (!input.isActualOnDeviceSide()) input.syncToDevice(); NDArray::prepareSpecialUse({&output}, {&input}); hipLaunchKernelGGL(( clipByValueKernel<T>), dim3(256), dim3(512), 8192, *stream, input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound); NDArray::registerSpecialUse({&output}, {&input}); } void clipByValue(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) { BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByValue_, (nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mirrorPadLinearKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) { __shared__ T const* x; __shared__ T* z; if (threadIdx.x == 0) { x = reinterpret_cast<T const*>(vx); z = reinterpret_cast<T*>(vz); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = 
blockDim.x * gridDim.x; for(int i = start; i < zLen; i+= step) { auto zIndex = shape::getIndexOffset(i, zShape, zLen); auto xIndex = shape::getIndexOffset(len - i, xShape, xLen); if (i < leftSide) // left side xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape, xLen); else if(i >= leftSide && i < leftSide + xLen) // middle xIndex = shape::getIndexOffset(i - leftSide, xShape, xLen); // else // right side // z[i] = x[len - i]; z[zIndex] = x[xIndex]; } } template <typename F, typename I> static __global__ void mirrorPadKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, Nd4jLong* paddingShape, int reflBorder) { __shared__ F const* x; __shared__ I const* pads; __shared__ F* z; __shared__ Nd4jLong zRank, rank; __shared__ Nd4jLong* xShapeOf, *xStrideOf, *padsShapeOf, *padsStrideOf; __shared__ Nd4jLong* zShapeOf, *zStrideOf; __shared__ Nd4jLong* xIdx; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; xIdx = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(xShape); x = reinterpret_cast<F const*>(vx);// pads = reinterpret_cast<I const*>(paddings); z = reinterpret_cast<F*>(vz); xShapeOf = shape::shapeOf(xShape); xStrideOf = shape::stride(xShape); zShapeOf = shape::shapeOf(zShape); zRank = shape::rank(zShape); zStrideOf = shape::stride(zShape); padsShapeOf = shape::shapeOf(paddingShape); padsStrideOf = shape::stride(paddingShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(Nd4jLong i = start; i < outLen; i+= step) { auto xzCoord = xIdx + threadIdx.x * rank; //auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank; shape::index2coords(rank, zShapeOf, i, xzCoord); auto outOffset = shape::getOffset(0, zShapeOf, zStrideOf, xzCoord, rank); // auto intStep = blockDim.y * gridDim.y; for(int j = 0; j < rank; j++) { const Nd4jLong inLen = shape::sizeAt(xShape, j); Nd4jLong coords[2] = {j, 0}; auto padOffset = shape::getOffset(0, padsShapeOf, padsStrideOf, coords, 2); // padding already has rank 2 const auto leftSide = pads[padOffset]; const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder; if(xzCoord[j] < leftSide) // left side xzCoord[j] = leftSideCorrected - xzCoord[j]; else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle xzCoord[j] = xzCoord[j] - leftSide; else if (len > xzCoord[j]) // right side xzCoord[j] = len - xzCoord[j]; else xzCoord[j] = xzCoord[j] - len; } auto inOffset = shape::getOffset(0, xShapeOf, xStrideOf, xzCoord, rank); z[outOffset] = x[inOffset]; } } template<typename F, typename I> static void mirrorPad_(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { // mode: 0 - REFLECT, else - SYMMETRIC const int reflBorder = (bool)mode ? 
1 : 0; const int rank = input.rankOf(); const Nd4jLong outLen = output.lengthOf(); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input, &paddings}); if(rank <= 1) { const Nd4jLong inLen = input.lengthOf(); const auto leftSide = paddings.e<Nd4jLong>(0); const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder; hipLaunchKernelGGL(( mirrorPadLinearKernel<F>), dim3(256), dim3(512), 256, *stream, input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen); nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) failed"); } else { hipLaunchKernelGGL(( mirrorPadKernel<F, I>), dim3(256), dim3(256), 8192, *stream, input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), reflBorder); nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) failed"); } NDArray::registerSpecialUse({&output}, {&input, &paddings}); } void mirrorPad(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INTEGER_TYPES); } BUILD_DOUBLE_TEMPLATE(template void mirrorPad_, (nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// void concat(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output, const int axis) { const int numOfArrs = inArrs.size(); for(int i = 0; i < numOfArrs; ++i) if(!inArrs[i]->isActualOnDeviceSide()) inArrs[i]->syncToDevice(); const int rank = inArrs[0]->rankOf(); const int rank2 = 2*rank; std::vector<std::vector<Nd4jLong>> indices(numOfArrs, std::vector<Nd4jLong>(rank2,0)); // take into account indices for first array indices[0][2 * axis + 1] = inArrs[0]->sizeAt(axis); // loop through the rest of input arrays for(int i = 1; i < numOfArrs; ++i) { indices[i][2 * axis] = indices[i-1][2 * axis + 1]; // index start from indices[i][2 * axis + 1] = indices[i-1][2 * axis + 1] + inArrs[i]->sizeAt(axis); // index end with (excluding) } std::vector<NDArray*> outSubArrs(numOfArrs); for(int i = 0; i < numOfArrs; ++i) outSubArrs[i] = new NDArray(output(indices[i], true)); // prepare arrays of pointers on buffers and shapes std::vector<void*> hOutBuffers(numOfArrs), hInBuffers(numOfArrs); std::vector<Nd4jLong*> hOutShapeInfo(numOfArrs), hInShapeInfo(numOfArrs); for(int i = 0; i < numOfArrs; ++i) { hOutBuffers[i] = outSubArrs[i]->getSpecialBuffer(); hInBuffers[i] = inArrs[i]->getSpecialBuffer(); hOutShapeInfo[i] = outSubArrs[i]->getSpecialShapeInfo(); hInShapeInfo[i] = inArrs[i]->getSpecialShapeInfo(); } // allocate and copy all buffers and shapes arrays to global memory PointersManager manager(context, "helpers::concat"); void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*)); void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*)); void* dOutShapeInfo = manager.replicatePointer(hOutShapeInfo.data(), 
hOutShapeInfo.size() * sizeof(Nd4jLong*)); BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), concatCudaLauncher, (numOfArrs, context->getCudaStream(), dInBuffers, dInShapeInfo, dOutBuffers, dOutShapeInfo), LIBND4J_TYPES); manager.synchronize(); for(int i = 0; i < numOfArrs; ++i) delete outSubArrs[i]; for(int i = 0; i < numOfArrs; ++i) inArrs[i]->tickReadHost(); output.tickWriteDevice(); } template <typename X, typename Y> static _CUDA_G void scatterSimpleKernel(void *vx, Nd4jLong *xTadShape, Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, void *vi, Nd4jLong *iShapeInfo, Nd4jLong iLength, void *vu, Nd4jLong *uShapeInfo, Nd4jLong uLength) { auto u = reinterpret_cast<X*>(vu); auto indices = reinterpret_cast<Y*>(vi); auto tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i]; auto idx = indices[shape::getIndexOffset(i, iShapeInfo, iLength)]; x[shape::getIndexOffset(idx, xTadShape, xLength)] = u[shape::getIndexOffset(i, uShapeInfo, uLength)]; } } template <typename X, typename Y> void scatterSimple_(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dims); auto xLength = shape::length(packX.primaryShapeInfo()); auto iLength = indices.lengthOf(); auto uLength = updates.lengthOf(); hipLaunchKernelGGL(( scatterSimpleKernel<X,Y>), dim3(256), dim3(256), 1024, *context->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.getSpecialBuffer(), indices.getSpecialShapeInfo(), iLength, updates.getSpecialBuffer(), updates.getSpecialShapeInfo(), uLength); } void scatterSimple(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto xType = input.dataType(); auto yType = indices.dataType(); if (opId != 6) throw std::runtime_error("scatterSimple: only copy op is supported"); NDArray::prepareSpecialUse({&input}, {&updates, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&input}, {&updates, &indices}); } } } }
409c48c2bafbfb3b06702f09125a1eb0cf4e2c8a.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <PointersManager.h> #include <ConstantTadHelper.h> namespace nd4j { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void concatCuda(const int numOfArrs, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) { __shared__ int arrIdx, blocksPerArr; __shared__ T *x, *z; __shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLen, arrLenPerBlock, start, end; if (threadIdx.x == 0) { blocksPerArr = (gridDim.x + numOfArrs - 1) / numOfArrs; // ceil arrIdx = blockIdx.x / blocksPerArr; x = reinterpret_cast<T*>(reinterpret_cast<void**>(pVx)[arrIdx]); z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[arrIdx]); xShapeInfo = reinterpret_cast<Nd4jLong**>(pxShapeInfo)[arrIdx]; zShapeInfo = reinterpret_cast<Nd4jLong**>(pzShapeInfo)[arrIdx]; arrLen = shape::length(xShapeInfo); arrLenPerBlock = (arrLen + blocksPerArr - 1) / blocksPerArr; // ceil start = (blockIdx.x % blocksPerArr) * arrLenPerBlock; end = (start + arrLenPerBlock) > arrLen ? 
arrLen : (start + arrLenPerBlock); } __syncthreads(); for (Nd4jLong i = start + threadIdx.x; i < end; i += blockDim.x) z[shape::getIndexOffset(i, zShapeInfo, arrLen)] = x[shape::getIndexOffset(i, xShapeInfo, arrLen)]; } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void concatCudaLauncher(const int numOfArrs, const cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo) { concatCuda<T><<<512, 256, 1024, *stream>>>(numOfArrs, pVx, pxShapeInfo, pVz, pzShapeInfo); } BUILD_SINGLE_TEMPLATE(template void concatCudaLauncher, (const int numOfArrs, const cudaStream_t *stream, void* pVx, void* pxShapeInfo, void* pVz, void* pzShapeInfo), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// // x - input, y - paddings, z - output template<typename X, typename Y> __global__ static void padCuda(const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void *vPadVal) { const X padVal = *reinterpret_cast<const X*>(vPadVal); const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int rank, rankMinusOne; __shared__ Nd4jLong zLen, yLen, totalThreads, *coords, *xShape, *zShape, *xStride, *zStride, shift1, shift2, yStride0; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; coords = reinterpret_cast<Nd4jLong*>(shmem); zLen = shape::length(zShapeInfo); xShape = shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)); zShape = shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)); xStride = shape::stride(const_cast<Nd4jLong*>(xShapeInfo)); zStride = shape::stride(const_cast<Nd4jLong*>(zShapeInfo)); yStride0 = shape::stride(const_cast<Nd4jLong*>(yShapeInfo))[0]; rank = shape::rank(xShapeInfo); zLen = shape::length(zShapeInfo); yLen = 2 * rank; rankMinusOne = rank - 1; totalThreads = gridDim.x * blockDim.x; shift1 = mode == 1 ? 0 : 1; // REFLECT : SYMMETRIC shift2 = mode == 1 ? 
2 : 1; // REFLECT : SYMMETRIC } __syncthreads(); auto xzCoord = coords + threadIdx.x * rank; // we use xzCoord storage both for x and z arrays const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if(mode == 0) { // CONSTANT case for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(rank, zShape, i, zLen, xzCoord); const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank); bool within = true; for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; const auto left = y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)]; if(xzCoord[j] < left || xzCoord[j] >= left + xShape[j]) {within = false; break;} else {xzCoord[j] = xzCoord[j] - left;} } if(within) z[zOffset] = x[shape::getOffset(0, xShape, xStride, xzCoord, rank)]; else z[zOffset] = padVal; } } else { // REFLECT and SYMMETRIC cases for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(rank, zShape, i, zLen, xzCoord); const auto zOffset = shape::getOffset(0, zShape, zStride, xzCoord, rank); for(int j = rankMinusOne; j >= 0; --j) { if(xShape[j] == zShape[j]) continue; xzCoord[j] = xzCoord[j] - y[shape::getIndexOffset(yStride0 * j, yShapeInfo, yLen)]; // are ready to fill middle (within input dimension range) if(xzCoord[j] < 0) xzCoord[j] = -xzCoord[j] - shift1; // means fill from left else if(xzCoord[j] >= xShape[j]) xzCoord[j] = 2 * xShape[j] - xzCoord[j] - shift2; // means fill from right } const auto xOffset = shape::getOffset(0, xShape, xStride, xzCoord, rank); z[zOffset] = x[xOffset]; } } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void padCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* padVal) { padCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(mode, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, padVal); } BUILD_DOUBLE_TEMPLATE(template void padCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const int mode, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const void* vPadVal), LIBND4J_TYPES, INTEGER_TYPES); /////////////////////////////////////////////////////////////////// void pad(nd4j::LaunchContext * context, const int mode, const NDArray& input, const NDArray& paddings, NDArray& output, const NDArray& padValue) { PointersManager manager(context, "pad"); NDArray::prepareSpecialUse({&output}, {&input, &paddings, &padValue}); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * output.rankOf() + 128; const auto xType = input.dataType(); const auto yType = paddings.dataType(); BUILD_DOUBLE_SELECTOR(xType, yType, padCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), mode, input.getSpecialBuffer(), input.getSpecialShapeInfo(), paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), padValue.getSpecialBuffer()), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&output}, {&input, &paddings, &padValue}); manager.synchronize(); } 
/////////////////////////////////////////////////////////////////// template<typename T> __global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len, totalThreads; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, len); const Nd4jLong index = x[xOffset]; const auto zOffset = shape::getIndexOffset(index, zShapeInfo, len); z[zOffset] = i; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { invertPermutationCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo); } BUILD_SINGLE_TEMPLATE(template void invertPermutationCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo), LIBND4J_TYPES); //////////////////////////////////////////////////////////////////////// void invertPermutation(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "invertPermutation"); NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ T* sharedMem; __shared__ int xRank, zRank; // xRank = zRank + 2 __shared__ Nd4jLong xLen, zLen, *coordsMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<T*>(shmem); coordsMem = reinterpret_cast<Nd4jLong*>(shmem + blockDim.x * sizeof(T)); xRank = shape::rank(xShapeInfo); zRank = shape::rank(zShapeInfo); xLen = shape::length(xShapeInfo); zLen = shape::length(zShapeInfo); // corresponds to number of matrices } __syncthreads(); Nd4jLong* coords = coordsMem + threadIdx.x * xRank; for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix shape::index2coords(zRank, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), m, zLen, coords); const auto zOffset = shape::getOffset(0, shape::shapeOf(const_cast<Nd4jLong*>(zShapeInfo)), shape::stride(const_cast<Nd4jLong*>(zShapeInfo)), coords, zRank); sharedMem[threadIdx.x] = 0; for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) { coords[zRank] = coords[zRank + 1] = i; const auto xOffset = shape::getOffset(0, 
shape::shapeOf(const_cast<Nd4jLong*>(xShapeInfo)), shape::stride(const_cast<Nd4jLong*>(xShapeInfo)), coords, xRank); sharedMem[threadIdx.x] += x[xOffset]; } __syncthreads(); // aggregate sum for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[zOffset] = *sharedMem; } } /////////////////////////////////////////////////////////////////// template<typename T> static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint diagLen) { traceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diagLen); } BUILD_SINGLE_TEMPLATE(template void traceCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void trace(nd4j::LaunchContext* context, const NDArray& input, NDArray& output) { PointersManager manager(context, "trace"); const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2); const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * (sizeof(Nd4jLong) * input.rankOf() + input.sizeOfT()) + 128; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int rank, areSameOffsets; // xRank = zRank __shared__ Nd4jLong len, totalThreads, *sharedMem; // xLen = zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); rank = shape::rank(xShapeInfo); len = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coords = sharedMem + threadIdx.x * rank; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { shape::index2coords(rank, zShapeInfo + 1, i, len, coords); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + rank + 1, coords, rank); if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col z[zOffset] = 0; else z[zOffset] = x[areSameOffsets ? 
zOffset : shape::getOffset(0, xShapeInfo + 1, xShapeInfo + rank + 1, coords, rank)]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { triuBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diag); } BUILD_SINGLE_TEMPLATE(template void triuBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// void triuBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) { const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * gradO.rankOf() + 128; PointersManager manager(context, "triuBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int xRank, zRank; // xRank >= zRank __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads, *sharedMem; // xLen >= zLen if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); xRank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); numOfXOffsets = shape::length(xShapeInfo) / zLen; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; auto memBuff = sharedMem + threadIdx.x * 2 * xRank; auto xOffsets = globMem + tid * numOfXOffsets; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const auto zOffset = shape::getIndexOffset(i, zShapeInfo, zLen); shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff); z[zOffset] = x[xOffsets[0]]; // first offset for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets z[zOffset] += x[xOffsets[j]]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { tileBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, globMem); } BUILD_SINGLE_TEMPLATE(template void tileBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* 
zShapeInfo, Nd4jLong* globMem), FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// void tileBP(nd4j::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) { NDArray memBuff('c', gradO.getShapeAsVector(), nd4j::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(Nd4jLong) * 2 * gradO.rankOf() + 128; PointersManager manager(context, "tileBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff}); BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void scatterUpdateCuda(const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { __shared__ T *x, *y; __shared__ Nd4jLong arrLenX, arrLenY; for (int e = 0; e < numOfInd; e++ ) { const auto xIndex = indexes[e]; const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x; if (!isOwner) continue; if (threadIdx.x == 0) { x = reinterpret_cast<T*>(vx) + xOffsets[xIndex]; y = reinterpret_cast<T*>(vy) + yOffsets[e]; arrLenX = shape::length(xShapeInfo); arrLenY = shape::length(yShapeInfo); } __syncthreads(); if (arrLenX != arrLenY) return; for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo, arrLenX); const auto yOffset = shape::getIndexOffset(i, yShapeInfo, arrLenY); switch (opCode) { case 0: x[xOffset] += y[yOffset]; break; case 1: x[xOffset] -= y[yOffset]; break; case 2: x[xOffset] *= y[yOffset]; break; case 3: x[xOffset] /= y[yOffset]; break; case 4: x[xOffset] = y[yOffset] - x[xOffset]; break; case 5: x[xOffset] = y[yOffset] / x[xOffset]; break; case 6: x[xOffset] = y[yOffset]; break; default: continue; } } __syncthreads(); } } template<typename T> __host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfInd, void* vx, const Nd4jLong *xShapeInfo, const Nd4jLong *xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const int* indexes) { scatterUpdateCuda<T><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfInd, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes); } ////////////////////////////////////////////////////////////////////////// void scatterUpdate(nd4j::LaunchContext* context, NDArray& input, NDArray& updates, const std::vector<int>* intArgs) { const int opCode = (*intArgs)[0]; const int numOfDims = (*intArgs)[1]; const int numOfInd = (*intArgs)[2 + numOfDims]; std::vector<int> tadDimensions(numOfDims); for (int e = 2; e < 2 + numOfDims; e++) tadDimensions[e-2] = (*intArgs)[e]; auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), tadDimensions); auto packY = 
ConstantTadHelper::getInstance()->tadForDimensions(updates.getShapeInfo(), tadDimensions); NDArray indices(const_cast<int*>(intArgs->data()) + numOfDims + 3, 'c', {numOfInd}, nd4j::DataType::INT32, context); PointersManager manager(context, "scatterUpdate"); NDArray::prepareSpecialUse({&input}, {&input, &updates, &indices}); BUILD_SINGLE_SELECTOR(input.dataType(), scatterUpdateCudaLauncher, (context->getCudaStream(), opCode, numOfInd, input.specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), updates.specialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), reinterpret_cast<int*>(indices.getSpecialBuffer())), LIBND4J_TYPES); NDArray::registerSpecialUse({&input}, {&input, &updates, &indices}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// // x - input, y - indices, z - output template<typename X, typename Y> __global__ static void gatherNDCuda(const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Y*>(vy); auto z = reinterpret_cast<X*>(vz); __shared__ int xRank, yRank, zRank, maxRank, yLastDim; __shared__ Nd4jLong zLen, totalThreads, *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); xRank = shape::rank(xShapeInfo); yRank = shape::rank(yShapeInfo); zRank = shape::rank(zShapeInfo); maxRank = nd4j::math::nd4j_max<int>(yRank, nd4j::math::nd4j_max<int>(xRank, zRank)); zLen = shape::length(zShapeInfo); yLastDim = yShapeInfo[yRank]; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); auto coord = sharedMem + threadIdx.x * maxRank; Nd4jLong *zCoordStart, *xCoordStart; if(yLastDim == xRank) { zCoordStart = coord; xCoordStart = coord; } if(zRank >= xRank) { zCoordStart = coord; xCoordStart = coord + zRank - xRank; } else { zCoordStart = coord + xRank - zRank; xCoordStart = coord; } const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { shape::index2coords(zRank, zShapeInfo + 1, i, zLen, zCoordStart); const auto zOffset = shape::getOffset(0, zShapeInfo + 1, zShapeInfo + zRank + 1, zCoordStart, zRank); // last y coordinate int coordToRestore; if(yLastDim != xRank) coordToRestore = static_cast<int>(zCoordStart[yRank - 1]); zCoordStart[yRank - 1] = 0; // last y coordinate const auto yOffset = shape::getOffset(0, yShapeInfo + 1, yShapeInfo + yRank + 1, zCoordStart, yRank); //restore z coordinate if(yLastDim != xRank) zCoordStart[yRank - 1] = coordToRestore; // construct coordinates for x for(uint j = 0; j < yLastDim; ++j) xCoordStart[j] = y[yOffset + j * yShapeInfo[2 * yRank]]; // last stride const auto xOffset = shape::getOffset(0, xShapeInfo + 1, xShapeInfo + xRank + 1, xCoordStart, xRank); z[zOffset] = x[xOffset]; } } /////////////////////////////////////////////////////////////////// template<typename X, typename Y> static void gatherNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo) { gatherNDCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } BUILD_DOUBLE_TEMPLATE(template void gatherNDCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const 
cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz, const Nd4jLong *zShapeInfo), LIBND4J_TYPES, INTEGER_TYPES); /////////////////////////////////////////////////////////////////// void gatherND(nd4j::LaunchContext * context, NDArray& input, NDArray& indices, NDArray& output) { const int maxRank = nd4j::math::nd4j_max<int>(indices.rankOf(), nd4j::math::nd4j_max<int>(input.rankOf(), output.rankOf())); const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 8 * threadsPerBlock * maxRank + 128; const auto xType = input.dataType(); const auto yType = indices.dataType(); PointersManager manager(context, "gatherND"); NDArray::prepareSpecialUse({&output}, {&input, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, gatherNDCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), indices.getSpecialBuffer(), indices.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&output}, {&input, &indices}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// // x - input, y - gradO, z - gradI template<typename X, typename Z> __global__ static void clipByNormBPWholeArrCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= shape::length(zShapeInfo)) return; const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Z*>(vy); auto z = reinterpret_cast<Z*>(vz); auto reducBuff = reinterpret_cast<Z*>(vreducBuff); uint* count = reinterpret_cast<uint*>(vreducBuff) + 16384; __shared__ Z* shMem; __shared__ Nd4jLong len; __shared__ bool amIinLastBlock; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shMem = reinterpret_cast<Z*>(shmem); len = shape::length(zShapeInfo); // xLen = yLen = zLen } __syncthreads(); // fill shared memory with array elements const auto xVal = x[shape::getIndexOffset(tid, xShapeInfo, len)]; const auto yVal = y[shape::getIndexOffset(tid, yShapeInfo, len)]; shMem[2*threadIdx.x] = static_cast<Z>(xVal * xVal); // for norm shMem[2*threadIdx.x + 1] = static_cast<Z>(xVal * yVal); // for input * gradO __syncthreads(); // accumulate sum per block for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads && tid + activeThreads < len) { shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)]; shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1]; } __syncthreads(); } // store accumulated sums in reduction buffer (reducBuff) if (threadIdx.x == 0) { reducBuff[2*blockIdx.x] = shMem[0]; reducBuff[2*blockIdx.x + 1] = shMem[1]; __threadfence(); amIinLastBlock = gridDim.x == 1 || (atomicInc(count, gridDim.x) == gridDim.x - 1); } __syncthreads(); // shared memory of last block is used for final summation of values stored in reduction buffer if (amIinLastBlock) { for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { shMem[2*threadIdx.x] = (i == threadIdx.x ) ? reducBuff[2*i] : reducBuff[2*i] + shMem[2*threadIdx.x]; shMem[2*threadIdx.x + 1] = (i == threadIdx.x ) ? 
reducBuff[2*i + 1] : reducBuff[2*i + 1] + shMem[2*threadIdx.x + 1]; } __syncthreads(); // accumulate sum for (int activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads && threadIdx.x + activeThreads < gridDim.x) { shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)]; shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1]; } __syncthreads(); } if (threadIdx.x == 0) { reducBuff[0] = math::nd4j_sqrt<Z,Z>(shMem[0]); reducBuff[1] = shMem[1]; count = 0; } } } ////////////////////////////////////////////////////////////////////////// // x - input, y - gradO, z - gradI template<typename X, typename Z> __global__ static void clipByNormBPCalcGradCuda(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, void* vreducBuff, const Z clipNormVal) { const auto tid = blockIdx.x * blockDim.x + threadIdx.x; const Nd4jLong len = shape::length(zShapeInfo); // xLen = yLen = zLen if(tid >= len) return; const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Z*>(vy); auto z = reinterpret_cast<Z*>(vz); __shared__ Z norm, sumOfProd; if (threadIdx.x == 0) { norm = reinterpret_cast<Z*>(vreducBuff)[0]; sumOfProd = reinterpret_cast<Z*>(vreducBuff)[1]; } __syncthreads(); const auto yOffset = shape::getIndexOffset(tid, yShapeInfo, len); const auto zOffset = shape::getIndexOffset(tid, zShapeInfo, len); if(norm > clipNormVal) { const auto xOffset = shape::getIndexOffset(tid, xShapeInfo, len); const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm) z[zOffset] = clipNormVal * (factor1 * y[yOffset] - factor2 * sumOfProd * x[xOffset]); } else { z[zOffset] = y[yOffset]; } } ////////////////////////////////////////////////////////////////////////// // x - input, y - gradO, z - gradI template<typename X, typename Z> __global__ static void clipByNormBPTadsCuda(const void* vx, const Nd4jLong* xTadShapeInfo, const Nd4jLong* xTadOffsets, const void* vy, const Nd4jLong* yTadShapeInfo, const Nd4jLong* yTadOffsets, void* vz, const Nd4jLong* zTadShapeInfo, const Nd4jLong* zTadOffsets, const Z clipNormVal) { const auto x = reinterpret_cast<const X*>(vx); const auto y = reinterpret_cast<const Z*>(vy); auto z = reinterpret_cast<Z*>(vz); __shared__ Z* shMem; __shared__ Nd4jLong tadLen; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; shMem = reinterpret_cast<Z*>(shmem); tadLen = shape::length(zTadShapeInfo); // xTadLen = yTadLen = zTadLen } __syncthreads(); const auto* xTad = x + xTadOffsets[blockIdx.x]; const auto* yTad = y + yTadOffsets[blockIdx.x]; auto* zTad = z + zTadOffsets[blockIdx.x]; // *** FIRST STAGE - ACCUMULATE REQUIRED SUMS *** // Z norm = 0; Z sumOfProd = 0; for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) { const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen); const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen); shMem[2*threadIdx.x] = static_cast<Z>(xTad[xOffset] * xTad[xOffset]); // for norm shMem[2*threadIdx.x + 1] = static_cast<Z>(xTad[xOffset] * yTad[yOffset]); // for input * gradO __syncthreads(); // accumulate sum per block for (uint activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads && i + activeThreads < tadLen) { shMem[2*threadIdx.x] += shMem[2*(threadIdx.x + activeThreads)]; shMem[2*threadIdx.x + 1] += shMem[2*(threadIdx.x + activeThreads) + 1]; } __syncthreads(); 
} norm += shMem[0]; sumOfProd += shMem[1]; } // *** SECOND STAGE - GRADIENT CALCULATION *** // norm = math::nd4j_sqrt<Z,Z>(norm); for (uint i = threadIdx.x; i < tadLen; i += blockDim.x) { const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo, tadLen); const auto zOffset = shape::getIndexOffset(i, zTadShapeInfo, tadLen); if(norm > clipNormVal) { const auto xOffset = shape::getIndexOffset(i, xTadShapeInfo, tadLen); const Z factor1 = static_cast<Z>(1) / norm; // 1 / norm const Z factor2 = factor1 / (norm * norm); // 1 / (norm * norm * norm) zTad[zOffset] = clipNormVal * (factor1 * yTad[yOffset] - factor2 * sumOfProd * xTad[xOffset]); } else { zTad[zOffset] = yTad[yOffset]; } } } ////////////////////////////////////////////////////////////////////////// template<typename X, typename Z> static void clipByNormBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, const void* vy, const Nd4jLong* yShapeInfo, const Nd4jLong* yTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, void* vreducBuff, const double clipNormVal) { if(xTadOffsets == nullptr) { // means whole array clipByNormBPWholeArrCuda<X,Z><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal)); clipByNormBPCalcGradCuda<X,Z><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo, vreducBuff, static_cast<Z>(clipNormVal)); } else // means tads using clipByNormBPTadsCuda<X,Z><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, xTadOffsets, vy, yShapeInfo, yTadOffsets, vz, zShapeInfo, zTadOffsets, static_cast<Z>(clipNormVal)); } BUILD_DOUBLE_TEMPLATE(template void clipByNormBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const Nd4jLong* xTadOffsets, const void *vy, const Nd4jLong *yShapeInfo, const Nd4jLong* yTadOffsets, void *vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, void* vreducBuff, const double clipNormVal), LIBND4J_TYPES, FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// void clipByNormBP(nd4j::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI /*output*/, const std::vector<int>& dimensions, const NDArray& clipNorm) { PointersManager manager(context, "clipByNormBP"); const double clipNormVal = clipNorm.e<double>(0); const auto xType = input.dataType(); const auto zType = gradI.dataType(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int sharedMem = threadsPerBlock * 2 * input.sizeOfT() + 128; NDArray::prepareSpecialUse({&gradI}, {&input, &gradO}); if(dimensions.empty() || dimensions.size() == input.rankOf()) { // means whole array const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), nullptr, gradO.getSpecialBuffer(), gradO.getSpecialShapeInfo(), nullptr, gradI.getSpecialBuffer(), gradI.getSpecialShapeInfo(), nullptr, context->getReductionPointer(), clipNormVal), LIBND4J_TYPES, FLOAT_TYPES); } else { // means tads using auto packX = 
ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions); auto packY = ConstantTadHelper::getInstance()->tadForDimensions(gradO.getShapeInfo(), dimensions); auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(gradI.getShapeInfo(), dimensions); const int blocksPerGrid = packX.numberOfTads(); BUILD_DOUBLE_SELECTOR(xType, zType, clipByNormBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), gradO.getSpecialBuffer(), packY.platformShapeInfo(), packY.platformOffsets(), gradI.getSpecialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), nullptr, clipNormVal), LIBND4J_TYPES, FLOAT_TYPES); } NDArray::registerSpecialUse({&gradI}, {&input, &gradO}); manager.synchronize(); } template <typename T> static __global__ void swapShuffleKernel(T* input, Nd4jLong* shape, Nd4jLong firstDim, Nd4jLong len, nd4j::graph::RandomGenerator* rng) { auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for (int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; if (i != r) { T e0 = input[shape::getIndexOffset(i, shape, len)]; T e1 = input[shape::getIndexOffset(r, shape, len)]; //math::nd4j_swap<T>(input(i), input(r)); input[shape::getIndexOffset(i, shape, len)] = e1; input[shape::getIndexOffset(r, shape, len)] = e0; } } } template <typename T> static __global__ void fillShuffleKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong firstDim, Nd4jLong len, int* indices, nd4j::graph::RandomGenerator* rng) { // PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold()) auto tid = blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(int i = firstDim - 1 - tid - threadIdx.x; i > 0; i -= step) { int r = rng->relativeInt(i) % i; output[shape::getIndexOffset(i, outputShape, len)] = input[shape::getIndexOffset(indices[r], inputShape, len)]; if(i != r) { output[shape::getIndexOffset(r, outputShape, len)] = input[shape::getIndexOffset(indices[i], inputShape, len)]; // output.p(r, input.e<T>(indices[i])); // math::nd4j_swap<int>(indices[i], indices[r]); atomicExch(&indices[i], indices[r]); } } } ////////////////////////////////////////////////////////////////////////// template <typename T> void randomShuffle_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) { // check edge cases first int temp; const int firstDim = input.sizeAt(0); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input}); if(input.lengthOf() == 1 || firstDim == 1) { if(!isInplace) output.assign(input); } else if (input.isVector() || shape::isLikeVector(input.getShapeInfo(), temp)) { // apply Fisher-Yates shuffle nd4j::graph::RandomGenerator* dRandom = nullptr; cudaMalloc(&dRandom, sizeof(nd4j::graph::RandomGenerator)); cudaMemcpy(dRandom, &rng, sizeof(nd4j::graph::RandomGenerator), cudaMemcpyHostToDevice); T* inputBuf = reinterpret_cast<T*>(input.specialBuffer()); if(isInplace) { swapShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), firstDim, input.lengthOf(), dRandom); } else { std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); cudaMemcpy(output.specialBuffer(), input.specialBuffer(), sizeof(T), cudaMemcpyDeviceToDevice); //output.p<T>(Nd4jLong(0), input.e<T>(0)); PointersManager pointersManager(context, 
"helper::randomShuffle_"); int* indicesDev = reinterpret_cast<int*>(pointersManager.replicatePointer(indices.data(), indices.size() * sizeof(int))); T* outputBuf = reinterpret_cast<T*>(output.specialBuffer()); fillShuffleKernel<T><<<128, 256, 1024, *stream>>>(inputBuf, input.specialShapeInfo(), outputBuf, output.specialShapeInfo(), firstDim, input.lengthOf(), indicesDev, dRandom); pointersManager.synchronize(); } // rng.rewindH(firstDim - 1); cudaFree(dRandom); } else { // evaluate sub-arrays list of input array through all dimensions excluding first one std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input.rankOf(), {0}); auto subArrsListIn = input.allTensorsAlongDimension(dimensions); // apply Fisher-Yates shuffle if(isInplace) { PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->elementwiseThreshold()) for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; if(i != r) subArrsListIn->at(i)->swapUnsafe(*subArrsListIn->at(r)); } } else { // evaluate sub-arrays list of output array through all dimensions excluding first one auto subArrsListOut = output.allTensorsAlongDimension(dimensions); std::vector<int> indices(firstDim); std::iota(indices.begin(), indices.end(), 0); bool isZeroShuffled = false; PRAGMA_OMP_PARALLEL_FOR_IF((firstDim-1) > Environment::getInstance()->tadThreshold()) for(int i = firstDim - 1; i > 0; --i) { int r = rng.relativeInt(i) % i; subArrsListOut->at(i)->assign(subArrsListIn->at(indices[r])); if(r == 0) isZeroShuffled = true; if(i != r) { subArrsListOut->at(r)->assign(subArrsListIn->at(indices[i])); math::nd4j_swap<int>(indices[i], indices[r]); } } if(!isZeroShuffled) subArrsListOut->at(0)->assign(subArrsListIn->at(0)); delete subArrsListOut; } rng.rewindH(firstDim-1); delete subArrsListIn; } NDArray::registerSpecialUse({&output}, {&input}); } void randomShuffle(nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace) { BUILD_SINGLE_SELECTOR(input.dataType(), randomShuffle_, (context, input, output, rng, isInplace), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void randomShuffle_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, nd4j::graph::RandomGenerator& rng, const bool isInplace), LIBND4J_TYPES); ////////////////////////////////////////////////////////////////////////// void eye(nd4j::LaunchContext * context, NDArray& output) { output.setIdentity(); } ////////////////////////////////////////////////////////////////////////// template <typename T, typename Z> static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<Z*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T mVal = -DataTypeUtils::max<T>(); Z mIdx(0); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); auto val = x[shape::getIndexOffset(e, xShape, length)];; if (mVal < val) mIdx = static_cast<Z>(e); } __syncthreads(); output[shape::getIndexOffset(e, outputShape, length)] = mIdx; } } template <typename T, typename Z> static void mergeMaxIndex_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { 
inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeMaxIndex"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); global_mergeMaxIndex_<T,Z><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } void mergeMaxIndex(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INTEGER_TYPES); } BUILD_DOUBLE_TEMPLATE(template void mergeMaxIndex_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T mVal = -DataTypeUtils::max<T>(); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); auto val = x[shape::getIndexOffset(e, xShape, length)];; if (mVal < val) mVal = val; } __syncthreads(); output[shape::getIndexOffset(e, outputShape, length)] = mVal; } } template<typename T> static void mergeMax_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeMax"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); global_mergeMax_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } BUILD_SINGLE_TEMPLATE(template void mergeMax_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES); void mergeMax(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T sum(0.0f); for (int i = 0; i < numArrays; i++) { auto x 
= reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); sum += x[shape::getIndexOffset(e, xShape, length)]; } output[shape::getIndexOffset(e, outputShape, length)] = sum / numArrays; } } template<typename T> static void mergeAvg_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeAvg"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); global_mergeAvg_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } BUILD_SINGLE_TEMPLATE(template void mergeAvg_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES); void mergeAvg(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), LIBND4J_TYPES); } ////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) { auto output = reinterpret_cast<T*>(voutput); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { T sum(0.0f); for (int i = 0; i < numArrays; i++) { auto x = reinterpret_cast<T*>(inArrs[i]); auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]); sum += x[shape::getIndexOffset(e, xShape, length)]; } output[shape::getIndexOffset(e, outputShape, length)] = sum; } } template<typename T> static void mergeAdd_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { std::vector<void *> inBuffers(inArrs.size()); std::vector<void *> inShapes(inArrs.size()); for (int e = 0; e < inArrs.size(); e++) { inBuffers[e] = inArrs[e]->getSpecialBuffer(); inShapes[e] = inArrs[e]->getSpecialShapeInfo(); } PointersManager manager(context, "mergeAdd"); auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *))); auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *))); auto length = output.lengthOf(); global_mergeAdd_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length); manager.synchronize(); } BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), LIBND4J_TYPES); void mergeAdd(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) { BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), LIBND4J_TYPES); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void 
clipByNormInplaceKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) { for (int arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) { __shared__ T* z; __shared__ Nd4jLong len; if (threadIdx.x == 0) { len = shape::length(shape); z = inputBuffer + inputOffsets[arr]; } __syncthreads(); for (int j = threadIdx.x; j < len; j+= blockDim.x) { auto xIndex = shape::getIndexOffset(j, shape, len); if(norm2Buf[arr] > clipNorm) z[xIndex] *= clipNorm / norm2Buf[arr]; // case with ews = 1 and ordering is 'c' } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void clipByNormKernel(Nd4jLong numOfSubArrs, T* inputBuffer, Nd4jLong* shape, Nd4jLong* inputOffsets, T* outputBuffer, Nd4jLong* outputShape, Nd4jLong* outputOffsets, T* norm2Buf, Nd4jLong* norm2shape, T clipNorm) { for (Nd4jLong arr = blockIdx.x; arr < numOfSubArrs; arr += gridDim.x) { __shared__ T* x, *z; __shared__ Nd4jLong lenX, lenZ; __shared__ T norm2; if (threadIdx.x == 0) { lenX = shape::length(shape); x = inputBuffer + inputOffsets[arr]; z = outputBuffer + outputOffsets[arr]; lenZ = shape::length(outputShape); norm2 = norm2Buf[shape::getIndexOffset(arr, norm2shape, numOfSubArrs)]; //printf("%d: %lf (vs %lf) %lld %lld\n", arr, norm2, clipNorm, lenX, lenZ); } __syncthreads(); for (Nd4jLong j = threadIdx.x; j < lenZ; j+= blockDim.x) { auto xIndex = shape::getIndexOffset(j, shape, lenX); auto zIndex = shape::getIndexOffset(j, outputShape, lenZ); if(norm2 > clipNorm) { z[zIndex] = x[xIndex] * clipNorm / norm2; // case with ews = 1 and ordering is 'c' } else { z[zIndex] = x[xIndex]; } //printf("%lld: %lf %lf\n", j, z[zIndex], x[xIndex]); } __syncthreads(); } } ////////////////////////////////////////////////////////////////////////// template<typename T> static void clipByNorm_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, NDArray const& clipNormA, const bool isInplace) { const int rank = input.rankOf(); auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions); clipNormA.syncToHost(); //norm2.printBuffer("Norm2"); T const clipNorm = clipNormA.e<T>(0); //clipNormA.printBuffer("ClipNorm"); auto stream = context->getCudaStream(); if (isInplace) { if(norm2.lengthOf() == 1) { norm2.syncToHost(); T norm2Val = norm2.e<T>(0); if(norm2Val > clipNorm) input *= clipNorm / norm2Val; } else { std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions); const Nd4jLong numOfSubArrs = ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions); //auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimsToExclude); T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer()); T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer()); clipByNormInplaceKernel<T><<<256, 512, 1024, *stream>>>(numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm); } } else { if(norm2.lengthOf() == 1) { norm2.syncToHost(); T norm2Val = norm2.e<T>(0); if(norm2Val > clipNorm) output.assign( input * (clipNorm / norm2Val)); else output.assign( input ); } else { std::vector<int> dimsToExclude = ShapeUtils::evalDimsToExclude(rank, dimensions); const Nd4jLong numOfSubArrs = 
ShapeUtils::getNumOfSubArrs(input.getShapeInfo(), dimsToExclude); auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dimensions); auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), dimensions); T* inputBuffer = reinterpret_cast<T*>(input.specialBuffer()); T* norm2buf = reinterpret_cast<T*>(norm2.specialBuffer()); T* outputBuffer = reinterpret_cast<T*>(output.specialBuffer()); clipByNormKernel<T><<<256, 512, 1024, *stream>>>(numOfSubArrs, inputBuffer, packX.specialShapeInfo(), packX.specialOffsets(), outputBuffer, packZ.specialShapeInfo(), packZ.specialOffsets(), norm2buf, norm2.specialShapeInfo(), clipNorm); } } } void clipByNorm(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) { BUILD_SINGLE_SELECTOR(output.dataType(), clipByNorm_, (context, input, output, dimensions, clipNorm, isInplace), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByNorm_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES); template <typename T> static void clipByGlobalNorm_(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) { } void clipByGlobalNorm(nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace) { BUILD_SINGLE_SELECTOR(outputs[0]->dataType(), clipByGlobalNorm_, (context, inputs, clipNorm, workspace, outputs, isInplace), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByGlobalNorm_, (nd4j::LaunchContext * context, std::vector<NDArray*> const& inputs, double clipNorm, nd4j::memory::Workspace* workspace, std::vector<NDArray*>& outputs, bool isInplace), FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// template<typename T> static void clipByAveraged_(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) { auto cn = clipNorm.e<T>(0); if (dimensions.size() == 0) { // all-reduce T n2 = input.reduceNumber(reduce::Norm2).e<T>(0) / input.lengthOf(); if (n2 <= cn) { if (!isInplace) output.assign(input); } else { const T factor = cn / n2; //auto lambda = LAMBDA_T(_x, factor) { return _x * factor; }; //input.applyLambda<T>(lambda, &output); output.assign(input * factor); } } else { // along dimension auto norm2 = input.reduceAlongDims(reduce::Norm2, dimensions, false); if (!isInplace) output.assign(input); auto tads = output.allTensorsAlongDimension(dimensions); auto outTads = output.allTensorsAlongDimension(dimensions); // TODO: make this CUDA-compliant somehow for (int e = 0; e < tads->size(); e++) { T n2 = norm2.e<T>(e) / tads->at(e)->lengthOf(); const T factor = cn / n2; if (n2 > cn) { //auto lambda = LAMBDA_T(_x, factor) {return _x * factor;}; tads->at(e)->applyScalar(scalar::Multiply, factor, outTads->at(e));//applyLambda<T>(lambda, &output); } } delete tads; delete outTads; } } void clipByAveraged(nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace) { BUILD_SINGLE_SELECTOR(input.dataType(), clipByAveraged_, (context, input, output, dimensions, clipNorm, 
isInplace), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByAveraged_, (nd4j::LaunchContext * context, NDArray& input, NDArray& output, const std::vector<int>& dimensions, const NDArray& clipNorm, const bool isInplace), FLOAT_TYPES); /* if (d1 > params[1]) return params[1]; else if (d1 < params[0]) return params[0]; else return d1; */ template <typename T> static void __global__ clipByValueKernel(void* input, Nd4jLong* inputShape, void* output, Nd4jLong* outputShape, double leftBound, double rightBound) { __shared__ T* outputBuf; __shared__ T* inputBuf; __shared__ Nd4jLong length; __shared__ bool linearBuffers; if (threadIdx.x == 0) { outputBuf = reinterpret_cast<T *>(output); inputBuf = reinterpret_cast<T *>(input); length = shape::length(inputShape); linearBuffers = shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape) && shape::elementWiseStride(inputShape) == 1; } __syncthreads(); const auto tid = blockIdx.x * gridDim.x + threadIdx.x; const auto step = gridDim.x * blockDim.x; for (Nd4jLong e = tid; e < length; e += step) { if (linearBuffers) { if (inputBuf[e] > rightBound) outputBuf[e] = (T) rightBound; else if (inputBuf[e] < leftBound) outputBuf[e] = (T) leftBound; else outputBuf[e] = inputBuf[e]; } else { auto inputOffset = shape::getIndexOffset(e, inputShape, length); auto outputOffset = shape::getIndexOffset(e, outputShape, length); if (inputBuf[inputOffset] > rightBound) outputBuf[outputOffset] = (T) rightBound; else if (inputBuf[inputOffset] < leftBound) outputBuf[outputOffset] = (T) leftBound; else outputBuf[outputOffset] = inputBuf[outputOffset]; } } } template <typename T> static void clipByValue_(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) { auto stream = context->getCudaStream(); if (!input.isActualOnDeviceSide()) input.syncToDevice(); NDArray::prepareSpecialUse({&output}, {&input}); clipByValueKernel<T><<<256, 512, 8192, *stream>>>(input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftBound, rightBound); NDArray::registerSpecialUse({&output}, {&input}); } void clipByValue(nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output) { BUILD_SINGLE_SELECTOR(input.dataType(), clipByValue_, (context, input, leftBound, rightBound, output), FLOAT_TYPES); } BUILD_SINGLE_TEMPLATE(template void clipByValue_, (nd4j::LaunchContext * context, NDArray& input, double leftBound, double rightBound, NDArray& output);, FLOAT_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static __global__ void mirrorPadLinearKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong leftSide, Nd4jLong leftSideCorrected, Nd4jLong xLen, Nd4jLong len, Nd4jLong zLen) { __shared__ T const* x; __shared__ T* z; if (threadIdx.x == 0) { x = reinterpret_cast<T const*>(vx); z = reinterpret_cast<T*>(vz); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for(int i = start; i < zLen; i+= step) { auto zIndex = shape::getIndexOffset(i, zShape, zLen); auto xIndex = shape::getIndexOffset(len - i, xShape, xLen); if (i < leftSide) // left side xIndex = shape::getIndexOffset(leftSideCorrected - i, xShape, xLen); else if(i >= leftSide && i < leftSide + xLen) // middle xIndex = shape::getIndexOffset(i - leftSide, xShape, xLen); // else // right side // z[i] = x[len - i]; z[zIndex] 
= x[xIndex]; } } template <typename F, typename I> static __global__ void mirrorPadKernel(void const* vx, Nd4jLong* xShape, void* vz, Nd4jLong* zShape, Nd4jLong outLen, void const* paddings, Nd4jLong* paddingShape, int reflBorder) { __shared__ F const* x; __shared__ I const* pads; __shared__ F* z; __shared__ Nd4jLong zRank, rank; __shared__ Nd4jLong* xShapeOf, *xStrideOf, *padsShapeOf, *padsStrideOf; __shared__ Nd4jLong* zShapeOf, *zStrideOf; __shared__ Nd4jLong* xIdx; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; xIdx = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(xShape); x = reinterpret_cast<F const*>(vx);// pads = reinterpret_cast<I const*>(paddings); z = reinterpret_cast<F*>(vz); xShapeOf = shape::shapeOf(xShape); xStrideOf = shape::stride(xShape); zShapeOf = shape::shapeOf(zShape); zRank = shape::rank(zShape); zStrideOf = shape::stride(zShape); padsShapeOf = shape::shapeOf(paddingShape); padsStrideOf = shape::stride(paddingShape); } __syncthreads(); auto start = threadIdx.x + blockIdx.x * blockDim.x; auto step = blockDim.x * gridDim.x; for(Nd4jLong i = start; i < outLen; i+= step) { auto xzCoord = xIdx + threadIdx.x * rank; //auto zxCoord = xIdx + (threadIdx.x + threadIdx.x % 2 + 1) * rank; shape::index2coords(rank, zShapeOf, i, xzCoord); auto outOffset = shape::getOffset(0, zShapeOf, zStrideOf, xzCoord, rank); // auto intStep = blockDim.y * gridDim.y; for(int j = 0; j < rank; j++) { const Nd4jLong inLen = shape::sizeAt(xShape, j); Nd4jLong coords[2] = {j, 0}; auto padOffset = shape::getOffset(0, padsShapeOf, padsStrideOf, coords, 2); // padding already has rank 2 const auto leftSide = pads[padOffset]; const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2 * (inLen - 1) + leftSide + reflBorder; if(xzCoord[j] < leftSide) // left side xzCoord[j] = leftSideCorrected - xzCoord[j]; else if(xzCoord[j] >= leftSide && xzCoord[j] < leftSide + inLen) // middle xzCoord[j] = xzCoord[j] - leftSide; else if (len > xzCoord[j]) // right side xzCoord[j] = len - xzCoord[j]; else xzCoord[j] = xzCoord[j] - len; } auto inOffset = shape::getOffset(0, xShapeOf, xStrideOf, xzCoord, rank); z[outOffset] = x[inOffset]; } } template<typename F, typename I> static void mirrorPad_(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { // mode: 0 - REFLECT, else - SYMMETRIC const int reflBorder = (bool)mode ? 1 : 0; const int rank = input.rankOf(); const Nd4jLong outLen = output.lengthOf(); auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({&output}, {&input, &paddings}); if(rank <= 1) { const Nd4jLong inLen = input.lengthOf(); const auto leftSide = paddings.e<Nd4jLong>(0); const auto leftSideCorrected = leftSide - reflBorder; const Nd4jLong len = 2*(inLen-1) + leftSide + reflBorder; mirrorPadLinearKernel<F><<<256, 512, 256, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), leftSide, leftSideCorrected, inLen, len, outLen); nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadLinearKernel(...) failed"); } else { mirrorPadKernel<F, I><<<256, 256, 8192, *stream>>>(input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), outLen, paddings.getSpecialBuffer(), paddings.getSpecialShapeInfo(), reflBorder); nd4j::DebugHelper::checkErrorCode(stream, "helpers::mirrorPadKernel(...) 
failed"); } NDArray::registerSpecialUse({&output}, {&input, &paddings}); } void mirrorPad(nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode) { BUILD_DOUBLE_SELECTOR(input.dataType(), paddings.dataType(), mirrorPad_, (context, input, paddings, output, mode), LIBND4J_TYPES, INTEGER_TYPES); } BUILD_DOUBLE_TEMPLATE(template void mirrorPad_, (nd4j::LaunchContext * context, const NDArray& input, const NDArray& paddings, NDArray& output, const int mode), LIBND4J_TYPES, INTEGER_TYPES); ////////////////////////////////////////////////////////////////////////// void concat(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output, const int axis) { const int numOfArrs = inArrs.size(); for(int i = 0; i < numOfArrs; ++i) if(!inArrs[i]->isActualOnDeviceSide()) inArrs[i]->syncToDevice(); const int rank = inArrs[0]->rankOf(); const int rank2 = 2*rank; std::vector<std::vector<Nd4jLong>> indices(numOfArrs, std::vector<Nd4jLong>(rank2,0)); // take into account indices for first array indices[0][2 * axis + 1] = inArrs[0]->sizeAt(axis); // loop through the rest of input arrays for(int i = 1; i < numOfArrs; ++i) { indices[i][2 * axis] = indices[i-1][2 * axis + 1]; // index start from indices[i][2 * axis + 1] = indices[i-1][2 * axis + 1] + inArrs[i]->sizeAt(axis); // index end with (excluding) } std::vector<NDArray*> outSubArrs(numOfArrs); for(int i = 0; i < numOfArrs; ++i) outSubArrs[i] = new NDArray(output(indices[i], true)); // prepare arrays of pointers on buffers and shapes std::vector<void*> hOutBuffers(numOfArrs), hInBuffers(numOfArrs); std::vector<Nd4jLong*> hOutShapeInfo(numOfArrs), hInShapeInfo(numOfArrs); for(int i = 0; i < numOfArrs; ++i) { hOutBuffers[i] = outSubArrs[i]->getSpecialBuffer(); hInBuffers[i] = inArrs[i]->getSpecialBuffer(); hOutShapeInfo[i] = outSubArrs[i]->getSpecialShapeInfo(); hInShapeInfo[i] = inArrs[i]->getSpecialShapeInfo(); } // allocate and copy all buffers and shapes arrays to global memory PointersManager manager(context, "helpers::concat"); void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*)); void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); void* dInShapeInfo = manager.replicatePointer(hInShapeInfo.data(), hInShapeInfo.size() * sizeof(Nd4jLong*)); void* dOutShapeInfo = manager.replicatePointer(hOutShapeInfo.data(), hOutShapeInfo.size() * sizeof(Nd4jLong*)); BUILD_SINGLE_SELECTOR(inArrs[0]->dataType(), concatCudaLauncher, (numOfArrs, context->getCudaStream(), dInBuffers, dInShapeInfo, dOutBuffers, dOutShapeInfo), LIBND4J_TYPES); manager.synchronize(); for(int i = 0; i < numOfArrs; ++i) delete outSubArrs[i]; for(int i = 0; i < numOfArrs; ++i) inArrs[i]->tickReadHost(); output.tickWriteDevice(); } template <typename X, typename Y> static _CUDA_G void scatterSimpleKernel(void *vx, Nd4jLong *xTadShape, Nd4jLong *xTadOffsets, Nd4jLong xLength, Nd4jLong numTads, void *vi, Nd4jLong *iShapeInfo, Nd4jLong iLength, void *vu, Nd4jLong *uShapeInfo, Nd4jLong uLength) { auto u = reinterpret_cast<X*>(vu); auto indices = reinterpret_cast<Y*>(vi); auto tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < iLength; i += blockDim.x * gridDim.x) { auto x = reinterpret_cast<X*>(vx) + xTadOffsets[i]; auto idx = indices[shape::getIndexOffset(i, iShapeInfo, iLength)]; x[shape::getIndexOffset(idx, xTadShape, xLength)] = u[shape::getIndexOffset(i, uShapeInfo, uLength)]; } } template <typename X, typename Y> 
void scatterSimple_(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto dims = ShapeUtils::evalDimsToExclude(input.rankOf(), dimensions); auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), dims); auto xLength = shape::length(packX.primaryShapeInfo()); auto iLength = indices.lengthOf(); auto uLength = updates.lengthOf(); scatterSimpleKernel<X,Y><<<256, 256, 1024, *context->getCudaStream()>>>(input.getSpecialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), xLength, packX.numberOfTads(), indices.getSpecialBuffer(), indices.getSpecialShapeInfo(), iLength, updates.getSpecialBuffer(), updates.getSpecialShapeInfo(), uLength); } void scatterSimple(nd4j::LaunchContext * context, const int opId, NDArray& input, const NDArray& updates, const NDArray& indices, const std::vector<int>& dimensions) { auto xType = input.dataType(); auto yType = indices.dataType(); if (opId != 6) throw std::runtime_error("scatterSimple: only copy op is supported"); NDArray::prepareSpecialUse({&input}, {&updates, &indices}); BUILD_DOUBLE_SELECTOR(xType, yType, scatterSimple_, (context, opId, input, updates, indices, dimensions), LIBND4J_TYPES, INTEGER_TYPES); NDArray::registerSpecialUse({&input}, {&updates, &indices}); } } } }
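// The clipByNormBPWholeArrCuda kernel above relies on a single-kernel, two-stage
// reduction: every block stores its partial sums in a scratch buffer, and the block
// that retires last (detected with atomicInc on a counter) folds the partials into
// the final result. A minimal, self-contained sketch of that pattern follows; it is
// illustrative only and not part of libnd4j (partialSumKernel, retiredBlocks,
// dPartials and dResult are stand-in names). It assumes blockDim.x is a power of two
// and that the grid covers the whole input (no grid-stride loop).
__device__ unsigned int retiredBlocks = 0;

__global__ void partialSumKernel(const float* x, int n, float* dPartials, float* dResult) {
    extern __shared__ float sh[];                    // blockDim.x floats, sized at launch
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;

    sh[threadIdx.x] = (tid < n) ? x[tid] : 0.f;
    __syncthreads();

    // tree reduction inside the block
    for (int active = blockDim.x / 2; active > 0; active /= 2) {
        if (threadIdx.x < active)
            sh[threadIdx.x] += sh[threadIdx.x + active];
        __syncthreads();
    }

    __shared__ bool amLast;
    if (threadIdx.x == 0) {
        dPartials[blockIdx.x] = sh[0];               // publish this block's partial sum
        __threadfence();                             // make it visible to other blocks
        amLast = (atomicInc(&retiredBlocks, gridDim.x) == gridDim.x - 1);
    }
    __syncthreads();

    if (amLast && threadIdx.x == 0) {                // only the last retiring block runs this
        float total = 0.f;
        for (unsigned int b = 0; b < gridDim.x; ++b)
            total += dPartials[b];
        *dResult = total;
        retiredBlocks = 0;                           // reset so the kernel can be launched again
    }
}
// Example launch: partialSumKernel<<<blocks, threads, threads * sizeof(float)>>>(dX, n, dPartials, dOut);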
bbe06e91fc7fb805f3bd2e6eb6042e922c90b03f.hip
// !!! This is a file automatically generated by hipify!!! using namespace std; #include "parallel.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <string.h> #include <float.h> #include <assert.h> #include <fstream> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #define GET_X(x) (2*x) #define GET_Y(y) (2*y + 1) #define THREADS_PER_BLOCK 1024 #define DAMPING 0.75 #define OVERLAP_COEFF 1.3 typedef struct Parameters { string fileName; int numFrames; // Number of frames int stepsPerFrame; //Steps per frame float size; // Particle size float dt; //Time step float density_ref; //Reference density float k; // Bulk modulus float viscocity; float g; /* Gravity strength */ } Parameters; typedef struct CurrState { int numParticles; float mass; float* densities; //array of all the densities of the particles float* positions; float* velocities_half; float* velocities_full; float* accelerations; int numBlocks; int* CUDA_numParticles; //Array of size 1 int CUDA_tempInt; //Array of size 1 float CUDA_tempFloat; //Array of size 1 float* CUDA_velocities_half; float* CUDA_velocities_full; float* CUDA_accelerations; float* CUDA_positions; float* CUDA_densities; } CurrState; static inline int nextPow2(int n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; return n; } CurrState* allocState(int n) { CurrState* currState = (CurrState*)calloc(1,sizeof(CurrState)); currState->densities = (float*)calloc(n, sizeof(float)); currState->positions = (float*)calloc(2*n, sizeof(float)); currState->velocities_full = (float*)calloc(2*n, sizeof(float)); currState->velocities_half = (float*)calloc(2*n, sizeof(float)); currState->accelerations = (float*)calloc(2*n, sizeof(float)); currState->numParticles = n; currState->numBlocks = 2*currState->numParticles / THREADS_PER_BLOCK + 1; hipMalloc((void**)&(currState->CUDA_positions), (2*n*(sizeof(float)))); hipMalloc((void**)&(currState->CUDA_densities), n*sizeof(float)); hipMalloc((void**)&(currState->CUDA_numParticles), sizeof(int)); hipMalloc((void**)&(currState->CUDA_velocities_full),(2*n*(sizeof(float)))); hipMalloc((void**)&(currState->CUDA_velocities_half),(2*n*(sizeof(float)))); hipMalloc((void**)&(currState->CUDA_accelerations),(2*n*(sizeof(float)))); hipMemset(currState->CUDA_positions, 0, 2*n*sizeof(float)); hipMemset(currState->CUDA_velocities_full, 0, 2*n*sizeof(float)); hipMemset(currState->CUDA_velocities_half, 0, 2*n*sizeof(float)); hipMemset(currState->CUDA_accelerations, 0, 2*n*sizeof(float)); hipMemset(currState->CUDA_numParticles, 0, sizeof(int)); return currState; } void freeState(CurrState* currState) { free(currState->densities); free(currState->positions); free(currState->velocities_half); free(currState->velocities_full); free(currState->accelerations); free(currState); hipFree(currState->CUDA_positions); //hipFree(currState->CUDA_tempFloat); //hipFree(currState->CUDA_tempInt); hipFree(currState->CUDA_numParticles); hipFree(currState->CUDA_velocities_full); hipFree(currState->CUDA_velocities_half); hipFree(currState->CUDA_accelerations); } __global__ void kernel_velocityStep(float* CUDA_velocities_half, float* CUDA_velocities_full, float* CUDA_accelerations, int CUDA_numParticles, float* CUDA_positions, float CUDA_dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= 2* CUDA_numParticles) { //printf("FUCKME"); return; } CUDA_velocities_half[i] += CUDA_accelerations[i] * CUDA_dt; CUDA_velocities_full[i] = 
CUDA_velocities_half[i] + CUDA_accelerations[i] * CUDA_dt / 2; CUDA_positions[i] += CUDA_velocities_half[i] * CUDA_dt; } __global__ void kernel_calculateDensity(int CUDA_numParticles, float CUDA_innerConstant, float CUDA_outerConstant, float CUDA_size_2, float* CUDA_densities, float* CUDA_positions) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= CUDA_numParticles) { return; } CUDA_densities[i] += CUDA_innerConstant; for (unsigned int j = 0; j < CUDA_numParticles; j++) { if (j != i) { float dx = CUDA_positions[GET_X(i)] - CUDA_positions[GET_X(j)]; float dy = CUDA_positions[GET_Y(i)] - CUDA_positions[GET_Y(j)]; float r_2 = dx * dx + dy * dy; float z = CUDA_size_2 - r_2; float z_3 = z * z * z; if (z > 0) { float densities_ij = CUDA_outerConstant * z_3; CUDA_densities[i] += densities_ij; } } } } __global__ void kernel_calculateAcceleration(int CUDA_numParticles, float CUDA_g, float CUDA_size, float CUDA_C0, float CUDA_Cp, float CUDA_Cv, float CUDA_density_ref, float* CUDA_positions, float* CUDA_accelerations, float* CUDA_densities, float* CUDA_velocities_full){ int i = blockIdx.x * blockDim.x+ threadIdx.x; if (i >= CUDA_numParticles) { return; } float size_2 = CUDA_size * CUDA_size; CUDA_accelerations[GET_X(i)] = 0; CUDA_accelerations[GET_Y(i)] = -CUDA_g; float currDensity_i = CUDA_densities[i]; for (unsigned int j = 0; j < CUDA_numParticles; j++) { if (j!=i) { float dx = CUDA_positions[GET_X(i)] - CUDA_positions[GET_X(j)]; float dy = CUDA_positions[GET_Y(i)] - CUDA_positions[GET_Y(j)]; float r_2 = dx * dx + dy * dy; if (r_2 < size_2) { const float currDensity_j = CUDA_densities[j]; float q = sqrt(r_2)/CUDA_size; float u = 1-q; float w0 = CUDA_C0 * u/(currDensity_j * currDensity_i); float wp = w0 * CUDA_Cp * (currDensity_i + currDensity_j - 2 * CUDA_density_ref) * u/q; float wv = w0 * CUDA_Cv; float dvx = CUDA_velocities_full[GET_X(i)] - CUDA_velocities_full[GET_X(j)]; float dvy = CUDA_velocities_full[GET_Y(i)] - CUDA_velocities_full[GET_Y(j)]; if (i > j) { CUDA_accelerations[GET_X(i)] -= (wp * dx + wv * dvx); CUDA_accelerations[GET_Y(i)] -= (wp * dy + wv * dvy); } else { CUDA_accelerations[GET_X(i)] += (wp * dx + wv * dvx); CUDA_accelerations[GET_Y(i)] += (wp * dy + wv * dvy); } } } } } //now we need to compute densities. We are going to compute densities once for //each ij ji pair since they are the same. 
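// Written out, the sum this routine accumulates (reconstructed from the constants
// computed below, with h = params->size and m = currState->mass) is
//
//   rho_i = 4*m / (pi*h^2)  +  sum over j != i with r_ij < h of  4*m / (pi*h^8) * (h^2 - r_ij^2)^3
//
// The single-particle CPU reference below is a sketch for spot-checking the kernel on
// small inputs; referenceDensity is an illustrative name and is not called by the solver.
static float referenceDensity(const float* positions, int numParticles, int i, float size, float mass) {
    const float size_2 = size * size;
    const float size_8 = size_2 * size_2 * size_2 * size_2;
    float rho = 4.0f * mass / (M_PI * size_2);                  // self contribution
    for (int j = 0; j < numParticles; ++j) {
        if (j == i) continue;
        const float dx = positions[GET_X(i)] - positions[GET_X(j)];
        const float dy = positions[GET_Y(i)] - positions[GET_Y(j)];
        const float z = size_2 - (dx * dx + dy * dy);           // h^2 - r^2
        if (z > 0)
            rho += 4.0f * mass / (M_PI * size_8) * z * z * z;   // pair contribution
    }
    return rho;
}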
void calculateDensity(Parameters* params, CurrState* currState) { int numParticles = currState->numParticles; float size = params->size; float mass = currState->mass; float* positions = currState->positions; float* densities = currState->densities; memset(densities, 0, numParticles * sizeof(float)); float size_2 = size * size; float size_8 = size_2 * size_2 * size_2 * size_2; float outerConstant = (4 * mass) / (M_PI * size_8); float innerConstant = (4 * mass) / (M_PI * size_2); float* CUDA_positions = currState->CUDA_positions; float* CUDA_densities = currState->CUDA_densities; //hipMemcpy(currState->CUDA_densities, currState->densities, sizeof(float) * numParticles, hipMemcpyHostToDevice); //hipMemcpy(currState->CUDA_positions, currState->positions, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); int numBlocks = numParticles / THREADS_PER_BLOCK + 1; hipLaunchKernelGGL(( kernel_calculateDensity), dim3(numBlocks),dim3(THREADS_PER_BLOCK), 0, 0, numParticles, innerConstant, outerConstant, size_2, CUDA_densities, CUDA_positions); //hipMemcpy(currState->densities, currState->CUDA_densities, sizeof(float) * numParticles, hipMemcpyDeviceToHost); //hipMemcpy(currState->positions, currState->CUDA_positions, sizeof(float) * numParticles * 2, hipMemcpyDeviceToHost); } void calculateAcceleration(Parameters* params, CurrState* currState) { float size = params->size; float g = params->g; float k = params->k; float viscocity = params->viscocity; float density_ref = params->density_ref; int numParticles = currState->numParticles; float mass = currState->mass; float* accelerations = currState->accelerations; float* densities = currState->densities; float* positions = currState->positions; float* velocities = currState->velocities_full; float size_2 = size * size; float size_4 = size_2 * size_2; calculateDensity(params, currState); float C0 = mass / (M_PI * size_4); float Cp = 15 * k; float Cv = -40 * viscocity; float* CUDA_positions = currState->CUDA_positions; float* CUDA_accelerations = currState->CUDA_accelerations; float* CUDA_densities = currState->CUDA_densities; float* CUDA_velocities_full = currState->CUDA_velocities_full; //hipMemcpy(currState->CUDA_positions, currState->positions, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); //hipMemcpy(currState->CUDA_accelerations, currState->accelerations, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); //hipMemcpy(currState->CUDA_densities, currState->densities, sizeof(float) * numParticles, hipMemcpyHostToDevice); //hipMemcpy(currState->CUDA_velocities_full, currState->velocities_full, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); int numBlocks = numParticles / THREADS_PER_BLOCK + 1; hipLaunchKernelGGL(( kernel_calculateAcceleration), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, numParticles, g, size, C0, Cp, Cv, density_ref, CUDA_positions, CUDA_accelerations, CUDA_densities, CUDA_velocities_full); //hipMemcpy(currState->accelerations, currState->CUDA_accelerations, sizeof(float) * numParticles * 2, hipMemcpyDeviceToHost); } void reflect(int axis, float barrier, float* positions, float* velocities_full, float* velocities_half) { const float damping = DAMPING; if (velocities_full[axis] == 0) { //this means the particle has stopped return; } float dt = (positions[axis] - barrier) / velocities_full[axis]; positions[0] -= velocities_full[0] * (1-damping) * dt; positions[1] -= velocities_full[1] * (1-damping) * dt; //reflect the positions positions[axis] = 2 * barrier - positions[axis]; //reflect the velocities 
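// flip the component normal to the barrier for both velocity copies, then scale every
// component by DAMPING so each bounce loses energy (an inelastic reflection)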
velocities_full[axis] = - velocities_full[axis]; velocities_half[axis] = - velocities_half[axis]; //damp the velocities velocities_half[0] *= damping; velocities_half[1] *= damping; velocities_full[0] *= damping; velocities_full[1] *= damping; } __global__ void boundaryCheckKernel(int CUDA_numParticles, float damping, float* CUDA_positions, float* CUDA_velocities_full, float* CUDA_velocities_half) { int i = blockIdx.x * blockDim.x+ threadIdx.x; if (i >= CUDA_numParticles) { return; } const float XMIN = 0.0; const float YMIN = 0.0; const float XMAX = 1.0; const float YMAX = 1.0; if (CUDA_positions[2*i] < XMIN) { if (CUDA_velocities_full[2*i] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i] - XMIN) / CUDA_velocities_full[2*i]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i] = 2 * XMIN - CUDA_positions[2*i]; //reflect the velocities CUDA_velocities_full[2*i] = - CUDA_velocities_full[2*i]; CUDA_velocities_half[2*i] = - CUDA_velocities_half[2*i]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } else if (CUDA_positions[2*i] > XMAX) { if (CUDA_velocities_full[2*i] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i] - XMAX) / CUDA_velocities_full[2*i]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i] = 2 * XMAX - CUDA_positions[2*i]; //reflect the velocities CUDA_velocities_full[2*i] = - CUDA_velocities_full[2*i]; CUDA_velocities_half[2*i] = - CUDA_velocities_half[2*i]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } if (CUDA_positions[2*i+1] < YMIN) { if (CUDA_velocities_full[2*i+1] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i+1] - YMIN) / CUDA_velocities_full[2*i+1]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i+1] = 2 * YMIN - CUDA_positions[2*i+1]; //reflect the velocities CUDA_velocities_full[2*i+1] = - CUDA_velocities_full[2*i+1]; CUDA_velocities_half[2*i+1] = - CUDA_velocities_half[2*i+1]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } else if (CUDA_positions[2*i+1] > YMAX) { if (CUDA_velocities_full[2*i+1] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i+1] - YMAX) / CUDA_velocities_full[2*i+1]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i+1] = 2 * YMAX - CUDA_positions[2*i+1]; //reflect the velocities CUDA_velocities_full[2*i+1] = - CUDA_velocities_full[2*i+1]; CUDA_velocities_half[2*i+1] = - CUDA_velocities_half[2*i+1]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } } void boundaryCheck(CurrState* 
currState) { int numParticles = currState->numParticles; float* velocities_full = currState->velocities_full; float* velocities_half = currState->velocities_half; float* positions = currState->positions; float* CUDA_positions = currState->CUDA_positions; float* CUDA_velocities_half = currState->CUDA_velocities_half; float* CUDA_velocities_full = currState->CUDA_velocities_full; //hipMemcpy(CUDA_positions, positions, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); //hipMemcpy(CUDA_velocities_full, velocities_full, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); //hipMemcpy(CUDA_velocities_half, velocities_half, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); int numBlocks = numParticles/THREADS_PER_BLOCK + 1; hipLaunchKernelGGL(( boundaryCheckKernel), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, numParticles, DAMPING, CUDA_positions, CUDA_velocities_full, CUDA_velocities_half); //hipMemcpy(positions, CUDA_positions, sizeof(float) * numParticles * 2, hipMemcpyDeviceToHost); //hipMemcpy(velocities_full, CUDA_velocities_full, sizeof(float) * numParticles * 2, hipMemcpyDeviceToHost); //hipMemcpy(velocities_half, CUDA_velocities_half, sizeof(float) * numParticles * 2, hipMemcpyDeviceToHost); } void velocityStep (CurrState* currState, double dt) { int numParticles = currState->numParticles; float* accelerations = currState->accelerations; float* velocities_full = currState->velocities_full; float* velocities_half = currState->velocities_half; float* positions = currState->positions; int numBlocks = currState->numBlocks; float* CUDA_velocities_half = currState->CUDA_velocities_half; float* CUDA_velocities_full = currState->CUDA_velocities_full; float* CUDA_accelerations = currState->CUDA_accelerations; float* CUDA_positions = currState->CUDA_positions; //hipMemcpy(currState->CUDA_velocities_half, currState->velocities_half, sizeof(float) * 2 * numParticles, hipMemcpyHostToDevice); //hipMemcpy(currState->CUDA_positions, currState->positions, sizeof(float) * 2 * numParticles, hipMemcpyHostToDevice); //hipMemcpy(currState->CUDA_velocities_full, currState->velocities_full, sizeof(float) * 2 * numParticles, hipMemcpyHostToDevice); //hipMemcpy(currState->CUDA_accelerations, currState->accelerations, sizeof(float) * 2 * numParticles, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_velocityStep), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, CUDA_velocities_half, CUDA_velocities_full, CUDA_accelerations, numParticles, CUDA_positions, dt); //hipMemcpy(currState->velocities_half, currState->CUDA_velocities_half,sizeof(float) * 2 * numParticles, hipMemcpyDeviceToHost); //hipMemcpy(currState->velocities_full, currState->CUDA_velocities_full, sizeof(float) * 2 * numParticles, hipMemcpyDeviceToHost); //hipMemcpy(currState->positions, currState->CUDA_positions, sizeof(float)*2*numParticles, hipMemcpyDeviceToHost); boundaryCheck(currState); } __global__ void kernel_velocityStart(float* CUDA_velocities_half, float* CUDA_velocities_full, float* CUDA_accelerations, int CUDA_numParticles, float* CUDA_positions, float CUDA_dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= 2* CUDA_numParticles) { return; } CUDA_velocities_half[i] = CUDA_velocities_full[i] + CUDA_accelerations[i] * CUDA_dt/2; CUDA_velocities_full[i] += CUDA_accelerations[i] * CUDA_dt; CUDA_positions[i] += CUDA_velocities_half[i] * CUDA_dt; } void velocityStart (CurrState* currState, double dt) { int numParticles = currState->numParticles; float* accelerations = currState->accelerations; float* 
velocities_full = currState->velocities_full; float* velocities_half = currState->velocities_half; float* positions = currState->positions; float* CUDA_velocities_half = currState->CUDA_velocities_half; float* CUDA_velocities_full = currState->CUDA_velocities_full; float* CUDA_accelerations = currState->CUDA_accelerations; float* CUDA_positions = currState->CUDA_positions; int numBlocks = 2* numParticles/THREADS_PER_BLOCK + 1; hipLaunchKernelGGL(( kernel_velocityStart), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, CUDA_velocities_half, CUDA_velocities_full, CUDA_accelerations, numParticles, CUDA_positions, dt); /*for (unsigned int i = 0; i < 2*numParticles; ++i) { velocities_half[i] = velocities_full[i] + accelerations[i] * dt / 2; } for (unsigned int i = 0; i < 2*numParticles; ++i) { velocities_full[i] += accelerations[i] * dt; } for (unsigned int i = 0; i < 2*numParticles; ++i) { positions[i] += velocities_half[i] * dt; }*/ boundaryCheck(currState); } typedef int (*initialFluidShape_fun)(float, float); int cornerBoxInit(float x, float y) { return ((x < 0.5) && (y < 0.5)); } int sphereDropInit(float x, float y) { float dx = (x-0.5); float dy = (y-0.3); float r2 = dx*dx + dy*dy; return (r2 < 0.25 * 0.25); } CurrState* initialParticlePlacement(Parameters* params, initialFluidShape_fun shapeFun) { float size = params->size; float adjSize = size/OVERLAP_COEFF; int newCount = 0; int iterations = 1.0f/adjSize + 1; int inRegionCount = 0; for (unsigned int i = 0; i < iterations; ++i){ for (unsigned int j = 0; j < iterations; ++j) { float x = i * adjSize; float y = j * adjSize; inRegionCount += shapeFun(x,y); } } CurrState* currState = allocState(inRegionCount); int position = 0; for (unsigned int i = 0; i < iterations; ++i) { for (unsigned int j = 0; j < iterations; ++j) { float x = i * adjSize; float y = j * adjSize; if (shapeFun(x,y)) { currState->positions[GET_X(position)] = x; currState->positions[GET_Y(position)] = y; currState->velocities_full[GET_X(position)] = 0; currState->velocities_full[GET_Y(position)] = 0; ++position; } } } return currState; } void normalizeMasses(CurrState* currState, Parameters* params) { currState->mass = 1.f; calculateDensity(params, currState); float density_ref = params->density_ref; float cDensity = 0.f; float cDensity_2 = 0.f; int numParticles = currState->numParticles; for (int i = 0; i < numParticles; ++i) { float density = currState->densities[i]; cDensity += density; cDensity_2 += (density * density); } currState->mass *= (density_ref * cDensity / cDensity_2); } CurrState* initParticles(Parameters* params) { CurrState* currState = initialParticlePlacement(params, sphereDropInit); normalizeMasses(currState, params); return currState; } void initParams(Parameters* params) { params->fileName = "output.out"; params->numFrames = 400; params->stepsPerFrame = 100; params->dt = 1e-4; params->size = 5e-2; params->k = 1e3; params->density_ref = 1000; params->viscocity = 0.1; params->g = 9.8; } void errorCheck(CurrState* currState) { int numParticles = currState->numParticles; float currX; float currY; for (unsigned int i = 0; i < numParticles; ++i) { int xIndex = GET_X(i); int yIndex = GET_Y(i); currX = currState->positions[xIndex]; currY = currState->positions[yIndex]; assert(currX >=0 || currX <=1); assert(currY >=0 || currY <=1); } } int run_main() { //printf("main called\n"); Parameters params; initParams(&params); CurrState* currState = initParticles(&params); string fileName = params.fileName; int numFrames = params.numFrames; int stepsPerFrame = 
params.stepsPerFrame; float timeStep = params.dt; int numParticles = currState->numParticles; hipMemcpy(currState->CUDA_positions, currState->positions, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); hipMemcpy(currState->CUDA_accelerations, currState->accelerations, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); hipMemcpy(currState->CUDA_densities, currState->densities, sizeof(float) * numParticles, hipMemcpyHostToDevice); hipMemcpy(currState->CUDA_velocities_full, currState->velocities_full, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); hipMemcpy(currState->CUDA_velocities_half, currState->velocities_half, sizeof(float) * numParticles * 2, hipMemcpyHostToDevice); calculateAcceleration(&params, currState); velocityStart(currState, timeStep); hipMemcpy(currState->positions, currState->CUDA_positions, sizeof(float) * numParticles * 2, hipMemcpyDeviceToHost); /* Write to file */ ofstream data_file; data_file.open("simulation_data.txt", ios::out); data_file << params.size << "\n"; data_file << numFrames * stepsPerFrame << "\n"; for (int i=0; i < numParticles; i++) { data_file << currState->positions[GET_X(i)] << "\n"; data_file << currState->positions[GET_Y(i)] << "\n"; } data_file << "DONE WITH AN ITERATION\n"; //data_file.close(); /* End of write */ //errorCheck(currState); //iterate through all the frames in the image for (unsigned int frame = 1; frame < numFrames; ++frame) { //iterate through all the steps per frame for (unsigned int j = 0; j < stepsPerFrame; ++j) { calculateAcceleration(&params, currState); //printf("calculaate acceleration returned on loop %d\n",j); velocityStep(currState, timeStep); hipMemcpy(currState->positions, currState->CUDA_positions, sizeof(float) * numParticles * 2, hipMemcpyDeviceToHost); /* Write to file */ //ofstream data_file; //data_file.open("simulation_data.txt", ios::out | ios::app); for (int i=0; i < numParticles; i++) { data_file << currState->positions[GET_X(i)] << "\n"; data_file << currState->positions[GET_Y(i)] << "\n"; } data_file << "DONE WITH AN ITERATION\n"; //errorCheck(currState); } cout << frame << "\n"; } data_file.close(); freeState(currState); return 0; } void parallel() { run_main(); }
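
For reference, the per-component update performed by the velocityStart kernel (first step) and the velocityStep kernel (all later steps) above is a leapfrog-style scheme. Writing a_k for the acceleration evaluated from the positions x_k, the kernels compute

$$
\begin{aligned}
\text{start:}\quad & v_{1/2} = v_0 + \tfrac{\Delta t}{2}\,a_0, \qquad v_1 = v_0 + \Delta t\,a_0, \qquad x_1 = x_0 + \Delta t\,v_{1/2},\\
\text{step } k:\quad & v_{k+1/2} = v_{k-1/2} + \Delta t\,a_k, \qquad v_{k+1} \approx v_{k+1/2} + \tfrac{\Delta t}{2}\,a_k, \qquad x_{k+1} = x_k + \Delta t\,v_{k+1/2},
\end{aligned}
$$

where the full-step velocity is only an estimate, used when forming the viscosity force and the boundary reflection.
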
bbe06e91fc7fb805f3bd2e6eb6042e922c90b03f.cu
using namespace std; #include "parallel.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <string.h> #include <float.h> #include <assert.h> #include <fstream> #include <iostream> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #define GET_X(x) (2*x) #define GET_Y(y) (2*y + 1) #define THREADS_PER_BLOCK 1024 #define DAMPING 0.75 #define OVERLAP_COEFF 1.3 typedef struct Parameters { string fileName; int numFrames; // Number of frames int stepsPerFrame; //Steps per frame float size; // Particle size float dt; //Time step float density_ref; //Reference density float k; // Bulk modulus float viscocity; float g; /* Gravity strength */ } Parameters; typedef struct CurrState { int numParticles; float mass; float* densities; //array of all the densities of the particles float* positions; float* velocities_half; float* velocities_full; float* accelerations; int numBlocks; int* CUDA_numParticles; //Array of size 1 int CUDA_tempInt; //Array of size 1 float CUDA_tempFloat; //Array of size 1 float* CUDA_velocities_half; float* CUDA_velocities_full; float* CUDA_accelerations; float* CUDA_positions; float* CUDA_densities; } CurrState; static inline int nextPow2(int n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; return n; } CurrState* allocState(int n) { CurrState* currState = (CurrState*)calloc(1,sizeof(CurrState)); currState->densities = (float*)calloc(n, sizeof(float)); currState->positions = (float*)calloc(2*n, sizeof(float)); currState->velocities_full = (float*)calloc(2*n, sizeof(float)); currState->velocities_half = (float*)calloc(2*n, sizeof(float)); currState->accelerations = (float*)calloc(2*n, sizeof(float)); currState->numParticles = n; currState->numBlocks = 2*currState->numParticles / THREADS_PER_BLOCK + 1; cudaMalloc((void**)&(currState->CUDA_positions), (2*n*(sizeof(float)))); cudaMalloc((void**)&(currState->CUDA_densities), n*sizeof(float)); cudaMalloc((void**)&(currState->CUDA_numParticles), sizeof(int)); cudaMalloc((void**)&(currState->CUDA_velocities_full),(2*n*(sizeof(float)))); cudaMalloc((void**)&(currState->CUDA_velocities_half),(2*n*(sizeof(float)))); cudaMalloc((void**)&(currState->CUDA_accelerations),(2*n*(sizeof(float)))); cudaMemset(currState->CUDA_positions, 0, 2*n*sizeof(float)); cudaMemset(currState->CUDA_velocities_full, 0, 2*n*sizeof(float)); cudaMemset(currState->CUDA_velocities_half, 0, 2*n*sizeof(float)); cudaMemset(currState->CUDA_accelerations, 0, 2*n*sizeof(float)); cudaMemset(currState->CUDA_numParticles, 0, sizeof(int)); return currState; } void freeState(CurrState* currState) { free(currState->densities); free(currState->positions); free(currState->velocities_half); free(currState->velocities_full); free(currState->accelerations); free(currState); cudaFree(currState->CUDA_positions); //cudaFree(currState->CUDA_tempFloat); //cudaFree(currState->CUDA_tempInt); cudaFree(currState->CUDA_numParticles); cudaFree(currState->CUDA_velocities_full); cudaFree(currState->CUDA_velocities_half); cudaFree(currState->CUDA_accelerations); } __global__ void kernel_velocityStep(float* CUDA_velocities_half, float* CUDA_velocities_full, float* CUDA_accelerations, int CUDA_numParticles, float* CUDA_positions, float CUDA_dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= 2* CUDA_numParticles) { //printf("FUCKME"); return; } CUDA_velocities_half[i] += CUDA_accelerations[i] * CUDA_dt; CUDA_velocities_full[i] = CUDA_velocities_half[i] + CUDA_accelerations[i] * CUDA_dt / 2; 
CUDA_positions[i] += CUDA_velocities_half[i] * CUDA_dt; } __global__ void kernel_calculateDensity(int CUDA_numParticles, float CUDA_innerConstant, float CUDA_outerConstant, float CUDA_size_2, float* CUDA_densities, float* CUDA_positions) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= CUDA_numParticles) { return; } CUDA_densities[i] += CUDA_innerConstant; for (unsigned int j = 0; j < CUDA_numParticles; j++) { if (j != i) { float dx = CUDA_positions[GET_X(i)] - CUDA_positions[GET_X(j)]; float dy = CUDA_positions[GET_Y(i)] - CUDA_positions[GET_Y(j)]; float r_2 = dx * dx + dy * dy; float z = CUDA_size_2 - r_2; float z_3 = z * z * z; if (z > 0) { float densities_ij = CUDA_outerConstant * z_3; CUDA_densities[i] += densities_ij; } } } } __global__ void kernel_calculateAcceleration(int CUDA_numParticles, float CUDA_g, float CUDA_size, float CUDA_C0, float CUDA_Cp, float CUDA_Cv, float CUDA_density_ref, float* CUDA_positions, float* CUDA_accelerations, float* CUDA_densities, float* CUDA_velocities_full){ int i = blockIdx.x * blockDim.x+ threadIdx.x; if (i >= CUDA_numParticles) { return; } float size_2 = CUDA_size * CUDA_size; CUDA_accelerations[GET_X(i)] = 0; CUDA_accelerations[GET_Y(i)] = -CUDA_g; float currDensity_i = CUDA_densities[i]; for (unsigned int j = 0; j < CUDA_numParticles; j++) { if (j!=i) { float dx = CUDA_positions[GET_X(i)] - CUDA_positions[GET_X(j)]; float dy = CUDA_positions[GET_Y(i)] - CUDA_positions[GET_Y(j)]; float r_2 = dx * dx + dy * dy; if (r_2 < size_2) { const float currDensity_j = CUDA_densities[j]; float q = sqrt(r_2)/CUDA_size; float u = 1-q; float w0 = CUDA_C0 * u/(currDensity_j * currDensity_i); float wp = w0 * CUDA_Cp * (currDensity_i + currDensity_j - 2 * CUDA_density_ref) * u/q; float wv = w0 * CUDA_Cv; float dvx = CUDA_velocities_full[GET_X(i)] - CUDA_velocities_full[GET_X(j)]; float dvy = CUDA_velocities_full[GET_Y(i)] - CUDA_velocities_full[GET_Y(j)]; if (i > j) { CUDA_accelerations[GET_X(i)] -= (wp * dx + wv * dvx); CUDA_accelerations[GET_Y(i)] -= (wp * dy + wv * dvy); } else { CUDA_accelerations[GET_X(i)] += (wp * dx + wv * dvx); CUDA_accelerations[GET_Y(i)] += (wp * dy + wv * dvy); } } } } } //now we need to compute densities. We are going to compute densities once for //each ij ji pair since they are the same. 
void calculateDensity(Parameters* params, CurrState* currState) { int numParticles = currState->numParticles; float size = params->size; float mass = currState->mass; float* positions = currState->positions; float* densities = currState->densities; memset(densities, 0, numParticles * sizeof(float)); float size_2 = size * size; float size_8 = size_2 * size_2 * size_2 * size_2; float outerConstant = (4 * mass) / (M_PI * size_8); float innerConstant = (4 * mass) / (M_PI * size_2); float* CUDA_positions = currState->CUDA_positions; float* CUDA_densities = currState->CUDA_densities; //cudaMemcpy(currState->CUDA_densities, currState->densities, sizeof(float) * numParticles, cudaMemcpyHostToDevice); //cudaMemcpy(currState->CUDA_positions, currState->positions, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); int numBlocks = numParticles / THREADS_PER_BLOCK + 1; kernel_calculateDensity<<<numBlocks,THREADS_PER_BLOCK>>>(numParticles, innerConstant, outerConstant, size_2, CUDA_densities, CUDA_positions); //cudaMemcpy(currState->densities, currState->CUDA_densities, sizeof(float) * numParticles, cudaMemcpyDeviceToHost); //cudaMemcpy(currState->positions, currState->CUDA_positions, sizeof(float) * numParticles * 2, cudaMemcpyDeviceToHost); } void calculateAcceleration(Parameters* params, CurrState* currState) { float size = params->size; float g = params->g; float k = params->k; float viscocity = params->viscocity; float density_ref = params->density_ref; int numParticles = currState->numParticles; float mass = currState->mass; float* accelerations = currState->accelerations; float* densities = currState->densities; float* positions = currState->positions; float* velocities = currState->velocities_full; float size_2 = size * size; float size_4 = size_2 * size_2; calculateDensity(params, currState); float C0 = mass / (M_PI * size_4); float Cp = 15 * k; float Cv = -40 * viscocity; float* CUDA_positions = currState->CUDA_positions; float* CUDA_accelerations = currState->CUDA_accelerations; float* CUDA_densities = currState->CUDA_densities; float* CUDA_velocities_full = currState->CUDA_velocities_full; //cudaMemcpy(currState->CUDA_positions, currState->positions, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); //cudaMemcpy(currState->CUDA_accelerations, currState->accelerations, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); //cudaMemcpy(currState->CUDA_densities, currState->densities, sizeof(float) * numParticles, cudaMemcpyHostToDevice); //cudaMemcpy(currState->CUDA_velocities_full, currState->velocities_full, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); int numBlocks = numParticles / THREADS_PER_BLOCK + 1; kernel_calculateAcceleration<<<numBlocks, THREADS_PER_BLOCK>>>(numParticles, g, size, C0, Cp, Cv, density_ref, CUDA_positions, CUDA_accelerations, CUDA_densities, CUDA_velocities_full); //cudaMemcpy(currState->accelerations, currState->CUDA_accelerations, sizeof(float) * numParticles * 2, cudaMemcpyDeviceToHost); } void reflect(int axis, float barrier, float* positions, float* velocities_full, float* velocities_half) { const float damping = DAMPING; if (velocities_full[axis] == 0) { //this means the particle has stopped return; } float dt = (positions[axis] - barrier) / velocities_full[axis]; positions[0] -= velocities_full[0] * (1-damping) * dt; positions[1] -= velocities_full[1] * (1-damping) * dt; //reflect the positions positions[axis] = 2 * barrier - positions[axis]; //reflect the velocities velocities_full[axis] = - velocities_full[axis]; 
velocities_half[axis] = - velocities_half[axis]; //damp the velocities velocities_half[0] *= damping; velocities_half[1] *= damping; velocities_full[0] *= damping; velocities_full[1] *= damping; } __global__ void boundaryCheckKernel(int CUDA_numParticles, float damping, float* CUDA_positions, float* CUDA_velocities_full, float* CUDA_velocities_half) { int i = blockIdx.x * blockDim.x+ threadIdx.x; if (i >= CUDA_numParticles) { return; } const float XMIN = 0.0; const float YMIN = 0.0; const float XMAX = 1.0; const float YMAX = 1.0; if (CUDA_positions[2*i] < XMIN) { if (CUDA_velocities_full[2*i] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i] - XMIN) / CUDA_velocities_full[2*i]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i] = 2 * XMIN - CUDA_positions[2*i]; //reflect the velocities CUDA_velocities_full[2*i] = - CUDA_velocities_full[2*i]; CUDA_velocities_half[2*i] = - CUDA_velocities_half[2*i]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } else if (CUDA_positions[2*i] > XMAX) { if (CUDA_velocities_full[2*i] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i] - XMAX) / CUDA_velocities_full[2*i]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i] = 2 * XMAX - CUDA_positions[2*i]; //reflect the velocities CUDA_velocities_full[2*i] = - CUDA_velocities_full[2*i]; CUDA_velocities_half[2*i] = - CUDA_velocities_half[2*i]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } if (CUDA_positions[2*i+1] < YMIN) { if (CUDA_velocities_full[2*i+1] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i+1] - YMIN) / CUDA_velocities_full[2*i+1]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i+1] = 2 * YMIN - CUDA_positions[2*i+1]; //reflect the velocities CUDA_velocities_full[2*i+1] = - CUDA_velocities_full[2*i+1]; CUDA_velocities_half[2*i+1] = - CUDA_velocities_half[2*i+1]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } else if (CUDA_positions[2*i+1] > YMAX) { if (CUDA_velocities_full[2*i+1] != 0) { //this means the particle has stopped float dt = (CUDA_positions[2*i+1] - YMAX) / CUDA_velocities_full[2*i+1]; CUDA_positions[2*i] -= CUDA_velocities_full[2*i] * (1-damping) * dt; CUDA_positions[2*i+1] -= CUDA_velocities_full[2*i+1] * (1-damping) * dt; //reflect the positions CUDA_positions[2*i+1] = 2 * YMAX - CUDA_positions[2*i+1]; //reflect the velocities CUDA_velocities_full[2*i+1] = - CUDA_velocities_full[2*i+1]; CUDA_velocities_half[2*i+1] = - CUDA_velocities_half[2*i+1]; //damp the velocities CUDA_velocities_half[2*i] *= damping; CUDA_velocities_half[2*i+1] *= damping; CUDA_velocities_full[2*i] *= damping; CUDA_velocities_full[2*i+1] *= damping; } } } void boundaryCheck(CurrState* currState) { int numParticles = currState->numParticles; 
float* velocities_full = currState->velocities_full; float* velocities_half = currState->velocities_half; float* positions = currState->positions; float* CUDA_positions = currState->CUDA_positions; float* CUDA_velocities_half = currState->CUDA_velocities_half; float* CUDA_velocities_full = currState->CUDA_velocities_full; //cudaMemcpy(CUDA_positions, positions, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); //cudaMemcpy(CUDA_velocities_full, velocities_full, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); //cudaMemcpy(CUDA_velocities_half, velocities_half, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); int numBlocks = numParticles/THREADS_PER_BLOCK + 1; boundaryCheckKernel<<<numBlocks, THREADS_PER_BLOCK>>>(numParticles, DAMPING, CUDA_positions, CUDA_velocities_full, CUDA_velocities_half); //cudaMemcpy(positions, CUDA_positions, sizeof(float) * numParticles * 2, cudaMemcpyDeviceToHost); //cudaMemcpy(velocities_full, CUDA_velocities_full, sizeof(float) * numParticles * 2, cudaMemcpyDeviceToHost); //cudaMemcpy(velocities_half, CUDA_velocities_half, sizeof(float) * numParticles * 2, cudaMemcpyDeviceToHost); } void velocityStep (CurrState* currState, double dt) { int numParticles = currState->numParticles; float* accelerations = currState->accelerations; float* velocities_full = currState->velocities_full; float* velocities_half = currState->velocities_half; float* positions = currState->positions; int numBlocks = currState->numBlocks; float* CUDA_velocities_half = currState->CUDA_velocities_half; float* CUDA_velocities_full = currState->CUDA_velocities_full; float* CUDA_accelerations = currState->CUDA_accelerations; float* CUDA_positions = currState->CUDA_positions; //cudaMemcpy(currState->CUDA_velocities_half, currState->velocities_half, sizeof(float) * 2 * numParticles, cudaMemcpyHostToDevice); //cudaMemcpy(currState->CUDA_positions, currState->positions, sizeof(float) * 2 * numParticles, cudaMemcpyHostToDevice); //cudaMemcpy(currState->CUDA_velocities_full, currState->velocities_full, sizeof(float) * 2 * numParticles, cudaMemcpyHostToDevice); //cudaMemcpy(currState->CUDA_accelerations, currState->accelerations, sizeof(float) * 2 * numParticles, cudaMemcpyHostToDevice); kernel_velocityStep<<<numBlocks, THREADS_PER_BLOCK>>>(CUDA_velocities_half, CUDA_velocities_full, CUDA_accelerations, numParticles, CUDA_positions, dt); //cudaMemcpy(currState->velocities_half, currState->CUDA_velocities_half,sizeof(float) * 2 * numParticles, cudaMemcpyDeviceToHost); //cudaMemcpy(currState->velocities_full, currState->CUDA_velocities_full, sizeof(float) * 2 * numParticles, cudaMemcpyDeviceToHost); //cudaMemcpy(currState->positions, currState->CUDA_positions, sizeof(float)*2*numParticles, cudaMemcpyDeviceToHost); boundaryCheck(currState); } __global__ void kernel_velocityStart(float* CUDA_velocities_half, float* CUDA_velocities_full, float* CUDA_accelerations, int CUDA_numParticles, float* CUDA_positions, float CUDA_dt) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= 2* CUDA_numParticles) { return; } CUDA_velocities_half[i] = CUDA_velocities_full[i] + CUDA_accelerations[i] * CUDA_dt/2; CUDA_velocities_full[i] += CUDA_accelerations[i] * CUDA_dt; CUDA_positions[i] += CUDA_velocities_half[i] * CUDA_dt; } void velocityStart (CurrState* currState, double dt) { int numParticles = currState->numParticles; float* accelerations = currState->accelerations; float* velocities_full = currState->velocities_full; float* velocities_half = currState->velocities_half; float* 
positions = currState->positions; float* CUDA_velocities_half = currState->CUDA_velocities_half; float* CUDA_velocities_full = currState->CUDA_velocities_full; float* CUDA_accelerations = currState->CUDA_accelerations; float* CUDA_positions = currState->CUDA_positions; int numBlocks = 2* numParticles/THREADS_PER_BLOCK + 1; kernel_velocityStart<<<numBlocks, THREADS_PER_BLOCK>>>(CUDA_velocities_half, CUDA_velocities_full, CUDA_accelerations, numParticles, CUDA_positions, dt); /*for (unsigned int i = 0; i < 2*numParticles; ++i) { velocities_half[i] = velocities_full[i] + accelerations[i] * dt / 2; } for (unsigned int i = 0; i < 2*numParticles; ++i) { velocities_full[i] += accelerations[i] * dt; } for (unsigned int i = 0; i < 2*numParticles; ++i) { positions[i] += velocities_half[i] * dt; }*/ boundaryCheck(currState); } typedef int (*initialFluidShape_fun)(float, float); int cornerBoxInit(float x, float y) { return ((x < 0.5) && (y < 0.5)); } int sphereDropInit(float x, float y) { float dx = (x-0.5); float dy = (y-0.3); float r2 = dx*dx + dy*dy; return (r2 < 0.25 * 0.25); } CurrState* initialParticlePlacement(Parameters* params, initialFluidShape_fun shapeFun) { float size = params->size; float adjSize = size/OVERLAP_COEFF; int newCount = 0; int iterations = 1.0f/adjSize + 1; int inRegionCount = 0; for (unsigned int i = 0; i < iterations; ++i){ for (unsigned int j = 0; j < iterations; ++j) { float x = i * adjSize; float y = j * adjSize; inRegionCount += shapeFun(x,y); } } CurrState* currState = allocState(inRegionCount); int position = 0; for (unsigned int i = 0; i < iterations; ++i) { for (unsigned int j = 0; j < iterations; ++j) { float x = i * adjSize; float y = j * adjSize; if (shapeFun(x,y)) { currState->positions[GET_X(position)] = x; currState->positions[GET_Y(position)] = y; currState->velocities_full[GET_X(position)] = 0; currState->velocities_full[GET_Y(position)] = 0; ++position; } } } return currState; } void normalizeMasses(CurrState* currState, Parameters* params) { currState->mass = 1.f; calculateDensity(params, currState); float density_ref = params->density_ref; float cDensity = 0.f; float cDensity_2 = 0.f; int numParticles = currState->numParticles; for (int i = 0; i < numParticles; ++i) { float density = currState->densities[i]; cDensity += density; cDensity_2 += (density * density); } currState->mass *= (density_ref * cDensity / cDensity_2); } CurrState* initParticles(Parameters* params) { CurrState* currState = initialParticlePlacement(params, sphereDropInit); normalizeMasses(currState, params); return currState; } void initParams(Parameters* params) { params->fileName = "output.out"; params->numFrames = 400; params->stepsPerFrame = 100; params->dt = 1e-4; params->size = 5e-2; params->k = 1e3; params->density_ref = 1000; params->viscocity = 0.1; params->g = 9.8; } void errorCheck(CurrState* currState) { int numParticles = currState->numParticles; float currX; float currY; for (unsigned int i = 0; i < numParticles; ++i) { int xIndex = GET_X(i); int yIndex = GET_Y(i); currX = currState->positions[xIndex]; currY = currState->positions[yIndex]; assert(currX >=0 || currX <=1); assert(currY >=0 || currY <=1); } } int run_main() { //printf("main called\n"); Parameters params; initParams(&params); CurrState* currState = initParticles(&params); string fileName = params.fileName; int numFrames = params.numFrames; int stepsPerFrame = params.stepsPerFrame; float timeStep = params.dt; int numParticles = currState->numParticles; cudaMemcpy(currState->CUDA_positions, 
currState->positions, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); cudaMemcpy(currState->CUDA_accelerations, currState->accelerations, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); cudaMemcpy(currState->CUDA_densities, currState->densities, sizeof(float) * numParticles, cudaMemcpyHostToDevice); cudaMemcpy(currState->CUDA_velocities_full, currState->velocities_full, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); cudaMemcpy(currState->CUDA_velocities_half, currState->velocities_half, sizeof(float) * numParticles * 2, cudaMemcpyHostToDevice); calculateAcceleration(&params, currState); velocityStart(currState, timeStep); cudaMemcpy(currState->positions, currState->CUDA_positions, sizeof(float) * numParticles * 2, cudaMemcpyDeviceToHost); /* Write to file */ ofstream data_file; data_file.open("simulation_data.txt", ios::out); data_file << params.size << "\n"; data_file << numFrames * stepsPerFrame << "\n"; for (int i=0; i < numParticles; i++) { data_file << currState->positions[GET_X(i)] << "\n"; data_file << currState->positions[GET_Y(i)] << "\n"; } data_file << "DONE WITH AN ITERATION\n"; //data_file.close(); /* End of write */ //errorCheck(currState); //iterate through all the frames in the image for (unsigned int frame = 1; frame < numFrames; ++frame) { //iterate through all the steps per frame for (unsigned int j = 0; j < stepsPerFrame; ++j) { calculateAcceleration(&params, currState); //printf("calculaate acceleration returned on loop %d\n",j); velocityStep(currState, timeStep); cudaMemcpy(currState->positions, currState->CUDA_positions, sizeof(float) * numParticles * 2, cudaMemcpyDeviceToHost); /* Write to file */ //ofstream data_file; //data_file.open("simulation_data.txt", ios::out | ios::app); for (int i=0; i < numParticles; i++) { data_file << currState->positions[GET_X(i)] << "\n"; data_file << currState->positions[GET_Y(i)] << "\n"; } data_file << "DONE WITH AN ITERATION\n"; //errorCheck(currState); } cout << frame << "\n"; } data_file.close(); freeState(currState); return 0; } void parallel() { run_main(); }
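
For reference, kernel_calculateDensity above accumulates, for each particle i, the 2-D SPH density estimate

$$
\rho_i \;=\; \frac{4m}{\pi h^2} \;+\; \sum_{\substack{j \neq i \\ r_{ij} < h}} \frac{4m}{\pi h^8}\,\bigl(h^2 - r_{ij}^2\bigr)^{3},
\qquad r_{ij} = \lVert \mathbf{x}_i - \mathbf{x}_j \rVert,
$$

with h = params->size and m = currState->mass; the first term is the particle's self-contribution (innerConstant) and the factor 4m/(π h⁸) is outerConstant in calculateDensity. This describes a single evaluation assuming CUDA_densities holds zeros when the kernel starts, since the kernel itself only accumulates with +=.
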
a05120a31290744a21effa3b70366a7012d82945.hip
// !!! This is a file automatically generated by hipify!!!
#include <SphSystem.cuh>
#include <hip/hip_runtime.h>

extern "C" {

/**********************************************************************************************************/
void integrateSPHSystem(double* velInterAv, double* velInterAp, double* oldPos, double* newPos,
                        double* forces, double* densities, double dt, int numBodies)
{
    int numThreadsX, numBlocksX;
    computeGridSize(numBodies, numBlocksX, numThreadsX);

    hipLaunchKernelGGL((integrateSPH_LeapFrog), dim3(numBlocksX), dim3(numThreadsX), 0, 0,
                       (double3*) velInterAv, (double3*) velInterAp, (double3*) oldPos, (double3*) newPos,
                       (double3*) forces, densities, dt, numBodies);
    hipDeviceSynchronize();
}

/**********************************************************************************************************/
void interpolateSPHVelocities(double* velInterAv, double* velInterAp, double* oldVel, double* newVel, int numBodies)
{
    int numThreadsX, numBlocksX;
    computeGridSize(numBodies, numBlocksX, numThreadsX);

    hipLaunchKernelGGL((interpolateSPH_velocities), dim3(numBlocksX), dim3(numThreadsX), 0, 0,
                       (double3*) velInterAv, (double3*) velInterAp, (double3*) oldVel, (double3*) newVel, numBodies);
    hipDeviceSynchronize();
}

/**********************************************************************************************************/
void evaluate_densities_forces(double* pos, double* vel, double* mass, double* radius, double* densities,
                               double* pressure, double* normales, double* restDensities, double* viscosities,
                               double* k, double* threshold, double* surfaceTension, int numBodies,
                               double* fPressure, double* fViscosity, double* fSurface, double* forcesAccum,
                               partVoisine voisines)
{
    int numThreadsX, numBlocksX;
    computeGridSize(numBodies, numBlocksX, numThreadsX);

    hipLaunchKernelGGL((densityEvaluation), dim3(numBlocksX), dim3(numThreadsX), 0, 0,
                       (double3*) pos, mass, radius, k, restDensities, numBodies, densities, pressure, voisines);
    hipDeviceSynchronize();

    hipLaunchKernelGGL((internalForces), dim3(numBlocksX), dim3(numThreadsX), 0, 0,
                       (double3*) pos, (double3*) vel, mass, densities, pressure, radius, viscosities,
                       threshold, surfaceTension, (double3*) normales, numBodies,
                       (double3*) fPressure, (double3*) fViscosity, (double3*) fSurface, (double3*) forcesAccum,
                       voisines);
    hipDeviceSynchronize();
}

}
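
computeGridSize is declared in SphSystem.cuh and is not shown in this file; a minimal sketch of what such a helper could look like is given below. The reference-parameter signature and the block size of 256 are assumptions for illustration, not taken from the actual header.

// Hypothetical stand-in for the computeGridSize declared in SphSystem.cuh.
// Assumption: outputs are passed by reference and a fixed block size is used.
static void computeGridSize(int numBodies, int& numBlocks, int& numThreads)
{
    const int blockSize = 256;                              // assumed threads per block
    numThreads = blockSize;
    numBlocks  = (numBodies + blockSize - 1) / blockSize;   // round up so every body gets a thread
}
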
a05120a31290744a21effa3b70366a7012d82945.cu
#include <SphSystem.cuh>
#include <cuda.h>

extern "C" {

/**********************************************************************************************************/
void integrateSPHSystem(double* velInterAv, double* velInterAp, double* oldPos, double* newPos,
                        double* forces, double* densities, double dt, int numBodies)
{
    int numThreadsX, numBlocksX;
    computeGridSize(numBodies, numBlocksX, numThreadsX);

    integrateSPH_LeapFrog<<<numBlocksX, numThreadsX>>>((double3*) velInterAv, (double3*) velInterAp,
                                                       (double3*) oldPos, (double3*) newPos,
                                                       (double3*) forces, densities, dt, numBodies);
    cudaDeviceSynchronize();
}

/**********************************************************************************************************/
void interpolateSPHVelocities(double* velInterAv, double* velInterAp, double* oldVel, double* newVel, int numBodies)
{
    int numThreadsX, numBlocksX;
    computeGridSize(numBodies, numBlocksX, numThreadsX);

    interpolateSPH_velocities<<<numBlocksX, numThreadsX>>>((double3*) velInterAv, (double3*) velInterAp,
                                                           (double3*) oldVel, (double3*) newVel, numBodies);
    cudaDeviceSynchronize();
}

/**********************************************************************************************************/
void evaluate_densities_forces(double* pos, double* vel, double* mass, double* radius, double* densities,
                               double* pressure, double* normales, double* restDensities, double* viscosities,
                               double* k, double* threshold, double* surfaceTension, int numBodies,
                               double* fPressure, double* fViscosity, double* fSurface, double* forcesAccum,
                               partVoisine voisines)
{
    int numThreadsX, numBlocksX;
    computeGridSize(numBodies, numBlocksX, numThreadsX);

    densityEvaluation<<<numBlocksX, numThreadsX>>>((double3*) pos, mass, radius, k, restDensities,
                                                   numBodies, densities, pressure, voisines);
    cudaDeviceSynchronize();

    internalForces<<<numBlocksX, numThreadsX>>>((double3*) pos, (double3*) vel, mass, densities, pressure,
                                                radius, viscosities, threshold, surfaceTension,
                                                (double3*) normales, numBodies, (double3*) fPressure,
                                                (double3*) fViscosity, (double3*) fSurface, (double3*) forcesAccum,
                                                voisines);
    cudaDeviceSynchronize();
}

}
4ea02cb3c4b034186162a5bfc6cffcf5cc090d7b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void helloFromGPU(){
    printf("Hello World from GPU: %d\n", threadIdx.x);
}

int main(void){
    hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, );
    hipDeviceReset();
    //hipDeviceSynchronize();
    return 0;
}
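
The generated launch above, hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, ), expands to an ordinary kernel launch; the two literal zeros are the dynamic shared-memory size and the stream. As a sketch, the same program written by hand could use the triple-chevron syntax, which hipcc also accepts:

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void helloFromGPU() {
    printf("Hello World from GPU: %d\n", threadIdx.x);
}

int main(void) {
    helloFromGPU<<<1, 10>>>();   // hipcc accepts the CUDA-style launch syntax as well
    hipDeviceSynchronize();      // wait for the kernel so its printf output is flushed
    hipDeviceReset();
    return 0;
}
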
4ea02cb3c4b034186162a5bfc6cffcf5cc090d7b.cu
#include <stdio.h>

__global__ void helloFromGPU(){
    printf("Hello World from GPU: %d\n", threadIdx.x);
}

int main(void){
    helloFromGPU<<<1,10>>>();
    cudaDeviceReset();
    //cudaDeviceSynchronize();
    return 0;
}
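
Neither version above checks whether the launch succeeded. A minimal sketch of how error checking could be added follows; the checkLastError helper is illustrative and not part of the sample.

#include <cuda_runtime.h>
#include <stdio.h>

// Illustrative helper: report the most recent CUDA runtime error, if any.
static void checkLastError(const char* where) {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "%s: %s\n", where, cudaGetErrorString(err));
    }
}

// Usage in main():
//   helloFromGPU<<<1,10>>>();
//   checkLastError("helloFromGPU launch");      // reports launch-configuration errors
//   cudaDeviceSynchronize();
//   checkLastError("helloFromGPU execution");   // reports errors raised while the kernel ran
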
c7010fe830be0795e687a15f240be7b3561ce442.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // adapted from https://github.com/balbasty/nitorch // This file implements spline interpolation / sampling and its adjoint // operations. It corresponds loosely to torch's `GridSampler`. // It handles boundary conditions and interpolation orders defined in // `utils/resample_utils.h` and `utils/resample_utils.h`. // These parameters can be specified per dimension. // Isotropic 0-th and 1-st order interpolation have their own (faster) // implementations. Sliding boundary conditions are also implemented // separately. // TODO: // . [DONE] generic 3d // . [DONE] generic 2d // . sliding nearest 3d // . sliding nearest 2d // . sliding linear 3d // . sliding linear 2d // . sliding generic 3d // . sliding generic 2d // . [DONE] spatial gradient mode (without multiplication with output gradient) // . [DONE] second order gradients (backward pass for spatial gradients) // . performance tests // . input bound/inter are always vectors -> clean unused constructors #include <ATen/ATen.h> #include <tuple> #include "bounds_common.h" #include "interpolation_common.h" #include "utils/resample_utils.h" //#include <cstdio> // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GPU-specific parameters #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <c10/macros/Macros.h> using namespace at::cuda::detail; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // maximum number of channels // > not used in mode isotropic nearest/linear #ifndef MONAI_MAX_NUM_CHANNELS #define MONAI_MAX_NUM_CHANNELS 1024 #endif // This parameter allows for a little bit of tolerance when considering // a coordinate as "out-of-bound" (if !extrapolate) #define TINY 5e-2 using at::Tensor; using at::TensorOptions; using c10::IntArrayRef; namespace monai { MONAI_NAMESPACE_DEVICE { // cuda namespace { // anonymous namespace > everything inside has internal linkage // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GENERIC PUSHPULL CLASS // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This class implements the bulk of the code. // /!\ No type and shape checking is performed here. template <typename scalar_t, typename offset_t> class PushPullImpl { public: // ~~~ CONSTRUCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST PushPullImpl( int dim, BoundVectorRef bound, InterpolationVectorRef interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate), bound1(bound.size() > 1 ? bound[1] : bound.size() > 0 ? bound[0] : BoundType::Replicate), bound2( bound.size() > 2 ? bound[2] : bound.size() > 1 ? bound[1] : bound.size() > 0 ? bound[0] : BoundType::Replicate), interpolation0(interpolation.size() > 0 ? 
interpolation[0] : InterpolationType::Linear), interpolation1( interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), interpolation2( interpolation.size() > 2 ? interpolation[2] : interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } MONAI_HOST PushPullImpl( int dim, BoundType bound, InterpolationVectorRef interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound), bound1(bound), bound2(bound), interpolation0(interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), interpolation1( interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), interpolation2( interpolation.size() > 2 ? interpolation[2] : interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } MONAI_HOST PushPullImpl( int dim, BoundVectorRef bound, InterpolationType interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate), bound1(bound.size() > 1 ? bound[1] : bound.size() > 0 ? bound[0] : BoundType::Replicate), bound2( bound.size() > 2 ? bound[2] : bound.size() > 1 ? bound[1] : bound.size() > 0 ? 
bound[0] : BoundType::Replicate), interpolation0(interpolation), interpolation1(interpolation), interpolation2(interpolation), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } MONAI_HOST PushPullImpl( int dim, BoundType bound, InterpolationType interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound), bound1(bound), bound2(bound), interpolation0(interpolation), interpolation1(interpolation), interpolation2(interpolation), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } // ~~~ PUBLIC VALUE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ std::deque<Tensor> output; // MONAI_HOST MONAI_DEVICE void printInfo() const { // printf("dim: %d\n", dim); // printf("do_pull: %d\n", do_pull); // printf("do_push: %d\n", do_push); // printf("do_count: %d\n", do_count); // printf("do_sgrad: %d\n", do_sgrad); // printf("do_grad: %d\n", do_grad); // printf("bound: [%d %d %d]\n", static_cast<int>(bound0), // static_cast<int>(bound1), static_cast<int>(bound2)); // printf("interpolation: [%d %d %d]\n", static_cast<int>(interpolation0), // static_cast<int>(interpolation1), static_cast<int>(interpolation2)); // printf("src: [%d %d %d]\n", src_Z, src_Y, src_X); // printf("trgt: [%d %d %d (%d)]\n", trgt_Z, trgt_Y, trgt_X, trgt_K); // printf("N: %d\n", N); // printf("C: %d\n", C); // printf("src -> %lu\n", reinterpret_cast<std::uintptr_t>(src_ptr)); // printf("trgt -> %lu\n", reinterpret_cast<std::uintptr_t>(trgt_ptr)); // printf("grid -> %lu\n", reinterpret_cast<std::uintptr_t>(grid_ptr)); // printf("out -> %lu\n", reinterpret_cast<std::uintptr_t>(out_ptr)); // printf("grad -> %lu\n", reinterpret_cast<std::uintptr_t>(grad_ptr)); // } // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST void ioset // Pull (const Tensor& source, const Tensor& grid) { init_all(); init_source(source); init_grid(grid); init_output(); } MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) { init_all(); init_source(source); init_grid(grid); init_target(target); init_output(); } MONAI_HOST void ioset // Push (IntArrayRef source_size, const Tensor& grid, const Tensor& target) { init_all(); init_source(source_size); init_grid(grid); init_target(target); init_output(); } MONAI_HOST void ioset // Count (IntArrayRef source_size, const Tensor& grid) { init_all(); init_source(source_size); init_grid(grid); init_output(); } MONAI_DEVICE void loop(int threadIdx, int blockIdx, int blockDim, int gridDim) const; MONAI_HOST MONAI_DEVICE int64_t voxcount() const { return N * trgt_X * trgt_Y * trgt_Z; } private: // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST void init_all(); MONAI_HOST void init_source(const Tensor& source); MONAI_HOST void init_source(IntArrayRef source_size); MONAI_HOST void init_grid(const Tensor& grid); MONAI_HOST void init_target(const Tensor& target); MONAI_HOST void init_output(); MONAI_DEVICE void check2d(offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void check3d(offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate2d(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void 
interpolate2d_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_sliding(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate2d_sliding_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate2d_sliding_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate3d(scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate3d_nearest( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate3d_trilinear( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate3d_sliding( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate3d_sliding_nearest( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate3d_sliding_trilinear( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { /*TODO*/ } // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ int dim; // dimensionality (2 or 3) BoundType bound0; // boundary condition // x|W BoundType bound1; // boundary condition // y|H BoundType bound2; // boundary condition // z|D InterpolationType interpolation0; // interpolation order // x|W InterpolationType interpolation1; // interpolation order // y|H InterpolationType interpolation2; // interpolation order // z|D bool iso; // isotropic interpolation? 
bool extrapolate; // compute out-of-bound values bool do_pull; // sample a volume bool do_push; // splat a volume bool do_count; // splatting weights (= jacobian determinant) bool do_grad; // backprop: gradient of grid // pull bool do_sgrad; // sample spatial gradients // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TensorOptions src_opt; TensorOptions grid_opt; TensorOptions trgt_opt; offset_t N; offset_t C; offset_t src_X; offset_t src_Y; offset_t src_Z; offset_t trgt_X; offset_t trgt_Y; offset_t trgt_Z; offset_t trgt_K; offset_t src_sN; offset_t src_sC; offset_t src_sX; offset_t src_sY; offset_t src_sZ; scalar_t* src_ptr; offset_t trgt_sN; offset_t trgt_sC; offset_t trgt_sX; offset_t trgt_sY; offset_t trgt_sZ; offset_t trgt_sK; scalar_t* trgt_ptr; offset_t grid_sN; offset_t grid_sC; offset_t grid_sX; offset_t grid_sY; offset_t grid_sZ; scalar_t* grid_ptr; offset_t out_sN; offset_t out_sC; offset_t out_sX; offset_t out_sY; offset_t out_sZ; offset_t out_sK; // gradient dimension scalar_t* out_ptr; offset_t grad_sN; offset_t grad_sC; offset_t grad_sX; offset_t grad_sY; offset_t grad_sZ; scalar_t* grad_ptr; }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // INITIALISATION // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> void PushPullImpl<scalar_t, offset_t>::init_all() { src_opt = grid_opt = trgt_opt = TensorOptions(); N = C = static_cast<offset_t>(1); src_X = src_Y = src_Z = static_cast<offset_t>(1); trgt_X = trgt_Y = trgt_Z = trgt_K = static_cast<offset_t>(1); src_sN = src_sC = src_sX = src_sY = src_sZ = static_cast<offset_t>(0); grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = static_cast<offset_t>(0); grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = static_cast<offset_t>(0); trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = static_cast<offset_t>(0); out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = static_cast<offset_t>(0); src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast<scalar_t*>(0); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_source(const Tensor& source) { N = source.size(0); C = source.size(1); src_X = source.size(2); src_Y = source.size(3); src_Z = dim == 2 ? static_cast<offset_t>(1) : source.size(4); src_sN = source.stride(0); src_sC = source.stride(1); src_sX = source.stride(2); src_sY = source.stride(3); src_sZ = dim == 2 ? static_cast<offset_t>(0) : source.stride(4); src_ptr = source.data_ptr<scalar_t>(); src_opt = source.options(); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_source(IntArrayRef source_size) { src_X = source_size[0]; src_Y = source_size[1]; src_Z = dim == 2 ? static_cast<offset_t>(1) : source_size[2]; } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_grid(const Tensor& grid) { N = grid.size(0); trgt_X = grid.size(1); trgt_Y = grid.size(2); trgt_Z = dim == 2 ? static_cast<offset_t>(1) : grid.size(3); grid_sN = grid.stride(0); grid_sX = grid.stride(1); grid_sY = grid.stride(2); grid_sZ = dim == 2 ? static_cast<offset_t>(0) : grid.stride(3); grid_sC = grid.stride(dim == 2 ? 
3 : 4); grid_ptr = grid.data_ptr<scalar_t>(); grid_opt = grid.options(); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_target(const Tensor& target) { N = target.size(0); C = target.size(1); trgt_X = target.size(2); trgt_Y = target.size(3); trgt_Z = dim == 2 ? static_cast<offset_t>(1) : target.size(4); trgt_K = target.dim() == dim + 3 ? target.size(dim == 2 ? 4 : 5) : static_cast<offset_t>(1); trgt_sN = target.stride(0); trgt_sC = target.stride(1); trgt_sX = target.stride(2); trgt_sY = target.stride(3); trgt_sZ = dim == 2 ? static_cast<offset_t>(0) : target.stride(4); trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 2 ? 4 : 5) : static_cast<offset_t>(0); trgt_ptr = target.data_ptr<scalar_t>(); trgt_opt = target.options(); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_output() { output.clear(); if (do_pull) { if (dim == 2) output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt)); else output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt)); auto pull = output.back(); out_sN = pull.stride(0); out_sC = pull.stride(1); out_sX = pull.stride(2); out_sY = pull.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : pull.stride(4); out_sK = static_cast<offset_t>(0); out_ptr = pull.template data_ptr<scalar_t>(); } else if (do_sgrad) { if (dim == 2) output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt)); else output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt)); auto sgrad = output.back(); out_sN = sgrad.stride(0); out_sC = sgrad.stride(1); out_sX = sgrad.stride(2); out_sY = sgrad.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : sgrad.stride(4); out_sK = sgrad.stride(dim == 2 ? 4 : 5); out_ptr = sgrad.template data_ptr<scalar_t>(); if (iso && interpolation0 == InterpolationType::Nearest) sgrad.zero_(); } else if (do_push) { if (dim == 2) output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt)); else output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt)); auto push = output.back(); out_sN = push.stride(0); out_sC = push.stride(1); out_sX = push.stride(2); out_sY = push.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : push.stride(4); out_sK = static_cast<offset_t>(0); out_ptr = push.template data_ptr<scalar_t>(); } else if (do_count) { if (dim == 2) output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt)); else output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt)); auto count = output.back(); out_sN = count.stride(0); out_sC = count.stride(1); out_sX = count.stride(2); out_sY = count.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : count.stride(4); out_sK = static_cast<offset_t>(0); out_ptr = count.template data_ptr<scalar_t>(); } if (do_grad) { if (dim == 2) output.push_back(at::zeros({N, src_X, src_Y, 2}, grid_opt)); else output.push_back(at::zeros({N, src_X, src_Y, src_Z, 3}, grid_opt)); auto grad = output.back(); grad_sN = grad.stride(0); grad_sX = grad.stride(1); grad_sY = grad.stride(2); grad_sZ = dim == 2 ? static_cast<offset_t>(0) : grad.stride(3); grad_sC = grad.stride(dim == 2 ? 
3 : 4); grad_ptr = grad.template data_ptr<scalar_t>(); if (iso && interpolation0 == InterpolationType::Nearest) grad.zero_(); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LOOP // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::loop(int threadIdx, int blockIdx, int blockDim, int gridDim) const { int64_t index = blockIdx * blockDim + threadIdx; int64_t nthreads = voxcount(); offset_t trgt_XYZ = trgt_Z * trgt_Y * trgt_X; offset_t trgt_YZ = trgt_Z * trgt_Y; offset_t n, w, h, d; for (offset_t i = index; index < nthreads; index += blockDim * gridDim, i = index) { // Convert index: linear to sub n = (i / trgt_XYZ); w = (i / trgt_YZ) % trgt_X; h = (i / trgt_Z) % trgt_Y; d = i % trgt_Z; if (dim == 2) check2d(w, h, n); else check3d(w, h, d, n); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // CHECK OUT-OF-BOUND // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Here, we: // 1) read the [x,y,z] source coordinate for the current target voxel // 3) check if the source coordinate is in bounds template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check2d(offset_t w, offset_t h, offset_t n) const { // get the corresponding input x, y, z co-ordinates from grid scalar_t* grid_ptr_NXY = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY; scalar_t x = *grid_ptr_NXY; scalar_t y = grid_ptr_NXY[grid_sC]; // Check if out-of-bound if (!(extrapolate || (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY))))) { if (do_pull || do_sgrad) { scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sZ + h * out_sY; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) { *out_ptr_NCXY = static_cast<scalar_t>(0); if (do_sgrad) out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0); } } if (do_grad) { scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; (*grad_ptr_NXY) = static_cast<scalar_t>(0); grad_ptr_NXY[grad_sC] = static_cast<scalar_t>(0); } return; } // Next step if (bound0 == BoundType::Sliding) { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate2d_sliding_nearest(x, y, w, h, n); case 1: return interpolate2d_sliding_bilinear(x, y, w, h, n); } return interpolate2d_sliding(x, y, w, h, n); } else { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate2d_nearest(x, y, w, h, n); case 1: return interpolate2d_bilinear(x, y, w, h, n); } return interpolate2d(x, y, w, h, n); } } template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const { // get the corresponding input x, y, z co-ordinates from grid scalar_t* grid_ptr_NXYZ = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ; scalar_t x = *grid_ptr_NXYZ; scalar_t y = grid_ptr_NXYZ[grid_sC]; scalar_t z = grid_ptr_NXYZ[grid_sC * 2]; // Check if out-of-bound if (!(extrapolate || (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY)) && inbounds(z, src_Z, static_cast<scalar_t>(TINY))))) { if (do_pull || do_sgrad) { scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { *out_ptr_NCXYZ = static_cast<scalar_t>(0); if (do_sgrad) { out_ptr_NCXYZ[out_sK] = 
static_cast<scalar_t>(0); out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0); } } } if (do_grad) { scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; (*grad_ptr_NXYZ) = static_cast<scalar_t>(0); grad_ptr_NXYZ[grad_sC] = static_cast<scalar_t>(0); grad_ptr_NXYZ[grad_sC * 2] = static_cast<scalar_t>(0); } return; } // Next step if (bound0 == BoundType::Sliding) { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate3d_sliding_nearest(x, y, z, w, h, d, n); case 1: return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n); } return interpolate3d_sliding(x, y, z, w, h, d, n); } else { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate3d_nearest(x, y, z, w, h, d, n); case 1: return interpolate3d_trilinear(x, y, z, w, h, d, n); } return interpolate3d(x, y, z, w, h, d, n); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GENERIC INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { // Get corner pixel values from (x, y, z) offset_t bx0, bx1, by0, by1, bz0, bz1; interpolation::bounds(interpolation0, x, bx0, bx1); interpolation::bounds(interpolation1, y, by0, by1); interpolation::bounds(interpolation2, z, bz0, bz1); offset_t dbx = bx1 - bx0; offset_t dby = by1 - by0; offset_t dbz = bz1 - bz0; // Pre-compute offsets and target value scalar_t* src_ptr_NC0 = src_ptr + n * src_sN; scalar_t* out_ptr_NC0 = out_ptr + n * out_sN; scalar_t* out_ptr_NCXYZ0 = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t target[3 * MONAI_MAX_NUM_CHANNELS]; if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC) { target[c] = *trgt_ptr_NCXYZ; if (trgt_K > 1) { target[c + C] = trgt_ptr_NCXYZ[trgt_sK]; target[c + C * 2] = trgt_ptr_NCXYZ[trgt_sK * 2]; } } // Initialize output scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0; if (do_pull || do_sgrad) { for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { *out_ptr_NCXYZ = static_cast<scalar_t>(0); if (do_sgrad) { out_ptr_NCXYZ[out_sK] = static_cast<scalar_t>(0); out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0); } } } // Pre-compute indices/weights/grad scalar_t wx[8], wy[8], wz[8]; // B-spline weights scalar_t gx[8], gy[8], gz[8]; // B-spline derivatives scalar_t hx[8], hy[8], hz[8]; // B-spline 2nd derivatives offset_t ix[8], iy[8], iz[8]; // Warped indices uint8_t sx[8], sy[8], sz[8]; // Warped indices { scalar_t *owz = static_cast<scalar_t*>(wz), *ogz = static_cast<scalar_t*>(gz), *ohz = static_cast<scalar_t*>(hz); offset_t* oiz = static_cast<offset_t*>(iz); uint8_t* osz = static_cast<uint8_t*>(sz); for (offset_t bz = bz0; bz <= bz1; ++bz) { scalar_t dz = z - bz; *(owz++) = interpolation::fastweight(interpolation2, dz); if (do_grad || do_sgrad) *(ogz++) = interpolation::fastgrad(interpolation2, dz); if (do_grad && trgt_sK > 1) *(ohz++) = interpolation::fasthess(interpolation2, dz); *(osz++) = bound::sign(bound2, bz, src_Z); *(oiz++) = bound::index(bound2, bz, src_Z); } } { scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy); offset_t* oiy = static_cast<offset_t*>(iy); uint8_t* osy = 
static_cast<uint8_t*>(sy); for (offset_t by = by0; by <= by1; ++by) { scalar_t dy = y - by; *(owy++) = interpolation::fastweight(interpolation1, dy); if (do_grad || do_sgrad) *(ogy++) = interpolation::fastgrad(interpolation1, dy); if (do_grad && trgt_sK > 1) *(ohy++) = interpolation::fasthess(interpolation1, dy); *(osy++) = bound::sign(bound1, by, src_Y); *(oiy++) = bound::index(bound1, by, src_Y); } } { scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx); offset_t* oix = static_cast<offset_t*>(ix); uint8_t* osx = static_cast<uint8_t*>(sx); for (offset_t bx = bx0; bx <= bx1; ++bx) { scalar_t dx = x - bx; *(owx++) = interpolation::fastweight(interpolation0, dx); if (do_grad || do_sgrad) *(ogx++) = interpolation::fastgrad(interpolation0, dx); if (do_grad && trgt_sK > 1) *(ohx++) = interpolation::fasthess(interpolation0, dx); *(osx++) = bound::sign(bound0, bx, src_X); *(oix++) = bound::index(bound0, bx, src_X); } } // Convolve coefficients with basis functions scalar_t ogx, ogy, ogz; ogx = ogy = ogz = static_cast<scalar_t>(0); for (offset_t k = 0; k <= dbz; ++k) { offset_t ooz = iz[k] * out_sZ; offset_t osz = iz[k] * src_sZ; uint8_t szz = sz[k]; scalar_t wzz = wz[k]; scalar_t gzz = gz[k]; scalar_t hzz = hz[k]; for (offset_t j = 0; j <= dby; ++j) { offset_t ooyz = ooz + iy[j] * out_sY; offset_t osyz = osz + iy[j] * src_sY; uint8_t syz = szz * sy[j]; scalar_t wyy = wy[j]; scalar_t gyy = gy[j]; scalar_t hyy = hy[j]; for (offset_t i = 0; i <= dbx; ++i) { offset_t ooxyz = ooyz + ix[i] * out_sX; offset_t osxyz = osyz + ix[i] * src_sX; uint8_t sxyz = syz * sx[i]; scalar_t wxx = wx[i]; scalar_t gxx = gx[i]; scalar_t hxx = hx[i]; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXYZ += bound::get(src_ptr_NC, osxyz, sxyz) * (wxx * wyy * wzz); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_sgrad) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz); *out_ptr_NCXYZ += src * (gxx * wyy * wzz); out_ptr_NCXYZ[out_sK] += src * (wxx * gyy * wzz); out_ptr_NCXYZ[2 * out_sK] += src * (wxx * wyy * gzz); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { if (trgt_K == 1) { // Diff w.r.t. push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, ooxyz, (wxx * wyy * wzz) * target[c], sxyz); } else { // Diff w.r.t. sgrad scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) { scalar_t val = (gxx * wyy * wzz) * target[c] + (wxx * gyy * wzz) * target[c + C] + (wxx * wyy * gzz) * target[c + C * 2]; bound::add(out_ptr_NC, ooxyz, val, sxyz); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { bound::add(out_ptr_NC0, ooxyz, (wxx * wyy * wzz), sxyz); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { if (trgt_K == 1) { // Diff w.r.t. pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz); dot += (trgt_ptr ? 
src * target[c] : src); // trgt_ptr == 0 in the backward pass of 'count' } ogx += (gxx * wyy * wzz) * dot; ogy += (wxx * gyy * wzz) * dot; ogz += (wxx * wyy * gzz) * dot; } else { // Diff w.r.t. sgrad scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot0, dot1, dot2; dot0 = dot1 = dot2 = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz); dot0 += src * target[c]; dot1 += src * target[c + C]; dot2 += src * target[c + C * 2]; } ogx += (hxx * wyy * wzz) * dot0 + (gxx * gyy * wzz) * dot1 + (gxx * wyy * gzz) * dot2; ogy += (gxx * gyy * wzz) * dot0 + (wxx * hyy * wzz) * dot1 + (wxx * gyy * gzz) * dot2; ogz += (gxx * wyy * gzz) * dot0 + (wxx * gyy * gzz) * dot1 + (wxx * wyy * hzz) * dot2; } } } // x } // y } // z // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; (*grad_ptr_NXYZ) = ogx; grad_ptr_NXYZ[grad_sC] = ogy; grad_ptr_NXYZ[grad_sC * 2] = ogz; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GENERIC INTERPOLATION 2D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d( scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { // Get corner pixel values from (x, y) offset_t bx0, bx1, by0, by1; interpolation::bounds(interpolation0, x, bx0, bx1); interpolation::bounds(interpolation1, y, by0, by1); offset_t dbx = bx1 - bx0; offset_t dby = by1 - by0; // Pre-compute offsets and target value scalar_t* src_ptr_NC0 = src_ptr + n * src_sN; scalar_t* out_ptr_NC0 = out_ptr + n * out_sN; scalar_t* out_ptr_NCXY0 = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t target[2 * MONAI_MAX_NUM_CHANNELS]; if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC) { target[c] = *trgt_ptr_NCXY; if (trgt_K > 1) { target[c + C] = trgt_ptr_NCXY[trgt_sK]; } } // Initialize output scalar_t* out_ptr_NCXY = out_ptr_NCXY0; if (do_pull || do_sgrad) { for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) { *out_ptr_NCXY = static_cast<scalar_t>(0); if (do_sgrad) { out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0); } } } // Pre-compute indices/weights/grad scalar_t wx[8], wy[8]; // B-spline weights scalar_t gx[8], gy[8]; // B-spline derivatives scalar_t hx[8], hy[8]; // B-spline 2nd derivatives offset_t ix[8], iy[8]; // Warped indices uint8_t sx[8], sy[8]; // Warped indices { scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy); offset_t* oiy = static_cast<offset_t*>(iy); uint8_t* osy = static_cast<uint8_t*>(sy); for (offset_t by = by0; by <= by1; ++by) { scalar_t dy = y - by; *(owy++) = interpolation::fastweight(interpolation1, dy); if (do_grad || do_sgrad) *(ogy++) = interpolation::fastgrad(interpolation1, dy); if (do_grad && trgt_sK > 1) *(ohy++) = interpolation::fasthess(interpolation1, dy); *(osy++) = bound::sign(bound1, by, src_Y); *(oiy++) = bound::index(bound1, by, src_Y); } } { scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx); offset_t* oix = static_cast<offset_t*>(ix); uint8_t* osx = static_cast<uint8_t*>(sx); for (offset_t bx = bx0; bx <= bx1; ++bx) { scalar_t dx = x - bx; *(owx++) = 
interpolation::fastweight(interpolation0, dx); if (do_grad || do_sgrad) *(ogx++) = interpolation::fastgrad(interpolation0, dx); if (do_grad && trgt_sK > 1) *(ohx++) = interpolation::fasthess(interpolation0, dx); *(osx++) = bound::sign(bound0, bx, src_X); *(oix++) = bound::index(bound0, bx, src_X); } } // Convolve coefficients with basis functions scalar_t ogx, ogy; ogx = ogy = static_cast<scalar_t>(0); for (offset_t j = 0; j <= dby; ++j) { offset_t ooy = iy[j] * out_sY; offset_t osy = iy[j] * src_sY; uint8_t syy = sy[j]; scalar_t wyy = wy[j]; scalar_t gyy = gy[j]; scalar_t hyy = hy[j]; for (offset_t i = 0; i <= dbx; ++i) { offset_t ooxy = ooy + ix[i] * out_sX; offset_t osxy = osy + ix[i] * src_sX; uint8_t sxy = syy * sx[i]; scalar_t wxx = wx[i]; scalar_t gxx = gx[i]; scalar_t hxx = hx[i]; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXY = out_ptr_NCXY0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXY += bound::get(src_ptr_NC, osxy, sxy) * (wxx * wyy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_sgrad) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXY = out_ptr_NCXY0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxy, sxy); *out_ptr_NCXY += src * (gxx * wyy); out_ptr_NCXY[out_sK] += src * (wxx * gyy); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { if (trgt_K == 1) { // Diff w.r.t. push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, ooxy, (wxx * wyy) * target[c], sxy); } else { // Diff w.r.t. sgrad scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) { scalar_t val = (gxx * wyy) * target[c] + (wxx * gyy) * target[c + C]; bound::add(out_ptr_NC, ooxy, val, sxy); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { bound::add(out_ptr_NC0, ooxy, (wxx * wyy), sxy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { if (trgt_K == 1) { // Diff w.r.t. pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxy, sxy); dot += (trgt_ptr ? src * target[c] : src); // trgt_ptr == 0 in the backward pass of 'count' } ogx += (gxx * wyy) * dot; ogy += (wxx * gyy) * dot; } else { // Diff w.r.t. 
sgrad scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot0, dot1; dot0 = dot1 = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxy, sxy); dot0 += src * target[c]; dot1 += src * target[c + C]; } ogx += (hxx * wyy) * dot0 + (gxx * gyy) * dot1; ogy += (gxx * gyy) * dot0 + (wxx * hyy) * dot1; } } } // x } // y // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; (*grad_ptr_NXY) = ogx; grad_ptr_NXY[grad_sC] = ogy; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_trilinear( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { // Get corner pixel values from (x, y, z) offset_t ix0 = static_cast<offset_t>(::floor(x)); offset_t iy0 = static_cast<offset_t>(::floor(y)); offset_t iz0 = static_cast<offset_t>(::floor(z)); // Interpolation weights (inversely proportional to distance) scalar_t dx1 = x - ix0; scalar_t dy1 = y - iy0; scalar_t dz1 = z - iz0; scalar_t dx0 = 1. - dx1; scalar_t dy0 = 1. - dy1; scalar_t dz0 = 1. - dz1; scalar_t w000 = dx0 * dy0 * dz0; scalar_t w100 = dx1 * dy0 * dz0; scalar_t w010 = dx0 * dy1 * dz0; scalar_t w001 = dx0 * dy0 * dz1; scalar_t w110 = dx1 * dy1 * dz0; scalar_t w011 = dx0 * dy1 * dz1; scalar_t w101 = dx1 * dy0 * dz1; scalar_t w111 = dx1 * dy1 * dz1; // Sign (/!\ compute sign before warping indices) int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X); int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y); int8_t sz1 = bound::sign(bound2, iz0 + 1, src_Z); int8_t sx0 = bound::sign(bound0, ix0, src_X); int8_t sy0 = bound::sign(bound1, iy0, src_Y); int8_t sz0 = bound::sign(bound2, iz0, src_Z); int8_t s000 = sx0 * sy0 * sz0; int8_t s100 = sx1 * sy0 * sz0; int8_t s010 = sx0 * sy1 * sz0; int8_t s001 = sx0 * sy0 * sz1; int8_t s110 = sx1 * sy1 * sz0; int8_t s011 = sx0 * sy1 * sz1; int8_t s101 = sx1 * sy0 * sz1; int8_t s111 = sx1 * sy1 * sz1; // Warp indices offset_t ix1, iy1, iz1; ix1 = bound::index(bound0, ix0 + 1, src_X); iy1 = bound::index(bound1, iy0 + 1, src_Y); iz1 = bound::index(bound2, iz0 + 1, src_Z); ix0 = bound::index(bound0, ix0, src_X); iy0 = bound::index(bound1, iy0, src_Y); iz0 = bound::index(bound2, iz0, src_Z); // Offsets into source volume offset_t o000, o100, o010, o001, o110, o011, o101, o111; if (do_pull || do_grad || do_sgrad) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY + iz1 * 
src_sZ; scalar_t gx = static_cast<scalar_t>(0); scalar_t gy = static_cast<scalar_t>(0); scalar_t gz = static_cast<scalar_t>(0); scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; if (trgt_K == 1) { // backward w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXYZ : static_cast<scalar_t>(1); // ^ trgt_ptr == 0 during the backward pass of count src = bound::get(src_ptr_NC, o000, s000); if (trgt_ptr) src *= trgt; gx -= dy0 * dz0 * src; gy -= dx0 * dz0 * src; gz -= dx0 * dy0 * src; src = bound::get(src_ptr_NC, o100, s100); if (trgt_ptr) src *= trgt; gx += dy0 * dz0 * src; gy -= dx1 * dz0 * src; gz -= dx1 * dy0 * src; src = bound::get(src_ptr_NC, o010, s010); if (trgt_ptr) src *= trgt; gx -= dy1 * dz0 * src; gy += dx0 * dz0 * src; gz -= dx0 * dy1 * src; src = bound::get(src_ptr_NC, o110, s110); if (trgt_ptr) src *= trgt; gx += dy1 * dz0 * src; gy += dx1 * dz0 * src; gz -= dx1 * dy1 * src; src = bound::get(src_ptr_NC, o001, s001); if (trgt_ptr) src *= trgt; gx -= dy0 * dz1 * src; gy -= dx0 * dz1 * src; gz += dx0 * dy0 * src; src = bound::get(src_ptr_NC, o101, s101); if (trgt_ptr) src *= trgt; gx += dy0 * dz1 * src; gy -= dx1 * dz1 * src; gz += dx1 * dy0 * src; src = bound::get(src_ptr_NC, o011, s011); if (trgt_ptr) src *= trgt; gx -= dy1 * dz1 * src; gy += dx0 * dz1 * src; gz += dx0 * dy1 * src; src = bound::get(src_ptr_NC, o111, s111); if (trgt_ptr) src *= trgt; gx += dy1 * dz1 * src; gy += dx1 * dz1 * src; gz += dx1 * dy1 * src; } } else { // backward w.r.t. sgrad for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2]; src = bound::get(src_ptr_NC, o000, s000); gx += (dz0 * trgt1 + dy0 * trgt2) * src; gy += (dz0 * trgt0 + dx0 * trgt2) * src; gz += (dy0 * trgt0 + dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o100, s100); gx += (-dz0 * trgt1 - dy0 * trgt2) * src; gy += (-dz0 * trgt0 + dx1 * trgt2) * src; gz += (-dy0 * trgt0 + dx1 * trgt1) * src; src = bound::get(src_ptr_NC, o010, s010); gx += (-dz0 * trgt1 + dy1 * trgt2) * src; gy += (-dz0 * trgt0 - dx0 * trgt2) * src; gz += (dy1 * trgt0 - dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o110, s110); gx += (dz0 * trgt1 - dy1 * trgt2) * src; gy += (dz0 * trgt0 - dx1 * trgt2) * src; gz += (-dy1 * trgt0 - dx1 * trgt1) * src; src = bound::get(src_ptr_NC, o001, s001); gx += (dz1 * trgt1 - dy0 * trgt2) * src; gy += (dz1 * trgt0 - dx0 * trgt2) * src; gz += (-dy0 * trgt0 - dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o101, s101); gx += (-dz1 * trgt1 + dy0 * trgt2) * src; gy += (-dz1 * trgt0 - dx1 * trgt2) * src; gz += (dy0 * trgt0 - dx1 * trgt1) * src; src = bound::get(src_ptr_NC, o011, s011); gx += (-dz1 * trgt1 - dy1 * trgt2) * src; gy += (-dz1 * trgt0 + dx0 * trgt2) * src; gz += (-dy1 * trgt0 + dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o111, s111); gx += (dz1 * trgt1 + dy1 * trgt2) * src; gy += (dz1 * trgt0 + dx1 * trgt2) * src; gz += (dy1 * trgt0 + dx1 * trgt1) * src; } } scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; (*grad_ptr_NXYZ) = gx; grad_ptr_NXYZ[grad_sC] = gy; grad_ptr_NXYZ[grad_sC * 2] = gz; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * src_sX + 
iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ; scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) { *out_ptr_NCXYZ = bound::get(src_ptr_NC, o000, s000) * w000 + bound::get(src_ptr_NC, o100, s100) * w100 + bound::get(src_ptr_NC, o010, s010) * w010 + bound::get(src_ptr_NC, o110, s110) * w110 + bound::get(src_ptr_NC, o001, s001) * w001 + bound::get(src_ptr_NC, o101, s101) * w101 + bound::get(src_ptr_NC, o011, s011) * w011 + bound::get(src_ptr_NC, o111, s111) * w111; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~ else if (do_sgrad) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ; scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) { scalar_t src000 = bound::get(src_ptr_NC, o000, s000); scalar_t src100 = bound::get(src_ptr_NC, o100, s100); scalar_t src010 = bound::get(src_ptr_NC, o010, s010); scalar_t src110 = bound::get(src_ptr_NC, o110, s110); scalar_t src001 = bound::get(src_ptr_NC, o001, s001); scalar_t src101 = bound::get(src_ptr_NC, o101, s101); scalar_t src011 = bound::get(src_ptr_NC, o011, s011); scalar_t src111 = bound::get(src_ptr_NC, o111, s111); *out_ptr_NCXYZ = -dy0 * dz0 * src000 + dy0 * dz0 * src100 - dy1 * dz0 * src010 + dy1 * dz0 * src110 - dy0 * dz1 * src001 + dy0 * dz1 * src101 - dy1 * dz1 * src011 + dy1 * dz1 * src111; out_ptr_NCXYZ[out_sK] = -dx0 * dz0 * src000 - dx1 * dz0 * src100 + dx0 * dz0 * src010 + dx1 * dz0 * src110 - dx0 * dz1 * src001 - dx1 * dz1 * src101 + dx0 * dz1 * src011 + dx1 * dz1 * src111; out_ptr_NCXYZ[out_sK * 2] = -dx0 * dy0 * src000 - dx1 * dy0 * src100 - dx0 * dy1 * src010 - dx1 * dy1 * src110 + dx0 * dy0 * src001 + dx1 * dy0 * src101 + dx0 * dy1 * src011 + dx1 * dy1 * src111; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { // Offsets into 'push' volume o000 = ix0 * out_sX + iy0 * out_sY + iz0 * out_sZ; o100 = ix1 * out_sX + iy0 * out_sY + iz0 * out_sZ; o010 = ix0 * out_sX + iy1 * out_sY + iz0 * out_sZ; o001 = ix0 * out_sX + iy0 * out_sY + iz1 * out_sZ; o110 = ix1 * out_sX + iy1 * out_sY + iz0 * out_sZ; o011 = ix0 * out_sX + iy1 * out_sY + iz1 * out_sZ; o101 = ix1 * out_sX + iy0 * out_sY + iz1 * out_sZ; o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; if (trgt_K == 1) { // Diff w.r.t. 
push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXYZ; bound::add(out_ptr_NC, o000, w000 * trgt, s000); bound::add(out_ptr_NC, o100, w100 * trgt, s100); bound::add(out_ptr_NC, o010, w010 * trgt, s010); bound::add(out_ptr_NC, o110, w110 * trgt, s110); bound::add(out_ptr_NC, o001, w001 * trgt, s001); bound::add(out_ptr_NC, o101, w101 * trgt, s101); bound::add(out_ptr_NC, o011, w011 * trgt, s011); bound::add(out_ptr_NC, o111, w111 * trgt, s111); } } else { // Diff w.r.t. sgrad scalar_t val; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2]; val = -dy0 * dz0 * trgt0 - dx0 * dz0 * trgt1 - dx0 * dy0 * trgt2; bound::add(out_ptr_NC, o000, val, s000); val = dy0 * dz0 * trgt0 - dx1 * dz0 * trgt1 - dx1 * dy0 * trgt2; bound::add(out_ptr_NC, o100, val, s100); val = -dy1 * dz0 * trgt0 + dx0 * dz0 * trgt1 - dx0 * dy1 * trgt2; bound::add(out_ptr_NC, o010, val, s010); val = dy1 * dz0 * trgt0 + dx1 * dz0 * trgt1 - dx1 * dy1 * trgt2; bound::add(out_ptr_NC, o110, val, s110); val = -dy0 * dz1 * trgt0 - dx0 * dz1 * trgt1 + dx0 * dy0 * trgt2; bound::add(out_ptr_NC, o001, val, s001); val = dy0 * dz1 * trgt0 - dx1 * dz1 * trgt1 + dx1 * dy0 * trgt2; bound::add(out_ptr_NC, o101, val, s101); val = -dy1 * dz1 * trgt0 + dx0 * dz1 * trgt1 + dx0 * dy1 * trgt2; bound::add(out_ptr_NC, o011, val, s011); val = dy1 * dz1 * trgt0 + dx1 * dz1 * trgt1 + dx1 * dy1 * trgt2; bound::add(out_ptr_NC, o111, val, s111); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { // Offsets into 'push' volume o000 = ix0 * out_sX + iy0 * out_sY + iz0 * out_sZ; o100 = ix1 * out_sX + iy0 * out_sY + iz0 * out_sZ; o010 = ix0 * out_sX + iy1 * out_sY + iz0 * out_sZ; o001 = ix0 * out_sX + iy0 * out_sY + iz1 * out_sZ; o110 = ix1 * out_sX + iy1 * out_sY + iz0 * out_sZ; o011 = ix0 * out_sX + iy1 * out_sY + iz1 * out_sZ; o101 = ix1 * out_sX + iy0 * out_sY + iz1 * out_sZ; o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ; scalar_t* out_ptr_N = out_ptr + n * out_sN; bound::add(out_ptr_N, o000, w000, s000); bound::add(out_ptr_N, o100, w100, s100); bound::add(out_ptr_N, o010, w010, s010); bound::add(out_ptr_N, o110, w110, s110); bound::add(out_ptr_N, o001, w001, s001); bound::add(out_ptr_N, o101, w101, s101); bound::add(out_ptr_N, o011, w011, s011); bound::add(out_ptr_N, o111, w111, s111); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 2D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_bilinear( scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { // Get corner pixel values from (x, y, z) offset_t ix0 = static_cast<offset_t>(::floor(x)); offset_t iy0 = static_cast<offset_t>(::floor(y)); // Interpolation weights (inversely proportional to distance) scalar_t dx1 = x - ix0; scalar_t dy1 = y - iy0; scalar_t dx0 = 1. - dx1; scalar_t dy0 = 1. 
- dy1; scalar_t w00 = dx0 * dy0; scalar_t w10 = dx1 * dy0; scalar_t w01 = dx0 * dy1; scalar_t w11 = dx1 * dy1; ; // Sign (/!\ compute sign before warping indices) int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X); int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y); int8_t sx0 = bound::sign(bound0, ix0, src_X); int8_t sy0 = bound::sign(bound1, iy0, src_Y); int8_t s00 = sx0 * sy0; int8_t s10 = sx1 * sy0; int8_t s01 = sx0 * sy1; int8_t s11 = sx1 * sy1; // Warp indices offset_t ix1, iy1; ix1 = bound::index(bound0, ix0 + 1, src_X); iy1 = bound::index(bound1, iy0 + 1, src_Y); ix0 = bound::index(bound0, ix0, src_X); iy0 = bound::index(bound1, iy0, src_Y); // Offsets into source volume offset_t o00, o10, o01, o11; if (do_pull || do_grad || do_sgrad) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; scalar_t gx = static_cast<scalar_t>(0); scalar_t gy = static_cast<scalar_t>(0); scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; if (trgt_K == 1) { // backward w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXY : static_cast<scalar_t>(1); // ^ trgt_ptr == 0 during the backward pass of count src = bound::get(src_ptr_NC, o00, s00); if (trgt_ptr) src *= trgt; gx -= dy0 * src; gy -= dx0 * src; src = bound::get(src_ptr_NC, o10, s10); if (trgt_ptr) src *= trgt; gx += dy0 * src; gy -= dx1 * src; src = bound::get(src_ptr_NC, o01, s01); if (trgt_ptr) src *= trgt; gx -= dy1 * src; gy += dx0 * src; src = bound::get(src_ptr_NC, o11, s11); if (trgt_ptr) src *= trgt; gx += dy1 * src; gy += dx1 * src; } } else { // backward w.r.t. 
sgrad for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK]; src = bound::get(src_ptr_NC, o00, s00); gx += trgt1 * src; gy += trgt0 * src; src = bound::get(src_ptr_NC, o10, s10); gx -= trgt1 * src; gy -= trgt0 * src; src = bound::get(src_ptr_NC, o01, s01); gx -= trgt1 * src; gy -= trgt0 * src; src = bound::get(src_ptr_NC, o11, s11); gx += trgt1 * src; gy += trgt0 * src; } } scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; (*grad_ptr_NXYZ) = gx; grad_ptr_NXYZ[grad_sC] = gy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) { *out_ptr_NCXY = bound::get(src_ptr_NC, o00, s00) * w00 + bound::get(src_ptr_NC, o10, s10) * w10 + bound::get(src_ptr_NC, o01, s01) * w01 + bound::get(src_ptr_NC, o11, s11) * w11; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_sgrad) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) { scalar_t src00 = bound::get(src_ptr_NC, o00, s00); scalar_t src10 = bound::get(src_ptr_NC, o10, s10); scalar_t src01 = bound::get(src_ptr_NC, o01, s01); scalar_t src11 = bound::get(src_ptr_NC, o11, s11); *out_ptr_NCXY = -dy0 * src00 + dy0 * src10 - dy1 * src01 + dy1 * src11; out_ptr_NCXY[out_sK] = -dx0 * src00 - dx1 * src10 + dx0 * src01 + dx1 * src11; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { // Offsets into 'push' volume o00 = ix0 * out_sX + iy0 * out_sY; o10 = ix1 * out_sX + iy0 * out_sY; o01 = ix0 * out_sX + iy1 * out_sY; o11 = ix1 * out_sX + iy1 * out_sY; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; if (trgt_K == 1) { // Diff w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXY; bound::add(out_ptr_NC, o00, w00 * trgt, s00); bound::add(out_ptr_NC, o10, w10 * trgt, s10); bound::add(out_ptr_NC, o01, w01 * trgt, s01); bound::add(out_ptr_NC, o11, w11 * trgt, s11); } } else { // Diff w.r.t. 
sgrad scalar_t val; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK]; val = -dy0 * trgt0 - dx0 * trgt1; bound::add(out_ptr_NC, o00, val, s00); val = dy0 * trgt0 - dx1 * trgt1; bound::add(out_ptr_NC, o10, val, s10); val = -dy1 * trgt0 + dx0 * trgt1; bound::add(out_ptr_NC, o01, val, s01); val = dy1 * trgt0 + dx1 * trgt1; bound::add(out_ptr_NC, o11, val, s11); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { // Offsets into 'push' volume o00 = ix0 * out_sX + iy0 * out_sY; o10 = ix1 * out_sX + iy0 * out_sY; o01 = ix0 * out_sX + iy1 * out_sY; o11 = ix1 * out_sX + iy1 * out_sY; scalar_t* out_ptr_N = out_ptr + n * out_sN; bound::add(out_ptr_N, o00, w00, s00); bound::add(out_ptr_N, o10, w10, s10); bound::add(out_ptr_N, o01, w01, s01); bound::add(out_ptr_N, o11, w11, s11); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // NEAREST NEIGHBOR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_nearest( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { offset_t ix = static_cast<offset_t>(std::round(x)); offset_t iy = static_cast<offset_t>(std::round(y)); offset_t iz = static_cast<offset_t>(std::round(z)); // Boundary condition (/!\ compute sign before warping indices) int8_t sx = bound::sign(bound0, ix, src_X); int8_t sy = bound::sign(bound1, iy, src_Y); int8_t sz = bound::sign(bound2, iz, src_Z); ix = bound::index(bound0, ix, src_X); iy = bound::index(bound1, iy, src_Y); iz = bound::index(bound2, iz, src_Z); // Sign int8_t s = sz * sy * sx; if (do_pull) { offset_t o = iz * src_sZ + iy * src_sY + ix * src_sX; scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXYZ = bound::get(src_ptr_NC, o, s); } else if (do_push && trgt_K == 1) { offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, *trgt_ptr_NCXYZ, s); } else if (do_count) { offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, static_cast<scalar_t>(1), s); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // NEAREST NEIGHBOR INTERPOLATION 2D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_nearest( scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { offset_t ix = static_cast<offset_t>(std::round(x)); offset_t iy = static_cast<offset_t>(std::round(y)); // Boundary condition (/!\ compute sign before warping indices) int8_t sx = bound::sign(bound0, ix, src_X); int8_t sy = bound::sign(bound1, iy, src_Y); ix = bound::index(bound0, ix, src_X); iy = bound::index(bound1, iy, src_Y); // Sign int8_t s = sy * sx; if (do_pull) { offset_t o = iy * src_sY + ix * src_sX; scalar_t* 
out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXY = bound::get(src_ptr_NC, o, s); } else if (do_push && trgt_K == 1) { offset_t o = iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, *trgt_ptr_NCXY, s); } else if (do_count) { offset_t o = iy * out_sY + ix * out_sX; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, static_cast<scalar_t>(1), s); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D + SLIDING BOUNDARY // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // TODO // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // CUDA KERNEL (MUST BE OUT OF CLASS) // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // CUDA Kernel template <typename scalar_t, typename offset_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void pushpull_kernel(PushPullImpl<scalar_t, offset_t> f) { f.loop(threadIdx.x, blockIdx.x, blockDim.x, gridDim.x); } } // namespace // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // FUNCTIONAL FORM WITH DISPATCH // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #define PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, SourceType0) \ template std::deque<Tensor> pushpull( \ const SourceType0&, \ const Tensor&, \ const Tensor&, \ BoundType0, \ InterpolationType0, \ bool, \ bool, \ bool, \ bool, \ bool, \ bool); \ template std::deque<Tensor> pushpull( \ const SourceType0&, const Tensor&, BoundType0, InterpolationType0, bool, bool, bool, bool, bool, bool) #define PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType0) \ PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, IntArrayRef); \ PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, Tensor) #define PUSHPULL_INSTANTIATE1(BoundType0) \ PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType); \ PUSHPULL_INSTANTIATE2(BoundType0, InterpolationVectorRef) #define PUSHPULL_INSTANTIATE \ PUSHPULL_INSTANTIATE1(BoundType); \ PUSHPULL_INSTANTIATE1(BoundVectorRef) // ~~~ CUDA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Two arguments (source, grid) // > `bound` and `interpolation` can be single arguments or vectors. template <typename BoundType, typename InterpolationType, typename SourceType> MONAI_HOST std::deque<Tensor> pushpull( const SourceType& source, const Tensor& grid, BoundType bound, InterpolationType interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) { return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] { PushPullImpl<scalar_t, int64_t> f( grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); f.ioset(source, grid); hipLaunchKernelGGL(( pushpull_kernel), dim3(GET_BLOCKS(f.voxcount())), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), f); return f.output; }); } // Three arguments (source, grid, target) // > `bound` and `interpolation` can be single arguments or vectors. // > `source` can be a tensor or a vector of dimensions. 
template <typename BoundType, typename InterpolationType, typename SourceType> MONAI_HOST std::deque<Tensor> pushpull( const SourceType& source, const Tensor& grid, const Tensor& target, BoundType bound, InterpolationType interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) { return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] { PushPullImpl<scalar_t, int64_t> f( grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); f.ioset(source, grid, target); hipLaunchKernelGGL(( pushpull_kernel), dim3(GET_BLOCKS(f.voxcount())), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), f); return f.output; }); } PUSHPULL_INSTANTIATE; } // namespace <device> } // namespace monai
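For orientation only, here is a minimal usage sketch of the two-argument functional form defined above. It assumes the MONAI_NAMESPACE_DEVICE macro expands to a `cuda` namespace (so the host wrapper is reachable as monai::cuda::pushpull) and that BoundType/InterpolationType are the enums pulled in through utils/resample_utils.h; it is an editorial illustration, not part of either stored source file.

// Hedged sketch (assumption, not from the converted sources): a plain "pull"
// (grid sample) through the host dispatch wrapper declared above.
// Shapes follow the layout implied by init_source/init_grid:
//   source: (N, C, X, Y, Z)   grid: (N, X', Y', Z', 3)
#include <ATen/ATen.h>
#include <deque>

at::Tensor pull_example(const at::Tensor& source, const at::Tensor& grid) {
  // Only do_pull is enabled, so output.front() holds the sampled volume
  // of shape (N, C, X', Y', Z').
  std::deque<at::Tensor> out = monai::cuda::pushpull(  // namespace is an assumption
      source,
      grid,
      monai::BoundType::Replicate,       // same boundary condition for x/y/z
      monai::InterpolationType::Linear,  // trilinear sampling
      /*extrapolate=*/true,
      /*do_pull=*/true,
      /*do_push=*/false,
      /*do_count=*/false,
      /*do_grad=*/false,
      /*do_sgrad=*/false);
  return out.front();
}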
c7010fe830be0795e687a15f240be7b3561ce442.cu
/* Copyright 2020 MONAI Consortium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // adapted from https://github.com/balbasty/nitorch // This file implements spline interpolation / sampling and its adjoint // operations. It corresponds loosely to torch's `GridSampler`. // It handles boundary conditions and interpolation orders defined in // `utils/resample_utils.h` and `utils/resample_utils.h`. // These parameters can be specified per dimension. // Isotropic 0-th and 1-st order interpolation have their own (faster) // implementations. Sliding boundary conditions are also implemented // separately. // TODO: // . [DONE] generic 3d // . [DONE] generic 2d // . sliding nearest 3d // . sliding nearest 2d // . sliding linear 3d // . sliding linear 2d // . sliding generic 3d // . sliding generic 2d // . [DONE] spatial gradient mode (without multiplication with output gradient) // . [DONE] second order gradients (backward pass for spatial gradients) // . performance tests // . input bound/inter are always vectors -> clean unused constructors #include <ATen/ATen.h> #include <tuple> #include "bounds_common.h" #include "interpolation_common.h" #include "utils/resample_utils.h" //#include <cstdio> // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GPU-specific parameters #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <c10/macros/Macros.h> using namespace at::cuda::detail; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // maximum number of channels // > not used in mode isotropic nearest/linear #ifndef MONAI_MAX_NUM_CHANNELS #define MONAI_MAX_NUM_CHANNELS 1024 #endif // This parameter allows for a little bit of tolerance when considering // a coordinate as "out-of-bound" (if !extrapolate) #define TINY 5e-2 using at::Tensor; using at::TensorOptions; using c10::IntArrayRef; namespace monai { MONAI_NAMESPACE_DEVICE { // cuda namespace { // anonymous namespace > everything inside has internal linkage // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GENERIC PUSHPULL CLASS // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // This class implements the bulk of the code. // /!\ No type and shape checking is performed here. template <typename scalar_t, typename offset_t> class PushPullImpl { public: // ~~~ CONSTRUCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST PushPullImpl( int dim, BoundVectorRef bound, InterpolationVectorRef interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate), bound1(bound.size() > 1 ? bound[1] : bound.size() > 0 ? bound[0] : BoundType::Replicate), bound2( bound.size() > 2 ? bound[2] : bound.size() > 1 ? bound[1] : bound.size() > 0 ? bound[0] : BoundType::Replicate), interpolation0(interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), interpolation1( interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? 
interpolation[0] : InterpolationType::Linear), interpolation2( interpolation.size() > 2 ? interpolation[2] : interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } MONAI_HOST PushPullImpl( int dim, BoundType bound, InterpolationVectorRef interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound), bound1(bound), bound2(bound), interpolation0(interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), interpolation1( interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), interpolation2( interpolation.size() > 2 ? interpolation[2] : interpolation.size() > 1 ? interpolation[1] : interpolation.size() > 0 ? interpolation[0] : InterpolationType::Linear), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } MONAI_HOST PushPullImpl( int dim, BoundVectorRef bound, InterpolationType interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound.size() > 0 ? bound[0] : BoundType::Replicate), bound1(bound.size() > 1 ? bound[1] : bound.size() > 0 ? bound[0] : BoundType::Replicate), bound2( bound.size() > 2 ? bound[2] : bound.size() > 1 ? bound[1] : bound.size() > 0 ? bound[0] : BoundType::Replicate), interpolation0(interpolation), interpolation1(interpolation), interpolation2(interpolation), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } MONAI_HOST PushPullImpl( int dim, BoundType bound, InterpolationType interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) : dim(dim), bound0(bound), bound1(bound), bound2(bound), interpolation0(interpolation), interpolation1(interpolation), interpolation2(interpolation), extrapolate(extrapolate), do_pull(do_pull), do_push(do_push), do_count(do_count), do_grad(do_grad), do_sgrad(do_sgrad) { iso = interpolation0 == interpolation1 && interpolation0 == interpolation2; } // ~~~ PUBLIC VALUE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ std::deque<Tensor> output; // MONAI_HOST MONAI_DEVICE void printInfo() const { // printf("dim: %d\n", dim); // printf("do_pull: %d\n", do_pull); // printf("do_push: %d\n", do_push); // printf("do_count: %d\n", do_count); // printf("do_sgrad: %d\n", do_sgrad); // printf("do_grad: %d\n", do_grad); // printf("bound: [%d %d %d]\n", static_cast<int>(bound0), // static_cast<int>(bound1), static_cast<int>(bound2)); // printf("interpolation: [%d %d %d]\n", static_cast<int>(interpolation0), // static_cast<int>(interpolation1), static_cast<int>(interpolation2)); // printf("src: [%d %d %d]\n", src_Z, src_Y, src_X); // printf("trgt: [%d %d %d (%d)]\n", trgt_Z, trgt_Y, trgt_X, trgt_K); // printf("N: %d\n", N); // printf("C: %d\n", C); // printf("src -> %lu\n", reinterpret_cast<std::uintptr_t>(src_ptr)); // printf("trgt -> %lu\n", reinterpret_cast<std::uintptr_t>(trgt_ptr)); // printf("grid -> %lu\n", 
reinterpret_cast<std::uintptr_t>(grid_ptr)); // printf("out -> %lu\n", reinterpret_cast<std::uintptr_t>(out_ptr)); // printf("grad -> %lu\n", reinterpret_cast<std::uintptr_t>(grad_ptr)); // } // ~~~ FUNCTORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST void ioset // Pull (const Tensor& source, const Tensor& grid) { init_all(); init_source(source); init_grid(grid); init_output(); } MONAI_HOST void ioset(const Tensor& source, const Tensor& grid, const Tensor& target) { init_all(); init_source(source); init_grid(grid); init_target(target); init_output(); } MONAI_HOST void ioset // Push (IntArrayRef source_size, const Tensor& grid, const Tensor& target) { init_all(); init_source(source_size); init_grid(grid); init_target(target); init_output(); } MONAI_HOST void ioset // Count (IntArrayRef source_size, const Tensor& grid) { init_all(); init_source(source_size); init_grid(grid); init_output(); } MONAI_DEVICE void loop(int threadIdx, int blockIdx, int blockDim, int gridDim) const; MONAI_HOST MONAI_DEVICE int64_t voxcount() const { return N * trgt_X * trgt_Y * trgt_Z; } private: // ~~~ COMPONENTS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MONAI_HOST void init_all(); MONAI_HOST void init_source(const Tensor& source); MONAI_HOST void init_source(IntArrayRef source_size); MONAI_HOST void init_grid(const Tensor& grid); MONAI_HOST void init_target(const Tensor& target); MONAI_HOST void init_output(); MONAI_DEVICE void check2d(offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void check3d(offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate2d(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const; MONAI_DEVICE void interpolate2d_sliding(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate2d_sliding_nearest(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate2d_sliding_bilinear(scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate3d(scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate3d_nearest( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate3d_trilinear( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const; MONAI_DEVICE void interpolate3d_sliding( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate3d_sliding_nearest( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { /*TODO*/ } MONAI_DEVICE void interpolate3d_sliding_trilinear( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { /*TODO*/ } // ~~~ OPTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ int dim; // dimensionality (2 or 3) BoundType bound0; // boundary condition // x|W BoundType bound1; // boundary condition // y|H BoundType bound2; // boundary condition // z|D InterpolationType interpolation0; // interpolation order // x|W InterpolationType interpolation1; // interpolation order // y|H InterpolationType interpolation2; // interpolation order 
// z|D bool iso; // isotropic interpolation? bool extrapolate; // compute out-of-bound values bool do_pull; // sample a volume bool do_push; // splat a volume bool do_count; // splatting weights (= jacobian determinant) bool do_grad; // backprop: gradient of grid // pull bool do_sgrad; // sample spatial gradients // ~~~ NAVIGATORS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TensorOptions src_opt; TensorOptions grid_opt; TensorOptions trgt_opt; offset_t N; offset_t C; offset_t src_X; offset_t src_Y; offset_t src_Z; offset_t trgt_X; offset_t trgt_Y; offset_t trgt_Z; offset_t trgt_K; offset_t src_sN; offset_t src_sC; offset_t src_sX; offset_t src_sY; offset_t src_sZ; scalar_t* src_ptr; offset_t trgt_sN; offset_t trgt_sC; offset_t trgt_sX; offset_t trgt_sY; offset_t trgt_sZ; offset_t trgt_sK; scalar_t* trgt_ptr; offset_t grid_sN; offset_t grid_sC; offset_t grid_sX; offset_t grid_sY; offset_t grid_sZ; scalar_t* grid_ptr; offset_t out_sN; offset_t out_sC; offset_t out_sX; offset_t out_sY; offset_t out_sZ; offset_t out_sK; // gradient dimension scalar_t* out_ptr; offset_t grad_sN; offset_t grad_sC; offset_t grad_sX; offset_t grad_sY; offset_t grad_sZ; scalar_t* grad_ptr; }; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // INITIALISATION // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> void PushPullImpl<scalar_t, offset_t>::init_all() { src_opt = grid_opt = trgt_opt = TensorOptions(); N = C = static_cast<offset_t>(1); src_X = src_Y = src_Z = static_cast<offset_t>(1); trgt_X = trgt_Y = trgt_Z = trgt_K = static_cast<offset_t>(1); src_sN = src_sC = src_sX = src_sY = src_sZ = static_cast<offset_t>(0); grid_sN = grid_sC = grid_sX = grid_sY = grid_sZ = static_cast<offset_t>(0); grad_sN = grad_sC = grad_sX = grad_sY = grad_sZ = static_cast<offset_t>(0); trgt_sN = trgt_sC = trgt_sX = trgt_sY = trgt_sZ = trgt_sK = static_cast<offset_t>(0); out_sN = out_sC = out_sX = out_sY = out_sZ = out_sK = static_cast<offset_t>(0); src_ptr = trgt_ptr = grid_ptr = out_ptr = grad_ptr = static_cast<scalar_t*>(0); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_source(const Tensor& source) { N = source.size(0); C = source.size(1); src_X = source.size(2); src_Y = source.size(3); src_Z = dim == 2 ? static_cast<offset_t>(1) : source.size(4); src_sN = source.stride(0); src_sC = source.stride(1); src_sX = source.stride(2); src_sY = source.stride(3); src_sZ = dim == 2 ? static_cast<offset_t>(0) : source.stride(4); src_ptr = source.data_ptr<scalar_t>(); src_opt = source.options(); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_source(IntArrayRef source_size) { src_X = source_size[0]; src_Y = source_size[1]; src_Z = dim == 2 ? static_cast<offset_t>(1) : source_size[2]; } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_grid(const Tensor& grid) { N = grid.size(0); trgt_X = grid.size(1); trgt_Y = grid.size(2); trgt_Z = dim == 2 ? static_cast<offset_t>(1) : grid.size(3); grid_sN = grid.stride(0); grid_sX = grid.stride(1); grid_sY = grid.stride(2); grid_sZ = dim == 2 ? static_cast<offset_t>(0) : grid.stride(3); grid_sC = grid.stride(dim == 2 ? 
3 : 4); grid_ptr = grid.data_ptr<scalar_t>(); grid_opt = grid.options(); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_target(const Tensor& target) { N = target.size(0); C = target.size(1); trgt_X = target.size(2); trgt_Y = target.size(3); trgt_Z = dim == 2 ? static_cast<offset_t>(1) : target.size(4); trgt_K = target.dim() == dim + 3 ? target.size(dim == 2 ? 4 : 5) : static_cast<offset_t>(1); trgt_sN = target.stride(0); trgt_sC = target.stride(1); trgt_sX = target.stride(2); trgt_sY = target.stride(3); trgt_sZ = dim == 2 ? static_cast<offset_t>(0) : target.stride(4); trgt_sK = target.dim() == dim + 3 ? target.stride(dim == 2 ? 4 : 5) : static_cast<offset_t>(0); trgt_ptr = target.data_ptr<scalar_t>(); trgt_opt = target.options(); } template <typename scalar_t, typename offset_t> MONAI_HOST void PushPullImpl<scalar_t, offset_t>::init_output() { output.clear(); if (do_pull) { if (dim == 2) output.push_back(at::empty({N, C, trgt_X, trgt_Y}, src_opt)); else output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z}, src_opt)); auto pull = output.back(); out_sN = pull.stride(0); out_sC = pull.stride(1); out_sX = pull.stride(2); out_sY = pull.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : pull.stride(4); out_sK = static_cast<offset_t>(0); out_ptr = pull.template data_ptr<scalar_t>(); } else if (do_sgrad) { if (dim == 2) output.push_back(at::empty({N, C, trgt_X, trgt_Y, 2}, src_opt)); else output.push_back(at::empty({N, C, trgt_X, trgt_Y, trgt_Z, 3}, src_opt)); auto sgrad = output.back(); out_sN = sgrad.stride(0); out_sC = sgrad.stride(1); out_sX = sgrad.stride(2); out_sY = sgrad.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : sgrad.stride(4); out_sK = sgrad.stride(dim == 2 ? 4 : 5); out_ptr = sgrad.template data_ptr<scalar_t>(); if (iso && interpolation0 == InterpolationType::Nearest) sgrad.zero_(); } else if (do_push) { if (dim == 2) output.push_back(at::zeros({N, C, src_X, src_Y}, trgt_opt)); else output.push_back(at::zeros({N, C, src_X, src_Y, src_Z}, trgt_opt)); auto push = output.back(); out_sN = push.stride(0); out_sC = push.stride(1); out_sX = push.stride(2); out_sY = push.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : push.stride(4); out_sK = static_cast<offset_t>(0); out_ptr = push.template data_ptr<scalar_t>(); } else if (do_count) { if (dim == 2) output.push_back(at::zeros({N, 1, src_X, src_Y}, grid_opt)); else output.push_back(at::zeros({N, 1, src_X, src_Y, src_Z}, grid_opt)); auto count = output.back(); out_sN = count.stride(0); out_sC = count.stride(1); out_sX = count.stride(2); out_sY = count.stride(3); out_sZ = dim == 2 ? static_cast<offset_t>(0) : count.stride(4); out_sK = static_cast<offset_t>(0); out_ptr = count.template data_ptr<scalar_t>(); } if (do_grad) { if (dim == 2) output.push_back(at::zeros({N, src_X, src_Y, 2}, grid_opt)); else output.push_back(at::zeros({N, src_X, src_Y, src_Z, 3}, grid_opt)); auto grad = output.back(); grad_sN = grad.stride(0); grad_sX = grad.stride(1); grad_sY = grad.stride(2); grad_sZ = dim == 2 ? static_cast<offset_t>(0) : grad.stride(3); grad_sC = grad.stride(dim == 2 ? 
3 : 4); grad_ptr = grad.template data_ptr<scalar_t>(); if (iso && interpolation0 == InterpolationType::Nearest) grad.zero_(); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LOOP // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::loop(int threadIdx, int blockIdx, int blockDim, int gridDim) const { int64_t index = blockIdx * blockDim + threadIdx; int64_t nthreads = voxcount(); offset_t trgt_XYZ = trgt_Z * trgt_Y * trgt_X; offset_t trgt_YZ = trgt_Z * trgt_Y; offset_t n, w, h, d; for (offset_t i = index; index < nthreads; index += blockDim * gridDim, i = index) { // Convert index: linear to sub n = (i / trgt_XYZ); w = (i / trgt_YZ) % trgt_X; h = (i / trgt_Z) % trgt_Y; d = i % trgt_Z; if (dim == 2) check2d(w, h, n); else check3d(w, h, d, n); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // CHECK OUT-OF-BOUND // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Here, we: // 1) read the [x,y,z] source coordinate for the current target voxel // 3) check if the source coordinate is in bounds template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check2d(offset_t w, offset_t h, offset_t n) const { // get the corresponding input x, y, z co-ordinates from grid scalar_t* grid_ptr_NXY = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY; scalar_t x = *grid_ptr_NXY; scalar_t y = grid_ptr_NXY[grid_sC]; // Check if out-of-bound if (!(extrapolate || (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY))))) { if (do_pull || do_sgrad) { scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sZ + h * out_sY; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) { *out_ptr_NCXY = static_cast<scalar_t>(0); if (do_sgrad) out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0); } } if (do_grad) { scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; (*grad_ptr_NXY) = static_cast<scalar_t>(0); grad_ptr_NXY[grad_sC] = static_cast<scalar_t>(0); } return; } // Next step if (bound0 == BoundType::Sliding) { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate2d_sliding_nearest(x, y, w, h, n); case 1: return interpolate2d_sliding_bilinear(x, y, w, h, n); } return interpolate2d_sliding(x, y, w, h, n); } else { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate2d_nearest(x, y, w, h, n); case 1: return interpolate2d_bilinear(x, y, w, h, n); } return interpolate2d(x, y, w, h, n); } } template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::check3d(offset_t w, offset_t h, offset_t d, offset_t n) const { // get the corresponding input x, y, z co-ordinates from grid scalar_t* grid_ptr_NXYZ = grid_ptr + n * grid_sN + w * grid_sX + h * grid_sY + d * grid_sZ; scalar_t x = *grid_ptr_NXYZ; scalar_t y = grid_ptr_NXYZ[grid_sC]; scalar_t z = grid_ptr_NXYZ[grid_sC * 2]; // Check if out-of-bound if (!(extrapolate || (inbounds(x, src_X, static_cast<scalar_t>(TINY)) && inbounds(y, src_Y, static_cast<scalar_t>(TINY)) && inbounds(z, src_Z, static_cast<scalar_t>(TINY))))) { if (do_pull || do_sgrad) { scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { *out_ptr_NCXYZ = static_cast<scalar_t>(0); if (do_sgrad) { out_ptr_NCXYZ[out_sK] = 
static_cast<scalar_t>(0); out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0); } } } if (do_grad) { scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; (*grad_ptr_NXYZ) = static_cast<scalar_t>(0); grad_ptr_NXYZ[grad_sC] = static_cast<scalar_t>(0); grad_ptr_NXYZ[grad_sC * 2] = static_cast<scalar_t>(0); } return; } // Next step if (bound0 == BoundType::Sliding) { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate3d_sliding_nearest(x, y, z, w, h, d, n); case 1: return interpolate3d_sliding_trilinear(x, y, z, w, h, d, n); } return interpolate3d_sliding(x, y, z, w, h, d, n); } else { if (iso) switch (static_cast<int>(interpolation0)) { case 0: return interpolate3d_nearest(x, y, z, w, h, d, n); case 1: return interpolate3d_trilinear(x, y, z, w, h, d, n); } return interpolate3d(x, y, z, w, h, d, n); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GENERIC INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { // Get corner pixel values from (x, y, z) offset_t bx0, bx1, by0, by1, bz0, bz1; interpolation::bounds(interpolation0, x, bx0, bx1); interpolation::bounds(interpolation1, y, by0, by1); interpolation::bounds(interpolation2, z, bz0, bz1); offset_t dbx = bx1 - bx0; offset_t dby = by1 - by0; offset_t dbz = bz1 - bz0; // Pre-compute offsets and target value scalar_t* src_ptr_NC0 = src_ptr + n * src_sN; scalar_t* out_ptr_NC0 = out_ptr + n * out_sN; scalar_t* out_ptr_NCXYZ0 = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t target[3 * MONAI_MAX_NUM_CHANNELS]; if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC) { target[c] = *trgt_ptr_NCXYZ; if (trgt_K > 1) { target[c + C] = trgt_ptr_NCXYZ[trgt_sK]; target[c + C * 2] = trgt_ptr_NCXYZ[trgt_sK * 2]; } } // Initialize output scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0; if (do_pull || do_sgrad) { for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC) { *out_ptr_NCXYZ = static_cast<scalar_t>(0); if (do_sgrad) { out_ptr_NCXYZ[out_sK] = static_cast<scalar_t>(0); out_ptr_NCXYZ[out_sK * 2] = static_cast<scalar_t>(0); } } } // Pre-compute indices/weights/grad scalar_t wx[8], wy[8], wz[8]; // B-spline weights scalar_t gx[8], gy[8], gz[8]; // B-spline derivatives scalar_t hx[8], hy[8], hz[8]; // B-spline 2nd derivatives offset_t ix[8], iy[8], iz[8]; // Warped indices uint8_t sx[8], sy[8], sz[8]; // Warped indices { scalar_t *owz = static_cast<scalar_t*>(wz), *ogz = static_cast<scalar_t*>(gz), *ohz = static_cast<scalar_t*>(hz); offset_t* oiz = static_cast<offset_t*>(iz); uint8_t* osz = static_cast<uint8_t*>(sz); for (offset_t bz = bz0; bz <= bz1; ++bz) { scalar_t dz = z - bz; *(owz++) = interpolation::fastweight(interpolation2, dz); if (do_grad || do_sgrad) *(ogz++) = interpolation::fastgrad(interpolation2, dz); if (do_grad && trgt_sK > 1) *(ohz++) = interpolation::fasthess(interpolation2, dz); *(osz++) = bound::sign(bound2, bz, src_Z); *(oiz++) = bound::index(bound2, bz, src_Z); } } { scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy); offset_t* oiy = static_cast<offset_t*>(iy); uint8_t* osy = 
static_cast<uint8_t*>(sy); for (offset_t by = by0; by <= by1; ++by) { scalar_t dy = y - by; *(owy++) = interpolation::fastweight(interpolation1, dy); if (do_grad || do_sgrad) *(ogy++) = interpolation::fastgrad(interpolation1, dy); if (do_grad && trgt_sK > 1) *(ohy++) = interpolation::fasthess(interpolation1, dy); *(osy++) = bound::sign(bound1, by, src_Y); *(oiy++) = bound::index(bound1, by, src_Y); } } { scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx); offset_t* oix = static_cast<offset_t*>(ix); uint8_t* osx = static_cast<uint8_t*>(sx); for (offset_t bx = bx0; bx <= bx1; ++bx) { scalar_t dx = x - bx; *(owx++) = interpolation::fastweight(interpolation0, dx); if (do_grad || do_sgrad) *(ogx++) = interpolation::fastgrad(interpolation0, dx); if (do_grad && trgt_sK > 1) *(ohx++) = interpolation::fasthess(interpolation0, dx); *(osx++) = bound::sign(bound0, bx, src_X); *(oix++) = bound::index(bound0, bx, src_X); } } // Convolve coefficients with basis functions scalar_t ogx, ogy, ogz; ogx = ogy = ogz = static_cast<scalar_t>(0); for (offset_t k = 0; k <= dbz; ++k) { offset_t ooz = iz[k] * out_sZ; offset_t osz = iz[k] * src_sZ; uint8_t szz = sz[k]; scalar_t wzz = wz[k]; scalar_t gzz = gz[k]; scalar_t hzz = hz[k]; for (offset_t j = 0; j <= dby; ++j) { offset_t ooyz = ooz + iy[j] * out_sY; offset_t osyz = osz + iy[j] * src_sY; uint8_t syz = szz * sy[j]; scalar_t wyy = wy[j]; scalar_t gyy = gy[j]; scalar_t hyy = hy[j]; for (offset_t i = 0; i <= dbx; ++i) { offset_t ooxyz = ooyz + ix[i] * out_sX; offset_t osxyz = osyz + ix[i] * src_sX; uint8_t sxyz = syz * sx[i]; scalar_t wxx = wx[i]; scalar_t gxx = gx[i]; scalar_t hxx = hx[i]; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXYZ += bound::get(src_ptr_NC, osxyz, sxyz) * (wxx * wyy * wzz); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_sgrad) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXYZ = out_ptr_NCXYZ0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz); *out_ptr_NCXYZ += src * (gxx * wyy * wzz); out_ptr_NCXYZ[out_sK] += src * (wxx * gyy * wzz); out_ptr_NCXYZ[2 * out_sK] += src * (wxx * wyy * gzz); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { if (trgt_K == 1) { // Diff w.r.t. push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, ooxyz, (wxx * wyy * wzz) * target[c], sxyz); } else { // Diff w.r.t. sgrad scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) { scalar_t val = (gxx * wyy * wzz) * target[c] + (wxx * gyy * wzz) * target[c + C] + (wxx * wyy * gzz) * target[c + C * 2]; bound::add(out_ptr_NC, ooxyz, val, sxyz); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { bound::add(out_ptr_NC0, ooxyz, (wxx * wyy * wzz), sxyz); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { if (trgt_K == 1) { // Diff w.r.t. pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz); dot += (trgt_ptr ? 
src * target[c] : src); // trgt_ptr == 0 in the backward pass of 'count' } ogx += (gxx * wyy * wzz) * dot; ogy += (wxx * gyy * wzz) * dot; ogz += (wxx * wyy * gzz) * dot; } else { // Diff w.r.t. sgrad scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot0, dot1, dot2; dot0 = dot1 = dot2 = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxyz, sxyz); dot0 += src * target[c]; dot1 += src * target[c + C]; dot2 += src * target[c + C * 2]; } ogx += (hxx * wyy * wzz) * dot0 + (gxx * gyy * wzz) * dot1 + (gxx * wyy * gzz) * dot2; ogy += (gxx * gyy * wzz) * dot0 + (wxx * hyy * wzz) * dot1 + (wxx * gyy * gzz) * dot2; ogz += (gxx * wyy * gzz) * dot0 + (wxx * gyy * gzz) * dot1 + (wxx * wyy * hzz) * dot2; } } } // x } // y } // z // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; (*grad_ptr_NXYZ) = ogx; grad_ptr_NXYZ[grad_sC] = ogy; grad_ptr_NXYZ[grad_sC * 2] = ogz; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // GENERIC INTERPOLATION 2D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d( scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { // Get corner pixel values from (x, y) offset_t bx0, bx1, by0, by1; interpolation::bounds(interpolation0, x, bx0, bx1); interpolation::bounds(interpolation1, y, by0, by1); offset_t dbx = bx1 - bx0; offset_t dby = by1 - by0; // Pre-compute offsets and target value scalar_t* src_ptr_NC0 = src_ptr + n * src_sN; scalar_t* out_ptr_NC0 = out_ptr + n * out_sN; scalar_t* out_ptr_NCXY0 = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t target[2 * MONAI_MAX_NUM_CHANNELS]; if (trgt_ptr && (do_push || do_grad)) for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC) { target[c] = *trgt_ptr_NCXY; if (trgt_K > 1) { target[c + C] = trgt_ptr_NCXY[trgt_sK]; } } // Initialize output scalar_t* out_ptr_NCXY = out_ptr_NCXY0; if (do_pull || do_sgrad) { for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC) { *out_ptr_NCXY = static_cast<scalar_t>(0); if (do_sgrad) { out_ptr_NCXY[out_sK] = static_cast<scalar_t>(0); } } } // Pre-compute indices/weights/grad scalar_t wx[8], wy[8]; // B-spline weights scalar_t gx[8], gy[8]; // B-spline derivatives scalar_t hx[8], hy[8]; // B-spline 2nd derivatives offset_t ix[8], iy[8]; // Warped indices uint8_t sx[8], sy[8]; // Warped indices { scalar_t *owy = static_cast<scalar_t*>(wy), *ogy = static_cast<scalar_t*>(gy), *ohy = static_cast<scalar_t*>(hy); offset_t* oiy = static_cast<offset_t*>(iy); uint8_t* osy = static_cast<uint8_t*>(sy); for (offset_t by = by0; by <= by1; ++by) { scalar_t dy = y - by; *(owy++) = interpolation::fastweight(interpolation1, dy); if (do_grad || do_sgrad) *(ogy++) = interpolation::fastgrad(interpolation1, dy); if (do_grad && trgt_sK > 1) *(ohy++) = interpolation::fasthess(interpolation1, dy); *(osy++) = bound::sign(bound1, by, src_Y); *(oiy++) = bound::index(bound1, by, src_Y); } } { scalar_t *owx = static_cast<scalar_t*>(wx), *ogx = static_cast<scalar_t*>(gx), *ohx = static_cast<scalar_t*>(hx); offset_t* oix = static_cast<offset_t*>(ix); uint8_t* osx = static_cast<uint8_t*>(sx); for (offset_t bx = bx0; bx <= bx1; ++bx) { scalar_t dx = x - bx; *(owx++) = 
interpolation::fastweight(interpolation0, dx); if (do_grad || do_sgrad) *(ogx++) = interpolation::fastgrad(interpolation0, dx); if (do_grad && trgt_sK > 1) *(ohx++) = interpolation::fasthess(interpolation0, dx); *(osx++) = bound::sign(bound0, bx, src_X); *(oix++) = bound::index(bound0, bx, src_X); } } // Convolve coefficients with basis functions scalar_t ogx, ogy; ogx = ogy = static_cast<scalar_t>(0); for (offset_t j = 0; j <= dby; ++j) { offset_t ooy = iy[j] * out_sY; offset_t osy = iy[j] * src_sY; uint8_t syy = sy[j]; scalar_t wyy = wy[j]; scalar_t gyy = gy[j]; scalar_t hyy = hy[j]; for (offset_t i = 0; i <= dbx; ++i) { offset_t ooxy = ooy + ix[i] * out_sX; offset_t osxy = osy + ix[i] * src_sX; uint8_t sxy = syy * sx[i]; scalar_t wxx = wx[i]; scalar_t gxx = gx[i]; scalar_t hxx = hx[i]; // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXY = out_ptr_NCXY0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXY += bound::get(src_ptr_NC, osxy, sxy) * (wxx * wyy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_sgrad) { scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t* out_ptr_NCXY = out_ptr_NCXY0; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxy, sxy); *out_ptr_NCXY += src * (gxx * wyy); out_ptr_NCXY[out_sK] += src * (wxx * gyy); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { if (trgt_K == 1) { // Diff w.r.t. push/pull scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, ooxy, (wxx * wyy) * target[c], sxy); } else { // Diff w.r.t. sgrad scalar_t* out_ptr_NC = out_ptr_NC0; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) { scalar_t val = (gxx * wyy) * target[c] + (wxx * gyy) * target[c + C]; bound::add(out_ptr_NC, ooxy, val, sxy); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Count ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { bound::add(out_ptr_NC0, ooxy, (wxx * wyy), sxy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { if (trgt_K == 1) { // Diff w.r.t. pull/push scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxy, sxy); dot += (trgt_ptr ? src * target[c] : src); // trgt_ptr == 0 in the backward pass of 'count' } ogx += (gxx * wyy) * dot; ogy += (wxx * gyy) * dot; } else { // Diff w.r.t. 
sgrad scalar_t* src_ptr_NC = src_ptr_NC0; scalar_t dot0, dot1; dot0 = dot1 = static_cast<scalar_t>(0); for (offset_t c = 0; c < C; ++c, src_ptr_NC += src_sC) { scalar_t src = bound::get(src_ptr_NC, osxy, sxy); dot0 += src * target[c]; dot1 += src * target[c + C]; } ogx += (hxx * wyy) * dot0 + (gxx * gyy) * dot1; ogy += (gxx * gyy) * dot0 + (wxx * hyy) * dot1; } } } // x } // y // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Grad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { scalar_t* grad_ptr_NXY = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; (*grad_ptr_NXY) = ogx; grad_ptr_NXY[grad_sC] = ogy; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_trilinear( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { // Get corner pixel values from (x, y, z) offset_t ix0 = static_cast<offset_t>(std::floor(x)); offset_t iy0 = static_cast<offset_t>(std::floor(y)); offset_t iz0 = static_cast<offset_t>(std::floor(z)); // Interpolation weights (inversely proportional to distance) scalar_t dx1 = x - ix0; scalar_t dy1 = y - iy0; scalar_t dz1 = z - iz0; scalar_t dx0 = 1. - dx1; scalar_t dy0 = 1. - dy1; scalar_t dz0 = 1. - dz1; scalar_t w000 = dx0 * dy0 * dz0; scalar_t w100 = dx1 * dy0 * dz0; scalar_t w010 = dx0 * dy1 * dz0; scalar_t w001 = dx0 * dy0 * dz1; scalar_t w110 = dx1 * dy1 * dz0; scalar_t w011 = dx0 * dy1 * dz1; scalar_t w101 = dx1 * dy0 * dz1; scalar_t w111 = dx1 * dy1 * dz1; // Sign (/!\ compute sign before warping indices) int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X); int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y); int8_t sz1 = bound::sign(bound2, iz0 + 1, src_Z); int8_t sx0 = bound::sign(bound0, ix0, src_X); int8_t sy0 = bound::sign(bound1, iy0, src_Y); int8_t sz0 = bound::sign(bound2, iz0, src_Z); int8_t s000 = sx0 * sy0 * sz0; int8_t s100 = sx1 * sy0 * sz0; int8_t s010 = sx0 * sy1 * sz0; int8_t s001 = sx0 * sy0 * sz1; int8_t s110 = sx1 * sy1 * sz0; int8_t s011 = sx0 * sy1 * sz1; int8_t s101 = sx1 * sy0 * sz1; int8_t s111 = sx1 * sy1 * sz1; // Warp indices offset_t ix1, iy1, iz1; ix1 = bound::index(bound0, ix0 + 1, src_X); iy1 = bound::index(bound1, iy0 + 1, src_Y); iz1 = bound::index(bound2, iz0 + 1, src_Z); ix0 = bound::index(bound0, ix0, src_X); iy0 = bound::index(bound1, iy0, src_Y); iz0 = bound::index(bound2, iz0, src_Z); // Offsets into source volume offset_t o000, o100, o010, o001, o110, o011, o101, o111; if (do_pull || do_grad || do_sgrad) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY 
+ iz1 * src_sZ; scalar_t gx = static_cast<scalar_t>(0); scalar_t gy = static_cast<scalar_t>(0); scalar_t gz = static_cast<scalar_t>(0); scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; if (trgt_K == 1) { // backward w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXYZ : static_cast<scalar_t>(1); // ^ trgt_ptr == 0 during the backward pass of count src = bound::get(src_ptr_NC, o000, s000); if (trgt_ptr) src *= trgt; gx -= dy0 * dz0 * src; gy -= dx0 * dz0 * src; gz -= dx0 * dy0 * src; src = bound::get(src_ptr_NC, o100, s100); if (trgt_ptr) src *= trgt; gx += dy0 * dz0 * src; gy -= dx1 * dz0 * src; gz -= dx1 * dy0 * src; src = bound::get(src_ptr_NC, o010, s010); if (trgt_ptr) src *= trgt; gx -= dy1 * dz0 * src; gy += dx0 * dz0 * src; gz -= dx0 * dy1 * src; src = bound::get(src_ptr_NC, o110, s110); if (trgt_ptr) src *= trgt; gx += dy1 * dz0 * src; gy += dx1 * dz0 * src; gz -= dx1 * dy1 * src; src = bound::get(src_ptr_NC, o001, s001); if (trgt_ptr) src *= trgt; gx -= dy0 * dz1 * src; gy -= dx0 * dz1 * src; gz += dx0 * dy0 * src; src = bound::get(src_ptr_NC, o101, s101); if (trgt_ptr) src *= trgt; gx += dy0 * dz1 * src; gy -= dx1 * dz1 * src; gz += dx1 * dy0 * src; src = bound::get(src_ptr_NC, o011, s011); if (trgt_ptr) src *= trgt; gx -= dy1 * dz1 * src; gy += dx0 * dz1 * src; gz += dx0 * dy1 * src; src = bound::get(src_ptr_NC, o111, s111); if (trgt_ptr) src *= trgt; gx += dy1 * dz1 * src; gy += dx1 * dz1 * src; gz += dx1 * dy1 * src; } } else { // backward w.r.t. sgrad for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2]; src = bound::get(src_ptr_NC, o000, s000); gx += (dz0 * trgt1 + dy0 * trgt2) * src; gy += (dz0 * trgt0 + dx0 * trgt2) * src; gz += (dy0 * trgt0 + dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o100, s100); gx += (-dz0 * trgt1 - dy0 * trgt2) * src; gy += (-dz0 * trgt0 + dx1 * trgt2) * src; gz += (-dy0 * trgt0 + dx1 * trgt1) * src; src = bound::get(src_ptr_NC, o010, s010); gx += (-dz0 * trgt1 + dy1 * trgt2) * src; gy += (-dz0 * trgt0 - dx0 * trgt2) * src; gz += (dy1 * trgt0 - dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o110, s110); gx += (dz0 * trgt1 - dy1 * trgt2) * src; gy += (dz0 * trgt0 - dx1 * trgt2) * src; gz += (-dy1 * trgt0 - dx1 * trgt1) * src; src = bound::get(src_ptr_NC, o001, s001); gx += (dz1 * trgt1 - dy0 * trgt2) * src; gy += (dz1 * trgt0 - dx0 * trgt2) * src; gz += (-dy0 * trgt0 - dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o101, s101); gx += (-dz1 * trgt1 + dy0 * trgt2) * src; gy += (-dz1 * trgt0 - dx1 * trgt2) * src; gz += (dy0 * trgt0 - dx1 * trgt1) * src; src = bound::get(src_ptr_NC, o011, s011); gx += (-dz1 * trgt1 - dy1 * trgt2) * src; gy += (-dz1 * trgt0 + dx0 * trgt2) * src; gz += (-dy1 * trgt0 + dx0 * trgt1) * src; src = bound::get(src_ptr_NC, o111, s111); gx += (dz1 * trgt1 + dy1 * trgt2) * src; gy += (dz1 * trgt0 + dx1 * trgt2) * src; gz += (dy1 * trgt0 + dx1 * trgt1) * src; } } scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY + d * grad_sZ; (*grad_ptr_NXYZ) = gx; grad_ptr_NXYZ[grad_sC] = gy; grad_ptr_NXYZ[grad_sC * 2] = gz; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * 
src_sX + iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ; scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) { *out_ptr_NCXYZ = bound::get(src_ptr_NC, o000, s000) * w000 + bound::get(src_ptr_NC, o100, s100) * w100 + bound::get(src_ptr_NC, o010, s010) * w010 + bound::get(src_ptr_NC, o110, s110) * w110 + bound::get(src_ptr_NC, o001, s001) * w001 + bound::get(src_ptr_NC, o101, s101) * w101 + bound::get(src_ptr_NC, o011, s011) * w011 + bound::get(src_ptr_NC, o111, s111) * w111; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~ else if (do_sgrad) { o000 = ix0 * src_sX + iy0 * src_sY + iz0 * src_sZ; o100 = ix1 * src_sX + iy0 * src_sY + iz0 * src_sZ; o010 = ix0 * src_sX + iy1 * src_sY + iz0 * src_sZ; o001 = ix0 * src_sX + iy0 * src_sY + iz1 * src_sZ; o110 = ix1 * src_sX + iy1 * src_sY + iz0 * src_sZ; o011 = ix0 * src_sX + iy1 * src_sY + iz1 * src_sZ; o101 = ix1 * src_sX + iy0 * src_sY + iz1 * src_sZ; o111 = ix1 * src_sX + iy1 * src_sY + iz1 * src_sZ; scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) { scalar_t src000 = bound::get(src_ptr_NC, o000, s000); scalar_t src100 = bound::get(src_ptr_NC, o100, s100); scalar_t src010 = bound::get(src_ptr_NC, o010, s010); scalar_t src110 = bound::get(src_ptr_NC, o110, s110); scalar_t src001 = bound::get(src_ptr_NC, o001, s001); scalar_t src101 = bound::get(src_ptr_NC, o101, s101); scalar_t src011 = bound::get(src_ptr_NC, o011, s011); scalar_t src111 = bound::get(src_ptr_NC, o111, s111); *out_ptr_NCXYZ = -dy0 * dz0 * src000 + dy0 * dz0 * src100 - dy1 * dz0 * src010 + dy1 * dz0 * src110 - dy0 * dz1 * src001 + dy0 * dz1 * src101 - dy1 * dz1 * src011 + dy1 * dz1 * src111; out_ptr_NCXYZ[out_sK] = -dx0 * dz0 * src000 - dx1 * dz0 * src100 + dx0 * dz0 * src010 + dx1 * dz0 * src110 - dx0 * dz1 * src001 - dx1 * dz1 * src101 + dx0 * dz1 * src011 + dx1 * dz1 * src111; out_ptr_NCXYZ[out_sK * 2] = -dx0 * dy0 * src000 - dx1 * dy0 * src100 - dx0 * dy1 * src010 - dx1 * dy1 * src110 + dx0 * dy0 * src001 + dx1 * dy0 * src101 + dx0 * dy1 * src011 + dx1 * dy1 * src111; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { // Offsets into 'push' volume o000 = ix0 * out_sX + iy0 * out_sY + iz0 * out_sZ; o100 = ix1 * out_sX + iy0 * out_sY + iz0 * out_sZ; o010 = ix0 * out_sX + iy1 * out_sY + iz0 * out_sZ; o001 = ix0 * out_sX + iy0 * out_sY + iz1 * out_sZ; o110 = ix1 * out_sX + iy1 * out_sY + iz0 * out_sZ; o011 = ix0 * out_sX + iy1 * out_sY + iz1 * out_sZ; o101 = ix1 * out_sX + iy0 * out_sY + iz1 * out_sZ; o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; if (trgt_K == 1) { // Diff w.r.t. 
push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXYZ; bound::add(out_ptr_NC, o000, w000 * trgt, s000); bound::add(out_ptr_NC, o100, w100 * trgt, s100); bound::add(out_ptr_NC, o010, w010 * trgt, s010); bound::add(out_ptr_NC, o110, w110 * trgt, s110); bound::add(out_ptr_NC, o001, w001 * trgt, s001); bound::add(out_ptr_NC, o101, w101 * trgt, s101); bound::add(out_ptr_NC, o011, w011 * trgt, s011); bound::add(out_ptr_NC, o111, w111 * trgt, s111); } } else { // Diff w.r.t. sgrad scalar_t val; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt0 = *trgt_ptr_NCXYZ, trgt1 = trgt_ptr_NCXYZ[trgt_sK], trgt2 = trgt_ptr_NCXYZ[trgt_sK * 2]; val = -dy0 * dz0 * trgt0 - dx0 * dz0 * trgt1 - dx0 * dy0 * trgt2; bound::add(out_ptr_NC, o000, val, s000); val = dy0 * dz0 * trgt0 - dx1 * dz0 * trgt1 - dx1 * dy0 * trgt2; bound::add(out_ptr_NC, o100, val, s100); val = -dy1 * dz0 * trgt0 + dx0 * dz0 * trgt1 - dx0 * dy1 * trgt2; bound::add(out_ptr_NC, o010, val, s010); val = dy1 * dz0 * trgt0 + dx1 * dz0 * trgt1 - dx1 * dy1 * trgt2; bound::add(out_ptr_NC, o110, val, s110); val = -dy0 * dz1 * trgt0 - dx0 * dz1 * trgt1 + dx0 * dy0 * trgt2; bound::add(out_ptr_NC, o001, val, s001); val = dy0 * dz1 * trgt0 - dx1 * dz1 * trgt1 + dx1 * dy0 * trgt2; bound::add(out_ptr_NC, o101, val, s101); val = -dy1 * dz1 * trgt0 + dx0 * dz1 * trgt1 + dx0 * dy1 * trgt2; bound::add(out_ptr_NC, o011, val, s011); val = dy1 * dz1 * trgt0 + dx1 * dz1 * trgt1 + dx1 * dy1 * trgt2; bound::add(out_ptr_NC, o111, val, s111); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { // Offsets into 'push' volume o000 = ix0 * out_sX + iy0 * out_sY + iz0 * out_sZ; o100 = ix1 * out_sX + iy0 * out_sY + iz0 * out_sZ; o010 = ix0 * out_sX + iy1 * out_sY + iz0 * out_sZ; o001 = ix0 * out_sX + iy0 * out_sY + iz1 * out_sZ; o110 = ix1 * out_sX + iy1 * out_sY + iz0 * out_sZ; o011 = ix0 * out_sX + iy1 * out_sY + iz1 * out_sZ; o101 = ix1 * out_sX + iy0 * out_sY + iz1 * out_sZ; o111 = ix1 * out_sX + iy1 * out_sY + iz1 * out_sZ; scalar_t* out_ptr_N = out_ptr + n * out_sN; bound::add(out_ptr_N, o000, w000, s000); bound::add(out_ptr_N, o100, w100, s100); bound::add(out_ptr_N, o010, w010, s010); bound::add(out_ptr_N, o110, w110, s110); bound::add(out_ptr_N, o001, w001, s001); bound::add(out_ptr_N, o101, w101, s101); bound::add(out_ptr_N, o011, w011, s011); bound::add(out_ptr_N, o111, w111, s111); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 2D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_bilinear( scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { // Get corner pixel values from (x, y, z) offset_t ix0 = static_cast<offset_t>(std::floor(x)); offset_t iy0 = static_cast<offset_t>(std::floor(y)); // Interpolation weights (inversely proportional to distance) scalar_t dx1 = x - ix0; scalar_t dy1 = y - iy0; scalar_t dx0 = 1. - dx1; scalar_t dy0 = 1. 
- dy1; scalar_t w00 = dx0 * dy0; scalar_t w10 = dx1 * dy0; scalar_t w01 = dx0 * dy1; scalar_t w11 = dx1 * dy1; ; // Sign (/!\ compute sign before warping indices) int8_t sx1 = bound::sign(bound0, ix0 + 1, src_X); int8_t sy1 = bound::sign(bound1, iy0 + 1, src_Y); int8_t sx0 = bound::sign(bound0, ix0, src_X); int8_t sy0 = bound::sign(bound1, iy0, src_Y); int8_t s00 = sx0 * sy0; int8_t s10 = sx1 * sy0; int8_t s01 = sx0 * sy1; int8_t s11 = sx1 * sy1; // Warp indices offset_t ix1, iy1; ix1 = bound::index(bound0, ix0 + 1, src_X); iy1 = bound::index(bound1, iy0 + 1, src_Y); ix0 = bound::index(bound0, ix0, src_X); iy0 = bound::index(bound1, iy0, src_Y); // Offsets into source volume offset_t o00, o10, o01, o11; if (do_pull || do_grad || do_sgrad) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~ Grid gradient ~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_grad) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; scalar_t gx = static_cast<scalar_t>(0); scalar_t gy = static_cast<scalar_t>(0); scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; if (trgt_K == 1) { // backward w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt = trgt_ptr ? *trgt_ptr_NCXY : static_cast<scalar_t>(1); // ^ trgt_ptr == 0 during the backward pass of count src = bound::get(src_ptr_NC, o00, s00); if (trgt_ptr) src *= trgt; gx -= dy0 * src; gy -= dx0 * src; src = bound::get(src_ptr_NC, o10, s10); if (trgt_ptr) src *= trgt; gx += dy0 * src; gy -= dx1 * src; src = bound::get(src_ptr_NC, o01, s01); if (trgt_ptr) src *= trgt; gx -= dy1 * src; gy += dx0 * src; src = bound::get(src_ptr_NC, o11, s11); if (trgt_ptr) src *= trgt; gx += dy1 * src; gy += dx1 * src; } } else { // backward w.r.t. 
sgrad for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, src_ptr_NC += src_sC) { scalar_t src; scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK]; src = bound::get(src_ptr_NC, o00, s00); gx += trgt1 * src; gy += trgt0 * src; src = bound::get(src_ptr_NC, o10, s10); gx -= trgt1 * src; gy -= trgt0 * src; src = bound::get(src_ptr_NC, o01, s01); gx -= trgt1 * src; gy -= trgt0 * src; src = bound::get(src_ptr_NC, o11, s11); gx += trgt1 * src; gy += trgt0 * src; } } scalar_t* grad_ptr_NXYZ = grad_ptr + n * grad_sN + w * grad_sX + h * grad_sY; (*grad_ptr_NXYZ) = gx; grad_ptr_NXYZ[grad_sC] = gy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Pull ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if (do_pull) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) { *out_ptr_NCXY = bound::get(src_ptr_NC, o00, s00) * w00 + bound::get(src_ptr_NC, o10, s10) * w10 + bound::get(src_ptr_NC, o01, s01) * w01 + bound::get(src_ptr_NC, o11, s11) * w11; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SGrad ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_sgrad) { o00 = ix0 * src_sX + iy0 * src_sY; o10 = ix1 * src_sX + iy0 * src_sY; o01 = ix0 * src_sX + iy1 * src_sY; o11 = ix1 * src_sX + iy1 * src_sY; scalar_t* out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) { scalar_t src00 = bound::get(src_ptr_NC, o00, s00); scalar_t src10 = bound::get(src_ptr_NC, o10, s10); scalar_t src01 = bound::get(src_ptr_NC, o01, s01); scalar_t src11 = bound::get(src_ptr_NC, o11, s11); *out_ptr_NCXY = -dy0 * src00 + dy0 * src10 - dy1 * src01 + dy1 * src11; out_ptr_NCXY[out_sK] = -dx0 * src00 - dx1 * src10 + dx0 * src01 + dx1 * src11; } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_push) { // Offsets into 'push' volume o00 = ix0 * out_sX + iy0 * out_sY; o10 = ix1 * out_sX + iy0 * out_sY; o01 = ix0 * out_sX + iy1 * out_sY; o11 = ix1 * out_sX + iy1 * out_sY; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; if (trgt_K == 1) { // Diff w.r.t. push/pull for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt = *trgt_ptr_NCXY; bound::add(out_ptr_NC, o00, w00 * trgt, s00); bound::add(out_ptr_NC, o10, w10 * trgt, s10); bound::add(out_ptr_NC, o01, w01 * trgt, s01); bound::add(out_ptr_NC, o11, w11 * trgt, s11); } } else { // Diff w.r.t. 
sgrad scalar_t val; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) { scalar_t trgt0 = *trgt_ptr_NCXY, trgt1 = trgt_ptr_NCXY[trgt_sK]; val = -dy0 * trgt0 - dx0 * trgt1; bound::add(out_ptr_NC, o00, val, s00); val = dy0 * trgt0 - dx1 * trgt1; bound::add(out_ptr_NC, o10, val, s10); val = -dy1 * trgt0 + dx0 * trgt1; bound::add(out_ptr_NC, o01, val, s01); val = dy1 * trgt0 + dx1 * trgt1; bound::add(out_ptr_NC, o11, val, s11); } } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Push ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else if (do_count) { // Offsets into 'push' volume o00 = ix0 * out_sX + iy0 * out_sY; o10 = ix1 * out_sX + iy0 * out_sY; o01 = ix0 * out_sX + iy1 * out_sY; o11 = ix1 * out_sX + iy1 * out_sY; scalar_t* out_ptr_N = out_ptr + n * out_sN; bound::add(out_ptr_N, o00, w00, s00); bound::add(out_ptr_N, o10, w10, s10); bound::add(out_ptr_N, o01, w01, s01); bound::add(out_ptr_N, o11, w11, s11); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // NEAREST NEIGHBOR INTERPOLATION 3D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate3d_nearest( scalar_t x, scalar_t y, scalar_t z, offset_t w, offset_t h, offset_t d, offset_t n) const { offset_t ix = static_cast<offset_t>(std::round(x)); offset_t iy = static_cast<offset_t>(std::round(y)); offset_t iz = static_cast<offset_t>(std::round(z)); // Boundary condition (/!\ compute sign before warping indices) int8_t sx = bound::sign(bound0, ix, src_X); int8_t sy = bound::sign(bound1, iy, src_Y); int8_t sz = bound::sign(bound2, iz, src_Z); ix = bound::index(bound0, ix, src_X); iy = bound::index(bound1, iy, src_Y); iz = bound::index(bound2, iz, src_Z); // Sign int8_t s = sz * sy * sx; if (do_pull) { offset_t o = iz * src_sZ + iy * src_sY + ix * src_sX; scalar_t* out_ptr_NCXYZ = out_ptr + n * out_sN + w * out_sX + h * out_sY + d * out_sZ; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXYZ += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXYZ = bound::get(src_ptr_NC, o, s); } else if (do_push && trgt_K == 1) { offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXYZ = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY + d * trgt_sZ; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXYZ += trgt_sC, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, *trgt_ptr_NCXYZ, s); } else if (do_count) { offset_t o = iz * out_sZ + iy * out_sY + ix * out_sX; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, static_cast<scalar_t>(1), s); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // NEAREST NEIGHBOR INTERPOLATION 2D // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t, typename offset_t> MONAI_DEVICE void PushPullImpl<scalar_t, offset_t>::interpolate2d_nearest( scalar_t x, scalar_t y, offset_t w, offset_t h, offset_t n) const { offset_t ix = static_cast<offset_t>(std::round(x)); offset_t iy = static_cast<offset_t>(std::round(y)); // Boundary condition (/!\ compute sign before warping indices) int8_t sx = bound::sign(bound0, ix, src_X); int8_t sy = bound::sign(bound1, iy, src_Y); ix = bound::index(bound0, ix, src_X); iy = bound::index(bound1, iy, src_Y); // Sign int8_t s = sy * sx; if (do_pull) { offset_t o = iy * src_sY + ix * src_sX; scalar_t* 
out_ptr_NCXY = out_ptr + n * out_sN + w * out_sX + h * out_sY; scalar_t* src_ptr_NC = src_ptr + n * src_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NCXY += out_sC, src_ptr_NC += src_sC) *out_ptr_NCXY = bound::get(src_ptr_NC, o, s); } else if (do_push && trgt_K == 1) { offset_t o = iy * out_sY + ix * out_sX; scalar_t* trgt_ptr_NCXY = trgt_ptr + n * trgt_sN + w * trgt_sX + h * trgt_sY; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, trgt_ptr_NCXY += trgt_sC, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, *trgt_ptr_NCXY, s); } else if (do_count) { offset_t o = iy * out_sY + ix * out_sX; scalar_t* out_ptr_NC = out_ptr + n * out_sN; for (offset_t c = 0; c < C; ++c, out_ptr_NC += out_sC) bound::add(out_ptr_NC, o, static_cast<scalar_t>(1), s); } } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // LINEAR INTERPOLATION 3D + SLIDING BOUNDARY // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // TODO // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // CUDA KERNEL (MUST BE OUT OF CLASS) // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // CUDA Kernel template <typename scalar_t, typename offset_t> C10_LAUNCH_BOUNDS_1(1024) __global__ void pushpull_kernel(PushPullImpl<scalar_t, offset_t> f) { f.loop(threadIdx.x, blockIdx.x, blockDim.x, gridDim.x); } } // namespace // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // FUNCTIONAL FORM WITH DISPATCH // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #define PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, SourceType0) \ template std::deque<Tensor> pushpull( \ const SourceType0&, \ const Tensor&, \ const Tensor&, \ BoundType0, \ InterpolationType0, \ bool, \ bool, \ bool, \ bool, \ bool, \ bool); \ template std::deque<Tensor> pushpull( \ const SourceType0&, const Tensor&, BoundType0, InterpolationType0, bool, bool, bool, bool, bool, bool) #define PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType0) \ PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, IntArrayRef); \ PUSHPULL_INSTANTIATE3(BoundType0, InterpolationType0, Tensor) #define PUSHPULL_INSTANTIATE1(BoundType0) \ PUSHPULL_INSTANTIATE2(BoundType0, InterpolationType); \ PUSHPULL_INSTANTIATE2(BoundType0, InterpolationVectorRef) #define PUSHPULL_INSTANTIATE \ PUSHPULL_INSTANTIATE1(BoundType); \ PUSHPULL_INSTANTIATE1(BoundVectorRef) // ~~~ CUDA ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Two arguments (source, grid) // > `bound` and `interpolation` can be single arguments or vectors. template <typename BoundType, typename InterpolationType, typename SourceType> MONAI_HOST std::deque<Tensor> pushpull( const SourceType& source, const Tensor& grid, BoundType bound, InterpolationType interpolation, bool extrapolate, bool do_pull, bool do_push, bool do_count, bool do_grad, bool do_sgrad) { return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] { PushPullImpl<scalar_t, int64_t> f( grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad); f.ioset(source, grid); pushpull_kernel<<<GET_BLOCKS(f.voxcount()), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(f); return f.output; }); } // Three arguments (source, grid, target) // > `bound` and `interpolation` can be single arguments or vectors. // > `source` can be a tensor or a vector of dimensions. 
template <typename BoundType, typename InterpolationType, typename SourceType>
MONAI_HOST std::deque<Tensor> pushpull(
    const SourceType& source,
    const Tensor& grid,
    const Tensor& target,
    BoundType bound,
    InterpolationType interpolation,
    bool extrapolate,
    bool do_pull,
    bool do_push,
    bool do_count,
    bool do_grad,
    bool do_sgrad) {
  return AT_DISPATCH_FLOATING_TYPES_AND_HALF(grid.scalar_type(), "pushpull", [&] {
    PushPullImpl<scalar_t, int64_t> f(
        grid.dim() - 2, bound, interpolation, extrapolate, do_pull, do_push, do_count, do_grad, do_sgrad);
    f.ioset(source, grid, target);
    pushpull_kernel<<<GET_BLOCKS(f.voxcount()), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(f);
    return f.output;
  });
}

PUSHPULL_INSTANTIATE;

} // namespace <device>
} // namespace monai
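// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Illustrative sketch (not part of any record in this collection; the kernel
// name `scale` and the whole program are hypothetical). It shows, as a
// self-contained HIP program, the launch-syntax mapping that hipify applies
// to files like the one above: `kernel<<<grid, block, shmem, stream>>>(args...)`
// in the .cu sources becomes `hipLaunchKernelGGL(kernel, grid, block, shmem,
// stream, args...)` in the .hip sources, while runtime calls are renamed
// (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy, and so on).
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a; // element-wise scale, guarded against overrun
}

int main() {
    const int n = 256;
    float h[256];
    for (int i = 0; i < n; ++i) h[i] = 1.0f;
    float* d = NULL;
    hipMalloc((void**)&d, n * sizeof(float));
    hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);
    // CUDA spelling would be: scale<<<dim3(1), dim3(256), 0, 0>>>(d, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3(1), dim3(256), 0, 0, d, 2.0f, n);
    hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d);
    printf("h[0] = %f\n", h[0]); // expect 2.000000
    return 0;
}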
4f6e0c39e24b3843cf592a4a2b733c4946487c3a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> // includes, kernels #include "vector_reduction_kernel.hip" // For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements. #define NUM_ELEMENTS 512 #define NUM_ELEMENTS_LESS 3 #define NUM_ELEMENTS_MORE 2222 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest(argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! 
Run naive scan test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements; //flags points the data over than 512 if(argc != 2){ fprintf(stderr,"need num_elements\n"); exit(1); } if((num_elements = atoi(argv[1])) == 0){ fprintf(stderr,"need a number\n"); exit(1); } const unsigned int array_mem_size = sizeof( float) * num_elements; // allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); for( unsigned int i = 0; i < num_elements; ++i) h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); // compute reference solution float reference = 0.0f; computeGold(&reference , h_data, num_elements); // **===-------- Modify the body of this function -----------===** float result = computeOnDevice(h_data, num_elements); // **===-----------------------------------------------------------===** // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = (abs(result - reference) <= epsilon); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); printf( "device: %f host: %f\n", result, reference); // cleanup memory free( h_data); } int ReadFile(float* M, char* file_name) { printf("reading"); unsigned int elements_read = NUM_ELEMENTS; FILE *file; if((file = fopen(file_name,"r")) == NULL) return 1; else{ for(unsigned i = 0; i < elements_read; ++i) fscanf(file, "%f", &M[i]); } return 0; } // **===----------------- Modify this function ---------------------===** // Take h_data from host, copies it to device, setup grid and thread // dimentions, excutes kernel function, and copy result of scan back // to h_data. // Note: float* h_data is both the input and the output of this function. float computeOnDevice(float* h_data, int num_elements) { int block_size, grid_size, len; block_size = 512; float* d_data; len = num_elements * sizeof(float); hipMalloc(&d_data, len); hipMemcpy(d_data, h_data, len, hipMemcpyHostToDevice); grid_size = (int)ceil((double)num_elements/(double)block_size); if(num_elements <= 512) { hipLaunchKernelGGL(( reduction_less), dim3(grid_size),dim3(block_size), 0, 0, d_data, num_elements); hipMemcpy(h_data, d_data, sizeof(float), hipMemcpyDeviceToHost); hipFree(d_data); return *h_data; }else{ float *d_result; hipMalloc(&d_result, grid_size * sizeof(float)); hipLaunchKernelGGL(( reduction_more), dim3(grid_size),dim3(block_size), 0, 0, d_data, num_elements, d_result); hipMemcpy(h_data, d_result, grid_size * sizeof(float), hipMemcpyDeviceToHost); hipFree(d_result); hipFree(d_data); for (int i = 1; i < grid_size; ++i) h_data[0] += h_data[i]; return *h_data; } /* retrun *h_data; */ } void computeGold( float* reference, float* idata, const unsigned int len) { reference[0] = 0; double total_sum = 0; unsigned int i; for( i = 0; i < len; ++i) total_sum += idata[i]; *reference = total_sum; }
4f6e0c39e24b3843cf592a4a2b733c4946487c3a.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> // includes, kernels #include "vector_reduction_kernel.cu" // For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements. #define NUM_ELEMENTS 512 #define NUM_ELEMENTS_LESS 3 #define NUM_ELEMENTS_MORE 2222 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name); float computeOnDevice(float* h_data, int array_mem_size); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest(argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! 
Run naive scan test //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int num_elements; //flags points the data over than 512 if(argc != 2){ fprintf(stderr,"need num_elements\n"); exit(1); } if((num_elements = atoi(argv[1])) == 0){ fprintf(stderr,"need a number\n"); exit(1); } const unsigned int array_mem_size = sizeof( float) * num_elements; // allocate host memory to store the input data float* h_data = (float*) malloc( array_mem_size); for( unsigned int i = 0; i < num_elements; ++i) h_data[i] = floorf(1000*(rand()/(float)RAND_MAX)); // compute reference solution float reference = 0.0f; computeGold(&reference , h_data, num_elements); // **===-------- Modify the body of this function -----------===** float result = computeOnDevice(h_data, num_elements); // **===-----------------------------------------------------------===** // We can use an epsilon of 0 since values are integral and in a range // that can be exactly represented float epsilon = 0.0f; unsigned int result_regtest = (abs(result - reference) <= epsilon); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); printf( "device: %f host: %f\n", result, reference); // cleanup memory free( h_data); } int ReadFile(float* M, char* file_name) { printf("reading"); unsigned int elements_read = NUM_ELEMENTS; FILE *file; if((file = fopen(file_name,"r")) == NULL) return 1; else{ for(unsigned i = 0; i < elements_read; ++i) fscanf(file, "%f", &M[i]); } return 0; } // **===----------------- Modify this function ---------------------===** // Take h_data from host, copies it to device, setup grid and thread // dimentions, excutes kernel function, and copy result of scan back // to h_data. // Note: float* h_data is both the input and the output of this function. float computeOnDevice(float* h_data, int num_elements) { int block_size, grid_size, len; block_size = 512; float* d_data; len = num_elements * sizeof(float); cudaMalloc(&d_data, len); cudaMemcpy(d_data, h_data, len, cudaMemcpyHostToDevice); grid_size = (int)ceil((double)num_elements/(double)block_size); if(num_elements <= 512) { reduction_less<<<grid_size,block_size>>>(d_data, num_elements); cudaMemcpy(h_data, d_data, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_data); return *h_data; }else{ float *d_result; cudaMalloc(&d_result, grid_size * sizeof(float)); reduction_more<<<grid_size,block_size>>>(d_data, num_elements, d_result); cudaMemcpy(h_data, d_result, grid_size * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_result); cudaFree(d_data); for (int i = 1; i < grid_size; ++i) h_data[0] += h_data[i]; return *h_data; } /* retrun *h_data; */ } void computeGold( float* reference, float* idata, const unsigned int len) { reference[0] = 0; double total_sum = 0; unsigned int i; for( i = 0; i < len; ++i) total_sum += idata[i]; *reference = total_sum; }
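// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Illustrative sketch: the harness above #includes "vector_reduction_kernel.cu"
// (and its hipified twin includes "vector_reduction_kernel.hip"), but that file
// is not reproduced in this record, so the bodies of reduction_less() and
// reduction_more() are not visible. The kernels below are an assumed, minimal
// shared-memory tree reduction consistent with the call sites in
// computeOnDevice(): a single 512-thread block summing into g_data[0] when
// num_elements <= 512, and one partial sum per block written to g_result
// otherwise (the host then adds the partial sums). They assume the 512-thread,
// power-of-two block size the harness always uses.
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void reduction_less(float* g_data, int num_elements) {
    __shared__ float s[512];
    int tid = threadIdx.x;
    s[tid] = (tid < num_elements) ? g_data[tid] : 0.0f; // pad tail with zeros
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride]; // pairwise partial sums
        __syncthreads();
    }
    if (tid == 0) g_data[0] = s[0]; // total ends up in the first element
}

__global__ void reduction_more(float* g_data, int num_elements, float* g_result) {
    __shared__ float s[512];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    s[tid] = (i < num_elements) ? g_data[i] : 0.0f;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) s[tid] += s[tid + stride];
        __syncthreads();
    }
    if (tid == 0) g_result[blockIdx.x] = s[0]; // one partial sum per block
}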
e97ef563af755196373ad328bc08b73e96d74de7.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "doubleArray2floatArray.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const double *doubleArray = NULL;
            hipMalloc(&doubleArray, XSIZE*YSIZE);
            float *floatArray = NULL;
            hipMalloc(&floatArray, XSIZE*YSIZE);
            const int size = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( doubleArray2floatArray), dim3(gridBlock), dim3(threadBlock), 0, 0, doubleArray, floatArray, size);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( doubleArray2floatArray), dim3(gridBlock), dim3(threadBlock), 0, 0, doubleArray, floatArray, size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( doubleArray2floatArray), dim3(gridBlock), dim3(threadBlock), 0, 0, doubleArray, floatArray, size);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
e97ef563af755196373ad328bc08b73e96d74de7.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "doubleArray2floatArray.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const double *doubleArray = NULL;
            cudaMalloc(&doubleArray, XSIZE*YSIZE);
            float *floatArray = NULL;
            cudaMalloc(&floatArray, XSIZE*YSIZE);
            const int size = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            doubleArray2floatArray<<<gridBlock,threadBlock>>>(doubleArray, floatArray, size);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                doubleArray2floatArray<<<gridBlock,threadBlock>>>(doubleArray, floatArray, size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                doubleArray2floatArray<<<gridBlock,threadBlock>>>(doubleArray, floatArray, size);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
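// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Illustrative sketch: "doubleArray2floatArray.cu" is included by both
// benchmark harnesses above but is not reproduced in this record. The kernel
// below is an assumed element-wise implementation matching the argument types
// used at the harness's call site (const double*, float*, const int); the 2D
// launch configuration is flattened to a 1D index so the guard `idx < size`
// decides which threads write.
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void doubleArray2floatArray(const double* doubleArray, float* floatArray, const int size) {
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = iy * gridDim.x * blockDim.x + ix; // flatten the 2D grid
    if (idx < size)
        floatArray[idx] = static_cast<float>(doubleArray[idx]); // narrowing cast
}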
f0dce11f52326aefacce85c45852a6645a644960.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __NVCC__ #define POS_INFINITY __int_as_float(0x7f800000) #define INFINITY POS_INFINITY #define NEG_INFINITY __int_as_float(0xff800000) #define NAN __int_as_float(0x7fffffff) //===----------------------------------------------------------------------===// // The following namespace std is modified from LLVM, see the following copyright // information // // -*- C++ -*- //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // copy-pasted from the following llvm file: // https://github.com/llvm/llvm-project/blob/main/libcxx/include/complex namespace std { template <class _Tp> class complex; template <class _Tp> complex<_Tp> operator*(const complex<_Tp>& __z, const complex<_Tp>& __w); template <class _Tp> complex<_Tp> operator/(const complex<_Tp>& __x, const complex<_Tp>& __y); template <class _Tp> class complex { public: typedef _Tp value_type; private: value_type __re_; value_type __im_; public: constexpr complex( const value_type& __re = value_type(), const value_type& __im = value_type()) : __re_(__re), __im_(__im) {} template <class _Xp> constexpr complex(const complex<_Xp>& __c) : __re_(__c.real()), __im_(__c.imag()) {} constexpr value_type real() const { return __re_; } constexpr value_type imag() const { return __im_; } void real(value_type __re) { __re_ = __re; } void imag(value_type __im) { __im_ = __im; } constexpr operator bool() const { return real() || imag(); } complex& operator=(const value_type& __re) { __re_ = __re; __im_ = value_type(); return *this; } complex& operator+=(const value_type& __re) { __re_ += __re; return *this; } complex& operator-=(const value_type& __re) { __re_ -= __re; return *this; } complex& operator*=(const value_type& __re) { __re_ *= __re; __im_ *= __re; return *this; } complex& operator/=(const value_type& __re) { __re_ /= __re; __im_ /= __re; return *this; } template <class _Xp> complex& operator=(const complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template <class _Xp> complex& operator+=(const complex<_Xp>& __c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template <class _Xp> complex& operator-=(const complex<_Xp>& __c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template <class _Xp> complex& operator*=(const complex<_Xp>& __c) { *this = *this * complex(__c.real(), __c.imag()); return *this; } template <class _Xp> complex& operator/=(const complex<_Xp>& __c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; template <> class complex<double>; template <> class complex<float> { float __re_; float __im_; public: typedef float value_type; constexpr complex(float __re = 0.0f, float __im = 0.0f) : __re_(__re), __im_(__im) {} explicit constexpr complex(const complex<double>& __c); // copy volatile to non-volatile constexpr complex(const volatile complex<float>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr complex(const complex<float>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr float real() const { return __re_; } constexpr float imag() const { return __im_; } void real(value_type __re) { __re_ = __re; } void imag(value_type __im) { __im_ = __im; } constexpr operator 
bool() const { return real() || imag(); } complex& operator=(float __re) { __re_ = __re; __im_ = value_type(); return *this; } complex& operator+=(float __re) { __re_ += __re; return *this; } complex& operator-=(float __re) { __re_ -= __re; return *this; } complex& operator*=(float __re) { __re_ *= __re; __im_ *= __re; return *this; } complex& operator/=(float __re) { __re_ /= __re; __im_ /= __re; return *this; } template <class _Xp> complex& operator=(const complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // non-volatile to volatile template <class _Xp> volatile complex& operator=(const complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to non-volatile template <class _Xp> complex& operator=(const volatile complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to volatile template <class _Xp> volatile complex& operator=(const volatile complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template <class _Xp> complex& operator+=(const complex<_Xp>& __c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template <class _Xp> complex& operator-=(const complex<_Xp>& __c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template <class _Xp> complex& operator*=(const complex<_Xp>& __c) { *this = *this * complex(__c.real(), __c.imag()); return *this; } template <class _Xp> complex& operator/=(const complex<_Xp>& __c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; template <> class complex<double> { double __re_; double __im_; public: typedef double value_type; constexpr complex(double __re = 0.0, double __im = 0.0) : __re_(__re), __im_(__im) {} constexpr complex(const complex<float>& __c); // copy volatile to non-volatile constexpr complex(const volatile complex<double>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr complex(const complex<double>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr double real() const { return __re_; } constexpr double imag() const { return __im_; } void real(value_type __re) { __re_ = __re; } void imag(value_type __im) { __im_ = __im; } constexpr operator bool() const { return real() || imag(); } complex& operator=(double __re) { __re_ = __re; __im_ = value_type(); return *this; } complex& operator+=(double __re) { __re_ += __re; return *this; } complex& operator-=(double __re) { __re_ -= __re; return *this; } complex& operator*=(double __re) { __re_ *= __re; __im_ *= __re; return *this; } complex& operator/=(double __re) { __re_ /= __re; __im_ /= __re; return *this; } template <class _Xp> complex& operator=(const complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // non-volatile to volatile template <class _Xp> volatile complex& operator=(const complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to non-volatile template <class _Xp> complex& operator=(const volatile complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to volatile template <class _Xp> volatile complex& operator=(const volatile complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template <class _Xp> complex& operator+=(const complex<_Xp>& __c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template <class _Xp> complex& operator-=(const complex<_Xp>& __c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template <class _Xp> 
complex& operator*=(const complex<_Xp>& __c) { *this = *this * complex(__c.real(), __c.imag()); return *this; } template <class _Xp> complex& operator/=(const complex<_Xp>& __c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; inline constexpr complex<float>::complex(const complex<double>& __c) : __re_(__c.real()), __im_(__c.imag()) {} inline constexpr complex<double>::complex(const complex<float>& __c) : __re_(__c.real()), __im_(__c.imag()) {} // 26.3.6 operators: template <class _Tp> inline complex<_Tp> operator+( const complex<_Tp>& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__x); __t += __y; return __t; } template <class _Tp> inline complex<_Tp> operator+(const complex<_Tp>& __x, const _Tp& __y) { complex<_Tp> __t(__x); __t += __y; return __t; } template <class _Tp> inline complex<_Tp> operator+(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__y); __t += __x; return __t; } template <class _Tp> inline complex<_Tp> operator-( const complex<_Tp>& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__x); __t -= __y; return __t; } template <class _Tp> inline complex<_Tp> operator-(const complex<_Tp>& __x, const _Tp& __y) { complex<_Tp> __t(__x); __t -= __y; return __t; } template <class _Tp> inline complex<_Tp> operator-(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(-__y); __t += __x; return __t; } template <class _Tp> complex<_Tp> operator*(const complex<_Tp>& __z, const complex<_Tp>& __w) { _Tp __a = __z.real(); _Tp __b = __z.imag(); _Tp __c = __w.real(); _Tp __d = __w.imag(); _Tp __ac = __a * __c; _Tp __bd = __b * __d; _Tp __ad = __a * __d; _Tp __bc = __b * __c; _Tp __x = __ac - __bd; _Tp __y = __ad + __bc; if (isnan(__x) && isnan(__y)) { bool __recalc = false; if (isinf(__a) || isinf(__b)) { __a = copysign(isinf(__a) ? _Tp(1) : _Tp(0), __a); __b = copysign(isinf(__b) ? _Tp(1) : _Tp(0), __b); if (isnan(__c)) __c = copysign(_Tp(0), __c); if (isnan(__d)) __d = copysign(_Tp(0), __d); __recalc = true; } if (isinf(__c) || isinf(__d)) { __c = copysign(isinf(__c) ? _Tp(1) : _Tp(0), __c); __d = copysign(isinf(__d) ? 
_Tp(1) : _Tp(0), __d); if (isnan(__a)) __a = copysign(_Tp(0), __a); if (isnan(__b)) __b = copysign(_Tp(0), __b); __recalc = true; } if (!__recalc && (isinf(__ac) || isinf(__bd) || isinf(__ad) || isinf(__bc))) { if (isnan(__a)) __a = copysign(_Tp(0), __a); if (isnan(__b)) __b = copysign(_Tp(0), __b); if (isnan(__c)) __c = copysign(_Tp(0), __c); if (isnan(__d)) __d = copysign(_Tp(0), __d); __recalc = true; } if (__recalc) { __x = _Tp(INFINITY) * (__a * __c - __b * __d); __y = _Tp(INFINITY) * (__a * __d + __b * __c); } } return complex<_Tp>(__x, __y); } template <class _Tp> inline complex<_Tp> operator*(const complex<_Tp>& __x, const _Tp& __y) { complex<_Tp> __t(__x); __t *= __y; return __t; } template <class _Tp> inline complex<_Tp> operator*(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__y); __t *= __x; return __t; } template <class _Tp> complex<_Tp> operator/(const complex<_Tp>& __z, const complex<_Tp>& __w) { int __ilogbw = 0; _Tp __a = __z.real(); _Tp __b = __z.imag(); _Tp __c = __w.real(); _Tp __d = __w.imag(); _Tp __logbw = logb(fmax(fabs(__c), fabs(__d))); if (isfinite(__logbw)) { __ilogbw = static_cast<int>(__logbw); __c = scalbn(__c, -__ilogbw); __d = scalbn(__d, -__ilogbw); } _Tp __denom = __c * __c + __d * __d; _Tp __x = scalbn((__a * __c + __b * __d) / __denom, -__ilogbw); _Tp __y = scalbn((__b * __c - __a * __d) / __denom, -__ilogbw); if (isnan(__x) && isnan(__y)) { if ((__denom == _Tp(0)) && (!isnan(__a) || !isnan(__b))) { __x = copysign(_Tp(INFINITY), __c) * __a; __y = copysign(_Tp(INFINITY), __c) * __b; } else if ((isinf(__a) || isinf(__b)) && isfinite(__c) && isfinite(__d)) { __a = copysign(isinf(__a) ? _Tp(1) : _Tp(0), __a); __b = copysign(isinf(__b) ? _Tp(1) : _Tp(0), __b); __x = _Tp(INFINITY) * (__a * __c + __b * __d); __y = _Tp(INFINITY) * (__b * __c - __a * __d); } else if ( isinf(__logbw) && __logbw > _Tp(0) && isfinite(__a) && isfinite(__b)) { __c = copysign(isinf(__c) ? _Tp(1) : _Tp(0), __c); __d = copysign(isinf(__d) ? 
_Tp(1) : _Tp(0), __d); __x = _Tp(0) * (__a * __c + __b * __d); __y = _Tp(0) * (__b * __c - __a * __d); } } return complex<_Tp>(__x, __y); } template <class _Tp> inline complex<_Tp> operator/(const complex<_Tp>& __x, const _Tp& __y) { return complex<_Tp>(__x.real() / __y, __x.imag() / __y); } template <class _Tp> inline complex<_Tp> operator/(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__x); __t /= __y; return __t; } template <class _Tp> inline complex<_Tp> operator+(const complex<_Tp>& __x) { return __x; } template <class _Tp> inline complex<_Tp> operator-(const complex<_Tp>& __x) { return complex<_Tp>(-__x.real(), -__x.imag()); } template <class _Tp> inline constexpr bool operator==( const complex<_Tp>& __x, const complex<_Tp>& __y) { return __x.real() == __y.real() && __x.imag() == __y.imag(); } template <class _Tp> inline constexpr bool operator==(const complex<_Tp>& __x, const _Tp& __y) { return __x.real() == __y && __x.imag() == 0; } template <class _Tp> inline constexpr bool operator==(const _Tp& __x, const complex<_Tp>& __y) { return __x == __y.real() && 0 == __y.imag(); } template <class _Tp> inline constexpr bool operator!=( const complex<_Tp>& __x, const complex<_Tp>& __y) { return !(__x == __y); } template <class _Tp> inline constexpr bool operator!=(const complex<_Tp>& __x, const _Tp& __y) { return !(__x == __y); } template <class _Tp> inline constexpr bool operator!=(const _Tp& __x, const complex<_Tp>& __y) { return !(__x == __y); } template <class _Tp> inline constexpr bool operator&&( const complex<_Tp>& __x, const complex<_Tp>& __y) { return bool(__x) && bool(__y); } template <class _Tp> inline constexpr bool isnan(const complex<_Tp>& __x) { return isnan(__x.real()) || isnan(__x.imag()); } template <class _Tp> inline constexpr bool operator||( const complex<_Tp>& __x, const complex<_Tp>& __y) { return bool(__x) || bool(__y); } // 26.3.7 values: template < class _Tp, bool = is_integral<_Tp>::value, bool = is_floating_point<_Tp>::value> struct __libcpp_complex_overload_traits {}; // Integral Types template <class _Tp> struct __libcpp_complex_overload_traits<_Tp, true, false> { typedef double _ValueType; typedef complex<double> _ComplexType; }; // Floating point types template <class _Tp> struct __libcpp_complex_overload_traits<_Tp, false, true> { typedef _Tp _ValueType; typedef complex<_Tp> _ComplexType; }; // real template <class _Tp> inline constexpr _Tp real(const complex<_Tp>& __c) { return __c.real(); } template <class _Tp> inline constexpr typename __libcpp_complex_overload_traits<_Tp>::_ValueType real( _Tp __re) { return __re; } // imag template <class _Tp> inline constexpr _Tp imag(const complex<_Tp>& __c) { return __c.imag(); } template <class _Tp> inline constexpr typename __libcpp_complex_overload_traits<_Tp>::_ValueType imag( _Tp) { return 0; } // abs template <class _Tp> inline _Tp abs(const complex<_Tp>& __c) { return hypot(__c.real(), __c.imag()); } // arg template <class _Tp> inline _Tp arg(const complex<_Tp>& __c) { return atan2(__c.imag(), __c.real()); } template <class _Tp> inline typename enable_if< is_integral<_Tp>::value || is_same<_Tp, double>::value, double>::type arg(_Tp __re) { return atan2(0., __re); } template <class _Tp> inline typename enable_if<is_same<_Tp, float>::value, float>::type arg( _Tp __re) { return atan2f(0.F, __re); } } // namespace std namespace std { using ::isfinite; using ::isinf; using ::isnan; using ::signbit; using ::abs; using ::acos; using ::acosf; using ::asin; using ::asinf; using ::atan; using ::atan2; 
using ::atan2f; using ::atanf; using ::ceil; using ::ceilf; using ::cos; using ::cosf; using ::cosh; using ::coshf; using ::exp; using ::expf; using ::fabs; using ::fabsf; using ::floor; using ::floorf; using ::fmod; using ::fmodf; using ::frexp; using ::frexpf; using ::ldexp; using ::ldexpf; using ::log; using ::logf; using ::log10; using ::log10f; using ::modf; using ::modff; using ::pow; using ::powf; using ::sin; using ::sinf; using ::sinh; using ::sinhf; using ::sqrt; using ::sqrtf; using ::tan; using ::tanf; using ::tanh; using ::tanhf; using ::acosh; using ::acoshf; using ::asinh; using ::asinhf; using ::atanh; using ::atanhf; using ::cbrt; using ::cbrtf; using ::copysign; using ::copysignf; using ::erf; using ::erfc; using ::erfcf; using ::erff; using ::exp2; using ::exp2f; using ::expm1; using ::expm1f; using ::fdim; using ::fdimf; using ::fma; using ::fmaf; using ::fmax; using ::fmaxf; using ::fmin; using ::fminf; using ::hypot; using ::hypotf; using ::ilogb; using ::ilogbf; using ::lgamma; using ::lgammaf; using ::llrint; using ::llrintf; using ::llround; using ::llroundf; using ::log1p; using ::log1pf; using ::log2; using ::log2f; using ::logb; using ::logbf; using ::lrint; using ::lrintf; using ::lround; using ::lroundf; using ::nan; using ::nanf; using ::nearbyint; using ::nearbyintf; using ::nextafter; using ::nextafterf; using ::remainder; using ::remainderf; using ::remquo; using ::remquof; using ::rint; using ::rintf; using ::round; using ::roundf; using ::scalbln; using ::scalblnf; using ::scalbn; using ::scalbnf; using ::tgamma; using ::tgammaf; using ::trunc; using ::truncf; } // namespace std namespace std { // norm template <class _Tp> inline _Tp norm(const complex<_Tp>& __c) { if (isinf(__c.real())) return abs(__c.real()); if (isinf(__c.imag())) return abs(__c.imag()); return __c.real() * __c.real() + __c.imag() * __c.imag(); } template <class _Tp> inline typename __libcpp_complex_overload_traits<_Tp>::_ValueType norm( _Tp __re) { typedef typename __libcpp_complex_overload_traits<_Tp>::_ValueType _ValueType; return static_cast<_ValueType>(__re) * __re; } // conj template <class _Tp> inline complex<_Tp> conj(const complex<_Tp>& __c) { return complex<_Tp>(__c.real(), -__c.imag()); } template <class _Tp> inline typename __libcpp_complex_overload_traits<_Tp>::_ComplexType conj( _Tp __re) { typedef typename __libcpp_complex_overload_traits<_Tp>::_ComplexType _ComplexType; return _ComplexType(__re); } // proj template <class _Tp> inline complex<_Tp> proj(const complex<_Tp>& __c) { complex<_Tp> __r = __c; if (isinf(__c.real()) || isinf(__c.imag())) __r = complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag())); return __r; } template <class _Tp> inline typename enable_if< is_floating_point<_Tp>::value, typename __libcpp_complex_overload_traits<_Tp>::_ComplexType>::type proj(_Tp __re) { if (isinf(__re)) __re = abs(__re); return complex<_Tp>(__re); } template <class _Tp> inline typename enable_if< is_integral<_Tp>::value, typename __libcpp_complex_overload_traits<_Tp>::_ComplexType>::type proj(_Tp __re) { typedef typename __libcpp_complex_overload_traits<_Tp>::_ComplexType _ComplexType; return _ComplexType(__re); } // polar template <class _Tp> complex<_Tp> polar(const _Tp& __rho, const _Tp& __theta = _Tp()) { if (isnan(__rho) || signbit(__rho)) return complex<_Tp>(_Tp(NAN), _Tp(NAN)); if (isnan(__theta)) { if (isinf(__rho)) return complex<_Tp>(__rho, __theta); return complex<_Tp>(__theta, __theta); } if (isinf(__theta)) { if (isinf(__rho)) return complex<_Tp>(__rho, _Tp(NAN)); 
return complex<_Tp>(_Tp(NAN), _Tp(NAN)); } _Tp __x = __rho * cos(__theta); if (isnan(__x)) __x = 0; _Tp __y = __rho * sin(__theta); if (isnan(__y)) __y = 0; return complex<_Tp>(__x, __y); } // log template <class _Tp> inline complex<_Tp> log(const complex<_Tp>& __x) { return complex<_Tp>(log(abs(__x)), arg(__x)); } // log10 template <class _Tp> inline complex<_Tp> log10(const complex<_Tp>& __x) { return log(__x) / log(_Tp(10)); } // log2 template <class _Tp> inline complex<_Tp> log2(const complex<_Tp>& __x) { return log(__x) / log(_Tp(2)); } // sqrt template <class _Tp> complex<_Tp> sqrt(const complex<_Tp>& __x) { if (isinf(__x.imag())) return complex<_Tp>(_Tp(INFINITY), __x.imag()); if (isinf(__x.real())) { if (__x.real() > _Tp(0)) return complex<_Tp>( __x.real(), isnan(__x.imag()) ? __x.imag() : copysign(_Tp(0), __x.imag())); return complex<_Tp>( isnan(__x.imag()) ? __x.imag() : _Tp(0), copysign(__x.real(), __x.imag())); } return polar(sqrt(abs(__x)), arg(__x) / _Tp(2)); } // exp template <class _Tp> complex<_Tp> exp(const complex<_Tp>& __x) { _Tp __i = __x.imag(); if (__i == 0) { return complex<_Tp>(exp(__x.real()), copysign(_Tp(0), __x.imag())); } if (isinf(__x.real())) { if (__x.real() < _Tp(0)) { if (!isfinite(__i)) __i = _Tp(1); } else if (__i == 0 || !isfinite(__i)) { if (isinf(__i)) __i = _Tp(NAN); return complex<_Tp>(__x.real(), __i); } } _Tp __e = exp(__x.real()); return complex<_Tp>(__e * cos(__i), __e * sin(__i)); } // pow template <class _Tp> inline complex<_Tp> pow(const complex<_Tp>& __x, const complex<_Tp>& __y) { return exp(__y * log(__x)); } template <class _Tp, class _Up> inline complex<typename __promote<_Tp, _Up>::type> pow( const complex<_Tp>& __x, const complex<_Up>& __y) { typedef complex<typename __promote<_Tp, _Up>::type> result_type; return ::pow(result_type(__x), result_type(__y)); } template <class _Tp, class _Up> inline typename enable_if< is_arithmetic<_Up>::value, complex<typename __promote<_Tp, _Up>::type>>::type pow(const complex<_Tp>& __x, const _Up& __y) { typedef complex<typename __promote<_Tp, _Up>::type> result_type; return ::pow(result_type(__x), result_type(__y)); } template <class _Tp, class _Up> inline typename enable_if< is_arithmetic<_Tp>::value, complex<typename __promote<_Tp, _Up>::type>>::type pow(const _Tp& __x, const complex<_Up>& __y) { typedef complex<typename __promote<_Tp, _Up>::type> result_type; return ::pow(result_type(__x), result_type(__y)); } // __sqr, computes pow(x, 2) template <class _Tp> inline complex<_Tp> __sqr(const complex<_Tp>& __x) { return complex<_Tp>( (__x.real() - __x.imag()) * (__x.real() + __x.imag()), _Tp(2) * __x.real() * __x.imag()); } // asinh template <class _Tp> complex<_Tp> asinh(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.real())) { if (isnan(__x.imag())) return __x; if (isinf(__x.imag())) return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag())); return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag())); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(__x.imag(), __x.real()); if (__x.imag() == 0) return __x; return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>( copysign(__x.imag(), __x.real()), copysign(__pi / _Tp(2), __x.imag())); complex<_Tp> __z = log(__x + sqrt(__sqr(__x) + _Tp(1))); return complex<_Tp>( copysign(__z.real(), __x.real()), copysign(__z.imag(), __x.imag())); } // acosh template <class _Tp> complex<_Tp> acosh(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if 
(isinf(__x.real())) { if (isnan(__x.imag())) return complex<_Tp>(abs(__x.real()), __x.imag()); if (isinf(__x.imag())) { if (__x.real() > 0) return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag())); else return complex<_Tp>( -__x.real(), copysign(__pi * _Tp(0.75), __x.imag())); } if (__x.real() < 0) return complex<_Tp>(-__x.real(), copysign(__pi, __x.imag())); return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag())); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(abs(__x.imag()), __x.real()); return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>(abs(__x.imag()), copysign(__pi / _Tp(2), __x.imag())); complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1))); return complex<_Tp>( copysign(__z.real(), _Tp(0)), copysign(__z.imag(), __x.imag())); } // atanh template <class _Tp> complex<_Tp> atanh(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.imag())) { return complex<_Tp>( copysign(_Tp(0), __x.real()), copysign(__pi / _Tp(2), __x.imag())); } if (isnan(__x.imag())) { if (isinf(__x.real()) || __x.real() == 0) return complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag()); return complex<_Tp>(__x.imag(), __x.imag()); } if (isnan(__x.real())) { return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.real())) { return complex<_Tp>( copysign(_Tp(0), __x.real()), copysign(__pi / _Tp(2), __x.imag())); } if (abs(__x.real()) == _Tp(1) && __x.imag() == _Tp(0)) { return complex<_Tp>( copysign(_Tp(INFINITY), __x.real()), copysign(_Tp(0), __x.imag())); } complex<_Tp> __z = log((_Tp(1) + __x) / (_Tp(1) - __x)) / _Tp(2); return complex<_Tp>( copysign(__z.real(), __x.real()), copysign(__z.imag(), __x.imag())); } // sinh template <class _Tp> complex<_Tp> sinh(const complex<_Tp>& __x) { if (isinf(__x.real()) && !isfinite(__x.imag())) return complex<_Tp>(__x.real(), _Tp(NAN)); if (__x.real() == 0 && !isfinite(__x.imag())) return complex<_Tp>(__x.real(), _Tp(NAN)); if (__x.imag() == 0 && !isfinite(__x.real())) return __x; return complex<_Tp>( sinh(__x.real()) * cos(__x.imag()), cosh(__x.real()) * sin(__x.imag())); } // cosh template <class _Tp> complex<_Tp> cosh(const complex<_Tp>& __x) { if (isinf(__x.real()) && !isfinite(__x.imag())) return complex<_Tp>(abs(__x.real()), _Tp(NAN)); if (__x.real() == 0 && !isfinite(__x.imag())) return complex<_Tp>(_Tp(NAN), __x.real()); if (__x.real() == 0 && __x.imag() == 0) return complex<_Tp>(_Tp(1), __x.imag()); if (__x.imag() == 0 && !isfinite(__x.real())) return complex<_Tp>(abs(__x.real()), __x.imag()); return complex<_Tp>( cosh(__x.real()) * cos(__x.imag()), sinh(__x.real()) * sin(__x.imag())); } // tanh template <class _Tp> complex<_Tp> tanh(const complex<_Tp>& __x) { if (isinf(__x.real())) { if (!isfinite(__x.imag())) return complex<_Tp>(copysign(_Tp(1), __x.real()), _Tp(0)); return complex<_Tp>( copysign(_Tp(1), __x.real()), copysign(_Tp(0), sin(_Tp(2) * __x.imag()))); } if (isnan(__x.real()) && __x.imag() == 0) return __x; _Tp __2r(_Tp(2) * __x.real()); _Tp __2i(_Tp(2) * __x.imag()); _Tp __d(cosh(__2r) + cos(__2i)); _Tp __2rsh(sinh(__2r)); if (isinf(__2rsh) && isinf(__d)) return complex<_Tp>( __2rsh > _Tp(0) ? _Tp(1) : _Tp(-1), __2i > _Tp(0) ? 
_Tp(0) : _Tp(-0.)); return complex<_Tp>(__2rsh / __d, sin(__2i) / __d); } // asin template <class _Tp> complex<_Tp> asin(const complex<_Tp>& __x) { complex<_Tp> __z = asinh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // acos template <class _Tp> complex<_Tp> acos(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.real())) { if (isnan(__x.imag())) return complex<_Tp>(__x.imag(), __x.real()); if (isinf(__x.imag())) { if (__x.real() < _Tp(0)) return complex<_Tp>(_Tp(0.75) * __pi, -__x.imag()); return complex<_Tp>(_Tp(0.25) * __pi, -__x.imag()); } if (__x.real() < _Tp(0)) return complex<_Tp>(__pi, signbit(__x.imag()) ? -__x.real() : __x.real()); return complex<_Tp>(_Tp(0), signbit(__x.imag()) ? __x.real() : -__x.real()); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(__x.real(), -__x.imag()); return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>(__pi / _Tp(2), -__x.imag()); if (__x.real() == 0 && (__x.imag() == 0 || isnan(__x.imag()))) return complex<_Tp>(__pi / _Tp(2), -__x.imag()); complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1))); if (signbit(__x.imag())) return complex<_Tp>(abs(__z.imag()), abs(__z.real())); return complex<_Tp>(abs(__z.imag()), -abs(__z.real())); } // atan template <class _Tp> complex<_Tp> atan(const complex<_Tp>& __x) { complex<_Tp> __z = atanh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // sin template <class _Tp> complex<_Tp> sin(const complex<_Tp>& __x) { complex<_Tp> __z = sinh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // cos template <class _Tp> inline complex<_Tp> cos(const complex<_Tp>& __x) { return cosh(complex<_Tp>(-__x.imag(), __x.real())); } // tan template <class _Tp> complex<_Tp> tan(const complex<_Tp>& __x) { complex<_Tp> __z = tanh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // Literal suffix for complex number literals [complex.literals] inline namespace literals { inline namespace complex_literals { constexpr complex<double> operator""i(long double __im) { return {0.0, static_cast<double>(__im)}; } constexpr complex<double> operator""i(unsigned long long __im) { return {0.0, static_cast<double>(__im)}; } constexpr complex<float> operator""if(long double __im) { return {0.0f, static_cast<float>(__im)}; } constexpr complex<float> operator""if(unsigned long long __im) { return {0.0f, static_cast<float>(__im)}; } } // namespace complex_literals } // namespace literals } // namespace std __device__ std::complex<double> lerp( std::complex<double> start, std::complex<double> end, std::complex<double> weight) { if (abs(weight) < 0.5) { return start + weight * (end - start); } else { return end - (end - start) * (1.0 - weight); } } __device__ std::complex<float> lerp( std::complex<float> start, std::complex<float> end, std::complex<float> weight) { if (abs(weight) < 0.5f) { return start + weight * (end - start); } else { return end - (end - start) * (1.0f - weight); } } __device__ std::complex<double> reciprocal(std::complex<double> x) { return 1.0 / x; } __device__ std::complex<float> reciprocal(std::complex<float> x) { return 1.0f / x; } __device__ std::complex<double> sigmoid(std::complex<double> x) { return 1.0 / (1.0 + exp(-x)); } __device__ std::complex<float> sigmoid(std::complex<float> x) { return 1.0f / (1.0f + exp(-x)); } // The reciprocal of a complex number z is // 1/z = conj(z)/|z|^2. 
// The principal square root of a complex number z can be obtained by [1] // sqrt(z) = sqrt(|z|) (z + |z|) / |z + |z||. // Combining these formulas we have // 1/sqrt(z) = (conj(z) + |z|) / (sqrt(|z|) |z + |z||). // [1] https://math.stackexchange.com/a/44500 __device__ std::complex<float> rsqrt(std::complex<float> z) { auto a = std::real(z); auto b = std::imag(z); auto absa = ::fabsf(a); auto absb = ::fabsf(b); // scale to avoid precision loss due to underflow/overflow auto scale = fmax(absa, absb); a /= scale; b /= scale; auto a_sq = a * a; auto b_sq = b * b; auto modz_sq = a_sq + b_sq; auto modz = ::sqrtf(modz_sq); auto a_plus_modz = a + modz; auto mod_zplusmodz_sq = a_plus_modz * a_plus_modz + b_sq; auto fac = ::rsqrtf(scale * modz * mod_zplusmodz_sq); return std::complex<float>(a_plus_modz * fac, -b * fac); } __device__ std::complex<double> rsqrt(std::complex<double> z) { auto a = std::real(z); auto b = std::imag(z); auto absa = ::abs(a); auto absb = ::abs(b); // scale to avoid precision loss due to underflow/overflow auto scale = fmax(absa, absb); a /= scale; b /= scale; auto a_sq = a * a; auto b_sq = b * b; auto modz_sq = a_sq + b_sq; auto modz = ::sqrt(modz_sq); auto a_plus_modz = a + modz; auto mod_zplusmodz_sq = a_plus_modz * a_plus_modz + b_sq; auto fac = ::rsqrt(scale * modz * mod_zplusmodz_sq); return std::complex<double>(a_plus_modz * fac, -b * fac); } template <typename T> bool isfinite(std::complex<T> x) { return ::isfinite(std::real(x)) && ::isfinite(std::imag(x)); } template <typename T> bool isinf(std::complex<T> x) { return ::isinf(std::real(x)) || ::isinf(std::imag(x)); } template <typename T> bool isreal(std::complex<T> x) { return std::imag(x) == 0; } #endif // __NVCC__
f0dce11f52326aefacce85c45852a6645a644960.cu
#ifndef __NVCC__ #define POS_INFINITY __int_as_float(0x7f800000) #define INFINITY POS_INFINITY #define NEG_INFINITY __int_as_float(0xff800000) #define NAN __int_as_float(0x7fffffff) //===----------------------------------------------------------------------===// // The following namespace std is modified from LLVM, see the following copyright // information // // -*- C++ -*- //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // copy-pasted from the following llvm file: // https://github.com/llvm/llvm-project/blob/main/libcxx/include/complex namespace std { template <class _Tp> class complex; template <class _Tp> complex<_Tp> operator*(const complex<_Tp>& __z, const complex<_Tp>& __w); template <class _Tp> complex<_Tp> operator/(const complex<_Tp>& __x, const complex<_Tp>& __y); template <class _Tp> class complex { public: typedef _Tp value_type; private: value_type __re_; value_type __im_; public: constexpr complex( const value_type& __re = value_type(), const value_type& __im = value_type()) : __re_(__re), __im_(__im) {} template <class _Xp> constexpr complex(const complex<_Xp>& __c) : __re_(__c.real()), __im_(__c.imag()) {} constexpr value_type real() const { return __re_; } constexpr value_type imag() const { return __im_; } void real(value_type __re) { __re_ = __re; } void imag(value_type __im) { __im_ = __im; } constexpr operator bool() const { return real() || imag(); } complex& operator=(const value_type& __re) { __re_ = __re; __im_ = value_type(); return *this; } complex& operator+=(const value_type& __re) { __re_ += __re; return *this; } complex& operator-=(const value_type& __re) { __re_ -= __re; return *this; } complex& operator*=(const value_type& __re) { __re_ *= __re; __im_ *= __re; return *this; } complex& operator/=(const value_type& __re) { __re_ /= __re; __im_ /= __re; return *this; } template <class _Xp> complex& operator=(const complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template <class _Xp> complex& operator+=(const complex<_Xp>& __c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template <class _Xp> complex& operator-=(const complex<_Xp>& __c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template <class _Xp> complex& operator*=(const complex<_Xp>& __c) { *this = *this * complex(__c.real(), __c.imag()); return *this; } template <class _Xp> complex& operator/=(const complex<_Xp>& __c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; template <> class complex<double>; template <> class complex<float> { float __re_; float __im_; public: typedef float value_type; constexpr complex(float __re = 0.0f, float __im = 0.0f) : __re_(__re), __im_(__im) {} explicit constexpr complex(const complex<double>& __c); // copy volatile to non-volatile constexpr complex(const volatile complex<float>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr complex(const complex<float>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr float real() const { return __re_; } constexpr float imag() const { return __im_; } void real(value_type __re) { __re_ = __re; } void imag(value_type __im) { __im_ = __im; } constexpr operator bool() const { return real() || imag(); } complex& 
operator=(float __re) { __re_ = __re; __im_ = value_type(); return *this; } complex& operator+=(float __re) { __re_ += __re; return *this; } complex& operator-=(float __re) { __re_ -= __re; return *this; } complex& operator*=(float __re) { __re_ *= __re; __im_ *= __re; return *this; } complex& operator/=(float __re) { __re_ /= __re; __im_ /= __re; return *this; } template <class _Xp> complex& operator=(const complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // non-volatile to volatile template <class _Xp> volatile complex& operator=(const complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to non-volatile template <class _Xp> complex& operator=(const volatile complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to volatile template <class _Xp> volatile complex& operator=(const volatile complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template <class _Xp> complex& operator+=(const complex<_Xp>& __c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template <class _Xp> complex& operator-=(const complex<_Xp>& __c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template <class _Xp> complex& operator*=(const complex<_Xp>& __c) { *this = *this * complex(__c.real(), __c.imag()); return *this; } template <class _Xp> complex& operator/=(const complex<_Xp>& __c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; template <> class complex<double> { double __re_; double __im_; public: typedef double value_type; constexpr complex(double __re = 0.0, double __im = 0.0) : __re_(__re), __im_(__im) {} constexpr complex(const complex<float>& __c); // copy volatile to non-volatile constexpr complex(const volatile complex<double>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr complex(const complex<double>& other) : __re_(other.__re_), __im_(other.__im_) {} constexpr double real() const { return __re_; } constexpr double imag() const { return __im_; } void real(value_type __re) { __re_ = __re; } void imag(value_type __im) { __im_ = __im; } constexpr operator bool() const { return real() || imag(); } complex& operator=(double __re) { __re_ = __re; __im_ = value_type(); return *this; } complex& operator+=(double __re) { __re_ += __re; return *this; } complex& operator-=(double __re) { __re_ -= __re; return *this; } complex& operator*=(double __re) { __re_ *= __re; __im_ *= __re; return *this; } complex& operator/=(double __re) { __re_ /= __re; __im_ /= __re; return *this; } template <class _Xp> complex& operator=(const complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // non-volatile to volatile template <class _Xp> volatile complex& operator=(const complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to non-volatile template <class _Xp> complex& operator=(const volatile complex<_Xp>& __c) { __re_ = __c.real(); __im_ = __c.imag(); return *this; } // volatile to volatile template <class _Xp> volatile complex& operator=(const volatile complex<_Xp>& __c) volatile { __re_ = __c.real(); __im_ = __c.imag(); return *this; } template <class _Xp> complex& operator+=(const complex<_Xp>& __c) { __re_ += __c.real(); __im_ += __c.imag(); return *this; } template <class _Xp> complex& operator-=(const complex<_Xp>& __c) { __re_ -= __c.real(); __im_ -= __c.imag(); return *this; } template <class _Xp> complex& operator*=(const complex<_Xp>& __c) { *this = 
*this * complex(__c.real(), __c.imag()); return *this; } template <class _Xp> complex& operator/=(const complex<_Xp>& __c) { *this = *this / complex(__c.real(), __c.imag()); return *this; } }; inline constexpr complex<float>::complex(const complex<double>& __c) : __re_(__c.real()), __im_(__c.imag()) {} inline constexpr complex<double>::complex(const complex<float>& __c) : __re_(__c.real()), __im_(__c.imag()) {} // 26.3.6 operators: template <class _Tp> inline complex<_Tp> operator+( const complex<_Tp>& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__x); __t += __y; return __t; } template <class _Tp> inline complex<_Tp> operator+(const complex<_Tp>& __x, const _Tp& __y) { complex<_Tp> __t(__x); __t += __y; return __t; } template <class _Tp> inline complex<_Tp> operator+(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__y); __t += __x; return __t; } template <class _Tp> inline complex<_Tp> operator-( const complex<_Tp>& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__x); __t -= __y; return __t; } template <class _Tp> inline complex<_Tp> operator-(const complex<_Tp>& __x, const _Tp& __y) { complex<_Tp> __t(__x); __t -= __y; return __t; } template <class _Tp> inline complex<_Tp> operator-(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(-__y); __t += __x; return __t; } template <class _Tp> complex<_Tp> operator*(const complex<_Tp>& __z, const complex<_Tp>& __w) { _Tp __a = __z.real(); _Tp __b = __z.imag(); _Tp __c = __w.real(); _Tp __d = __w.imag(); _Tp __ac = __a * __c; _Tp __bd = __b * __d; _Tp __ad = __a * __d; _Tp __bc = __b * __c; _Tp __x = __ac - __bd; _Tp __y = __ad + __bc; if (isnan(__x) && isnan(__y)) { bool __recalc = false; if (isinf(__a) || isinf(__b)) { __a = copysign(isinf(__a) ? _Tp(1) : _Tp(0), __a); __b = copysign(isinf(__b) ? _Tp(1) : _Tp(0), __b); if (isnan(__c)) __c = copysign(_Tp(0), __c); if (isnan(__d)) __d = copysign(_Tp(0), __d); __recalc = true; } if (isinf(__c) || isinf(__d)) { __c = copysign(isinf(__c) ? _Tp(1) : _Tp(0), __c); __d = copysign(isinf(__d) ? 
_Tp(1) : _Tp(0), __d); if (isnan(__a)) __a = copysign(_Tp(0), __a); if (isnan(__b)) __b = copysign(_Tp(0), __b); __recalc = true; } if (!__recalc && (isinf(__ac) || isinf(__bd) || isinf(__ad) || isinf(__bc))) { if (isnan(__a)) __a = copysign(_Tp(0), __a); if (isnan(__b)) __b = copysign(_Tp(0), __b); if (isnan(__c)) __c = copysign(_Tp(0), __c); if (isnan(__d)) __d = copysign(_Tp(0), __d); __recalc = true; } if (__recalc) { __x = _Tp(INFINITY) * (__a * __c - __b * __d); __y = _Tp(INFINITY) * (__a * __d + __b * __c); } } return complex<_Tp>(__x, __y); } template <class _Tp> inline complex<_Tp> operator*(const complex<_Tp>& __x, const _Tp& __y) { complex<_Tp> __t(__x); __t *= __y; return __t; } template <class _Tp> inline complex<_Tp> operator*(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__y); __t *= __x; return __t; } template <class _Tp> complex<_Tp> operator/(const complex<_Tp>& __z, const complex<_Tp>& __w) { int __ilogbw = 0; _Tp __a = __z.real(); _Tp __b = __z.imag(); _Tp __c = __w.real(); _Tp __d = __w.imag(); _Tp __logbw = logb(fmax(fabs(__c), fabs(__d))); if (isfinite(__logbw)) { __ilogbw = static_cast<int>(__logbw); __c = scalbn(__c, -__ilogbw); __d = scalbn(__d, -__ilogbw); } _Tp __denom = __c * __c + __d * __d; _Tp __x = scalbn((__a * __c + __b * __d) / __denom, -__ilogbw); _Tp __y = scalbn((__b * __c - __a * __d) / __denom, -__ilogbw); if (isnan(__x) && isnan(__y)) { if ((__denom == _Tp(0)) && (!isnan(__a) || !isnan(__b))) { __x = copysign(_Tp(INFINITY), __c) * __a; __y = copysign(_Tp(INFINITY), __c) * __b; } else if ((isinf(__a) || isinf(__b)) && isfinite(__c) && isfinite(__d)) { __a = copysign(isinf(__a) ? _Tp(1) : _Tp(0), __a); __b = copysign(isinf(__b) ? _Tp(1) : _Tp(0), __b); __x = _Tp(INFINITY) * (__a * __c + __b * __d); __y = _Tp(INFINITY) * (__b * __c - __a * __d); } else if ( isinf(__logbw) && __logbw > _Tp(0) && isfinite(__a) && isfinite(__b)) { __c = copysign(isinf(__c) ? _Tp(1) : _Tp(0), __c); __d = copysign(isinf(__d) ? 
_Tp(1) : _Tp(0), __d); __x = _Tp(0) * (__a * __c + __b * __d); __y = _Tp(0) * (__b * __c - __a * __d); } } return complex<_Tp>(__x, __y); } template <class _Tp> inline complex<_Tp> operator/(const complex<_Tp>& __x, const _Tp& __y) { return complex<_Tp>(__x.real() / __y, __x.imag() / __y); } template <class _Tp> inline complex<_Tp> operator/(const _Tp& __x, const complex<_Tp>& __y) { complex<_Tp> __t(__x); __t /= __y; return __t; } template <class _Tp> inline complex<_Tp> operator+(const complex<_Tp>& __x) { return __x; } template <class _Tp> inline complex<_Tp> operator-(const complex<_Tp>& __x) { return complex<_Tp>(-__x.real(), -__x.imag()); } template <class _Tp> inline constexpr bool operator==( const complex<_Tp>& __x, const complex<_Tp>& __y) { return __x.real() == __y.real() && __x.imag() == __y.imag(); } template <class _Tp> inline constexpr bool operator==(const complex<_Tp>& __x, const _Tp& __y) { return __x.real() == __y && __x.imag() == 0; } template <class _Tp> inline constexpr bool operator==(const _Tp& __x, const complex<_Tp>& __y) { return __x == __y.real() && 0 == __y.imag(); } template <class _Tp> inline constexpr bool operator!=( const complex<_Tp>& __x, const complex<_Tp>& __y) { return !(__x == __y); } template <class _Tp> inline constexpr bool operator!=(const complex<_Tp>& __x, const _Tp& __y) { return !(__x == __y); } template <class _Tp> inline constexpr bool operator!=(const _Tp& __x, const complex<_Tp>& __y) { return !(__x == __y); } template <class _Tp> inline constexpr bool operator&&( const complex<_Tp>& __x, const complex<_Tp>& __y) { return bool(__x) && bool(__y); } template <class _Tp> inline constexpr bool isnan(const complex<_Tp>& __x) { return isnan(__x.real()) || isnan(__x.imag()); } template <class _Tp> inline constexpr bool operator||( const complex<_Tp>& __x, const complex<_Tp>& __y) { return bool(__x) || bool(__y); } // 26.3.7 values: template < class _Tp, bool = is_integral<_Tp>::value, bool = is_floating_point<_Tp>::value> struct __libcpp_complex_overload_traits {}; // Integral Types template <class _Tp> struct __libcpp_complex_overload_traits<_Tp, true, false> { typedef double _ValueType; typedef complex<double> _ComplexType; }; // Floating point types template <class _Tp> struct __libcpp_complex_overload_traits<_Tp, false, true> { typedef _Tp _ValueType; typedef complex<_Tp> _ComplexType; }; // real template <class _Tp> inline constexpr _Tp real(const complex<_Tp>& __c) { return __c.real(); } template <class _Tp> inline constexpr typename __libcpp_complex_overload_traits<_Tp>::_ValueType real( _Tp __re) { return __re; } // imag template <class _Tp> inline constexpr _Tp imag(const complex<_Tp>& __c) { return __c.imag(); } template <class _Tp> inline constexpr typename __libcpp_complex_overload_traits<_Tp>::_ValueType imag( _Tp) { return 0; } // abs template <class _Tp> inline _Tp abs(const complex<_Tp>& __c) { return hypot(__c.real(), __c.imag()); } // arg template <class _Tp> inline _Tp arg(const complex<_Tp>& __c) { return atan2(__c.imag(), __c.real()); } template <class _Tp> inline typename enable_if< is_integral<_Tp>::value || is_same<_Tp, double>::value, double>::type arg(_Tp __re) { return atan2(0., __re); } template <class _Tp> inline typename enable_if<is_same<_Tp, float>::value, float>::type arg( _Tp __re) { return atan2f(0.F, __re); } } // namespace std namespace std { using ::isfinite; using ::isinf; using ::isnan; using ::signbit; using ::abs; using ::acos; using ::acosf; using ::asin; using ::asinf; using ::atan; using ::atan2; 
using ::atan2f; using ::atanf; using ::ceil; using ::ceilf; using ::cos; using ::cosf; using ::cosh; using ::coshf; using ::exp; using ::expf; using ::fabs; using ::fabsf; using ::floor; using ::floorf; using ::fmod; using ::fmodf; using ::frexp; using ::frexpf; using ::ldexp; using ::ldexpf; using ::log; using ::logf; using ::log10; using ::log10f; using ::modf; using ::modff; using ::pow; using ::powf; using ::sin; using ::sinf; using ::sinh; using ::sinhf; using ::sqrt; using ::sqrtf; using ::tan; using ::tanf; using ::tanh; using ::tanhf; using ::acosh; using ::acoshf; using ::asinh; using ::asinhf; using ::atanh; using ::atanhf; using ::cbrt; using ::cbrtf; using ::copysign; using ::copysignf; using ::erf; using ::erfc; using ::erfcf; using ::erff; using ::exp2; using ::exp2f; using ::expm1; using ::expm1f; using ::fdim; using ::fdimf; using ::fma; using ::fmaf; using ::fmax; using ::fmaxf; using ::fmin; using ::fminf; using ::hypot; using ::hypotf; using ::ilogb; using ::ilogbf; using ::lgamma; using ::lgammaf; using ::llrint; using ::llrintf; using ::llround; using ::llroundf; using ::log1p; using ::log1pf; using ::log2; using ::log2f; using ::logb; using ::logbf; using ::lrint; using ::lrintf; using ::lround; using ::lroundf; using ::nan; using ::nanf; using ::nearbyint; using ::nearbyintf; using ::nextafter; using ::nextafterf; using ::remainder; using ::remainderf; using ::remquo; using ::remquof; using ::rint; using ::rintf; using ::round; using ::roundf; using ::scalbln; using ::scalblnf; using ::scalbn; using ::scalbnf; using ::tgamma; using ::tgammaf; using ::trunc; using ::truncf; } // namespace std namespace std { // norm template <class _Tp> inline _Tp norm(const complex<_Tp>& __c) { if (isinf(__c.real())) return abs(__c.real()); if (isinf(__c.imag())) return abs(__c.imag()); return __c.real() * __c.real() + __c.imag() * __c.imag(); } template <class _Tp> inline typename __libcpp_complex_overload_traits<_Tp>::_ValueType norm( _Tp __re) { typedef typename __libcpp_complex_overload_traits<_Tp>::_ValueType _ValueType; return static_cast<_ValueType>(__re) * __re; } // conj template <class _Tp> inline complex<_Tp> conj(const complex<_Tp>& __c) { return complex<_Tp>(__c.real(), -__c.imag()); } template <class _Tp> inline typename __libcpp_complex_overload_traits<_Tp>::_ComplexType conj( _Tp __re) { typedef typename __libcpp_complex_overload_traits<_Tp>::_ComplexType _ComplexType; return _ComplexType(__re); } // proj template <class _Tp> inline complex<_Tp> proj(const complex<_Tp>& __c) { complex<_Tp> __r = __c; if (isinf(__c.real()) || isinf(__c.imag())) __r = complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag())); return __r; } template <class _Tp> inline typename enable_if< is_floating_point<_Tp>::value, typename __libcpp_complex_overload_traits<_Tp>::_ComplexType>::type proj(_Tp __re) { if (isinf(__re)) __re = abs(__re); return complex<_Tp>(__re); } template <class _Tp> inline typename enable_if< is_integral<_Tp>::value, typename __libcpp_complex_overload_traits<_Tp>::_ComplexType>::type proj(_Tp __re) { typedef typename __libcpp_complex_overload_traits<_Tp>::_ComplexType _ComplexType; return _ComplexType(__re); } // polar template <class _Tp> complex<_Tp> polar(const _Tp& __rho, const _Tp& __theta = _Tp()) { if (isnan(__rho) || signbit(__rho)) return complex<_Tp>(_Tp(NAN), _Tp(NAN)); if (isnan(__theta)) { if (isinf(__rho)) return complex<_Tp>(__rho, __theta); return complex<_Tp>(__theta, __theta); } if (isinf(__theta)) { if (isinf(__rho)) return complex<_Tp>(__rho, _Tp(NAN)); 
return complex<_Tp>(_Tp(NAN), _Tp(NAN)); } _Tp __x = __rho * cos(__theta); if (isnan(__x)) __x = 0; _Tp __y = __rho * sin(__theta); if (isnan(__y)) __y = 0; return complex<_Tp>(__x, __y); } // log template <class _Tp> inline complex<_Tp> log(const complex<_Tp>& __x) { return complex<_Tp>(log(abs(__x)), arg(__x)); } // log10 template <class _Tp> inline complex<_Tp> log10(const complex<_Tp>& __x) { return log(__x) / log(_Tp(10)); } // log2 template <class _Tp> inline complex<_Tp> log2(const complex<_Tp>& __x) { return log(__x) / log(_Tp(2)); } // sqrt template <class _Tp> complex<_Tp> sqrt(const complex<_Tp>& __x) { if (isinf(__x.imag())) return complex<_Tp>(_Tp(INFINITY), __x.imag()); if (isinf(__x.real())) { if (__x.real() > _Tp(0)) return complex<_Tp>( __x.real(), isnan(__x.imag()) ? __x.imag() : copysign(_Tp(0), __x.imag())); return complex<_Tp>( isnan(__x.imag()) ? __x.imag() : _Tp(0), copysign(__x.real(), __x.imag())); } return polar(sqrt(abs(__x)), arg(__x) / _Tp(2)); } // exp template <class _Tp> complex<_Tp> exp(const complex<_Tp>& __x) { _Tp __i = __x.imag(); if (__i == 0) { return complex<_Tp>(exp(__x.real()), copysign(_Tp(0), __x.imag())); } if (isinf(__x.real())) { if (__x.real() < _Tp(0)) { if (!isfinite(__i)) __i = _Tp(1); } else if (__i == 0 || !isfinite(__i)) { if (isinf(__i)) __i = _Tp(NAN); return complex<_Tp>(__x.real(), __i); } } _Tp __e = exp(__x.real()); return complex<_Tp>(__e * cos(__i), __e * sin(__i)); } // pow template <class _Tp> inline complex<_Tp> pow(const complex<_Tp>& __x, const complex<_Tp>& __y) { return exp(__y * log(__x)); } template <class _Tp, class _Up> inline complex<typename __promote<_Tp, _Up>::type> pow( const complex<_Tp>& __x, const complex<_Up>& __y) { typedef complex<typename __promote<_Tp, _Up>::type> result_type; return std::pow(result_type(__x), result_type(__y)); } template <class _Tp, class _Up> inline typename enable_if< is_arithmetic<_Up>::value, complex<typename __promote<_Tp, _Up>::type>>::type pow(const complex<_Tp>& __x, const _Up& __y) { typedef complex<typename __promote<_Tp, _Up>::type> result_type; return std::pow(result_type(__x), result_type(__y)); } template <class _Tp, class _Up> inline typename enable_if< is_arithmetic<_Tp>::value, complex<typename __promote<_Tp, _Up>::type>>::type pow(const _Tp& __x, const complex<_Up>& __y) { typedef complex<typename __promote<_Tp, _Up>::type> result_type; return std::pow(result_type(__x), result_type(__y)); } // __sqr, computes pow(x, 2) template <class _Tp> inline complex<_Tp> __sqr(const complex<_Tp>& __x) { return complex<_Tp>( (__x.real() - __x.imag()) * (__x.real() + __x.imag()), _Tp(2) * __x.real() * __x.imag()); } // asinh template <class _Tp> complex<_Tp> asinh(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.real())) { if (isnan(__x.imag())) return __x; if (isinf(__x.imag())) return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag())); return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag())); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(__x.imag(), __x.real()); if (__x.imag() == 0) return __x; return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>( copysign(__x.imag(), __x.real()), copysign(__pi / _Tp(2), __x.imag())); complex<_Tp> __z = log(__x + sqrt(__sqr(__x) + _Tp(1))); return complex<_Tp>( copysign(__z.real(), __x.real()), copysign(__z.imag(), __x.imag())); } // acosh template <class _Tp> complex<_Tp> acosh(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if 
(isinf(__x.real())) { if (isnan(__x.imag())) return complex<_Tp>(abs(__x.real()), __x.imag()); if (isinf(__x.imag())) { if (__x.real() > 0) return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag())); else return complex<_Tp>( -__x.real(), copysign(__pi * _Tp(0.75), __x.imag())); } if (__x.real() < 0) return complex<_Tp>(-__x.real(), copysign(__pi, __x.imag())); return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag())); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(abs(__x.imag()), __x.real()); return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>(abs(__x.imag()), copysign(__pi / _Tp(2), __x.imag())); complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1))); return complex<_Tp>( copysign(__z.real(), _Tp(0)), copysign(__z.imag(), __x.imag())); } // atanh template <class _Tp> complex<_Tp> atanh(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.imag())) { return complex<_Tp>( copysign(_Tp(0), __x.real()), copysign(__pi / _Tp(2), __x.imag())); } if (isnan(__x.imag())) { if (isinf(__x.real()) || __x.real() == 0) return complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag()); return complex<_Tp>(__x.imag(), __x.imag()); } if (isnan(__x.real())) { return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.real())) { return complex<_Tp>( copysign(_Tp(0), __x.real()), copysign(__pi / _Tp(2), __x.imag())); } if (abs(__x.real()) == _Tp(1) && __x.imag() == _Tp(0)) { return complex<_Tp>( copysign(_Tp(INFINITY), __x.real()), copysign(_Tp(0), __x.imag())); } complex<_Tp> __z = log((_Tp(1) + __x) / (_Tp(1) - __x)) / _Tp(2); return complex<_Tp>( copysign(__z.real(), __x.real()), copysign(__z.imag(), __x.imag())); } // sinh template <class _Tp> complex<_Tp> sinh(const complex<_Tp>& __x) { if (isinf(__x.real()) && !isfinite(__x.imag())) return complex<_Tp>(__x.real(), _Tp(NAN)); if (__x.real() == 0 && !isfinite(__x.imag())) return complex<_Tp>(__x.real(), _Tp(NAN)); if (__x.imag() == 0 && !isfinite(__x.real())) return __x; return complex<_Tp>( sinh(__x.real()) * cos(__x.imag()), cosh(__x.real()) * sin(__x.imag())); } // cosh template <class _Tp> complex<_Tp> cosh(const complex<_Tp>& __x) { if (isinf(__x.real()) && !isfinite(__x.imag())) return complex<_Tp>(abs(__x.real()), _Tp(NAN)); if (__x.real() == 0 && !isfinite(__x.imag())) return complex<_Tp>(_Tp(NAN), __x.real()); if (__x.real() == 0 && __x.imag() == 0) return complex<_Tp>(_Tp(1), __x.imag()); if (__x.imag() == 0 && !isfinite(__x.real())) return complex<_Tp>(abs(__x.real()), __x.imag()); return complex<_Tp>( cosh(__x.real()) * cos(__x.imag()), sinh(__x.real()) * sin(__x.imag())); } // tanh template <class _Tp> complex<_Tp> tanh(const complex<_Tp>& __x) { if (isinf(__x.real())) { if (!isfinite(__x.imag())) return complex<_Tp>(copysign(_Tp(1), __x.real()), _Tp(0)); return complex<_Tp>( copysign(_Tp(1), __x.real()), copysign(_Tp(0), sin(_Tp(2) * __x.imag()))); } if (isnan(__x.real()) && __x.imag() == 0) return __x; _Tp __2r(_Tp(2) * __x.real()); _Tp __2i(_Tp(2) * __x.imag()); _Tp __d(cosh(__2r) + cos(__2i)); _Tp __2rsh(sinh(__2r)); if (isinf(__2rsh) && isinf(__d)) return complex<_Tp>( __2rsh > _Tp(0) ? _Tp(1) : _Tp(-1), __2i > _Tp(0) ? 
_Tp(0) : _Tp(-0.)); return complex<_Tp>(__2rsh / __d, sin(__2i) / __d); } // asin template <class _Tp> complex<_Tp> asin(const complex<_Tp>& __x) { complex<_Tp> __z = asinh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // acos template <class _Tp> complex<_Tp> acos(const complex<_Tp>& __x) { const _Tp __pi(atan2(+0., -0.)); if (isinf(__x.real())) { if (isnan(__x.imag())) return complex<_Tp>(__x.imag(), __x.real()); if (isinf(__x.imag())) { if (__x.real() < _Tp(0)) return complex<_Tp>(_Tp(0.75) * __pi, -__x.imag()); return complex<_Tp>(_Tp(0.25) * __pi, -__x.imag()); } if (__x.real() < _Tp(0)) return complex<_Tp>(__pi, signbit(__x.imag()) ? -__x.real() : __x.real()); return complex<_Tp>(_Tp(0), signbit(__x.imag()) ? __x.real() : -__x.real()); } if (isnan(__x.real())) { if (isinf(__x.imag())) return complex<_Tp>(__x.real(), -__x.imag()); return complex<_Tp>(__x.real(), __x.real()); } if (isinf(__x.imag())) return complex<_Tp>(__pi / _Tp(2), -__x.imag()); if (__x.real() == 0 && (__x.imag() == 0 || isnan(__x.imag()))) return complex<_Tp>(__pi / _Tp(2), -__x.imag()); complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1))); if (signbit(__x.imag())) return complex<_Tp>(abs(__z.imag()), abs(__z.real())); return complex<_Tp>(abs(__z.imag()), -abs(__z.real())); } // atan template <class _Tp> complex<_Tp> atan(const complex<_Tp>& __x) { complex<_Tp> __z = atanh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // sin template <class _Tp> complex<_Tp> sin(const complex<_Tp>& __x) { complex<_Tp> __z = sinh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // cos template <class _Tp> inline complex<_Tp> cos(const complex<_Tp>& __x) { return cosh(complex<_Tp>(-__x.imag(), __x.real())); } // tan template <class _Tp> complex<_Tp> tan(const complex<_Tp>& __x) { complex<_Tp> __z = tanh(complex<_Tp>(-__x.imag(), __x.real())); return complex<_Tp>(__z.imag(), -__z.real()); } // Literal suffix for complex number literals [complex.literals] inline namespace literals { inline namespace complex_literals { constexpr complex<double> operator""i(long double __im) { return {0.0, static_cast<double>(__im)}; } constexpr complex<double> operator""i(unsigned long long __im) { return {0.0, static_cast<double>(__im)}; } constexpr complex<float> operator""if(long double __im) { return {0.0f, static_cast<float>(__im)}; } constexpr complex<float> operator""if(unsigned long long __im) { return {0.0f, static_cast<float>(__im)}; } } // namespace complex_literals } // namespace literals } // namespace std __device__ std::complex<double> lerp( std::complex<double> start, std::complex<double> end, std::complex<double> weight) { if (abs(weight) < 0.5) { return start + weight * (end - start); } else { return end - (end - start) * (1.0 - weight); } } __device__ std::complex<float> lerp( std::complex<float> start, std::complex<float> end, std::complex<float> weight) { if (abs(weight) < 0.5f) { return start + weight * (end - start); } else { return end - (end - start) * (1.0f - weight); } } __device__ std::complex<double> reciprocal(std::complex<double> x) { return 1.0 / x; } __device__ std::complex<float> reciprocal(std::complex<float> x) { return 1.0f / x; } __device__ std::complex<double> sigmoid(std::complex<double> x) { return 1.0 / (1.0 + exp(-x)); } __device__ std::complex<float> sigmoid(std::complex<float> x) { return 1.0f / (1.0f + exp(-x)); } // The reciprocal of a complex number z is // 1/z = conj(z)/|z|^2. 
// The principal square root of a complex number z can be obtained by [1] // sqrt(z) = sqrt(|z|) (z + |z|) / |z + |z||. // Combining these formulas we have // 1/sqrt(z) = (conj(z) + |z|) / (sqrt(|z|) |z + |z||). // [1] https://math.stackexchange.com/a/44500 __device__ std::complex<float> rsqrt(std::complex<float> z) { auto a = std::real(z); auto b = std::imag(z); auto absa = ::fabsf(a); auto absb = ::fabsf(b); // scale to avoid precision loss due to underflow/overflow auto scale = fmax(absa, absb); a /= scale; b /= scale; auto a_sq = a * a; auto b_sq = b * b; auto modz_sq = a_sq + b_sq; auto modz = ::sqrtf(modz_sq); auto a_plus_modz = a + modz; auto mod_zplusmodz_sq = a_plus_modz * a_plus_modz + b_sq; auto fac = ::rsqrtf(scale * modz * mod_zplusmodz_sq); return std::complex<float>(a_plus_modz * fac, -b * fac); } __device__ std::complex<double> rsqrt(std::complex<double> z) { auto a = std::real(z); auto b = std::imag(z); auto absa = ::abs(a); auto absb = ::abs(b); // scale to avoid precision loss due to underflow/overflow auto scale = fmax(absa, absb); a /= scale; b /= scale; auto a_sq = a * a; auto b_sq = b * b; auto modz_sq = a_sq + b_sq; auto modz = ::sqrt(modz_sq); auto a_plus_modz = a + modz; auto mod_zplusmodz_sq = a_plus_modz * a_plus_modz + b_sq; auto fac = ::rsqrt(scale * modz * mod_zplusmodz_sq); return std::complex<double>(a_plus_modz * fac, -b * fac); } template <typename T> bool isfinite(std::complex<T> x) { return ::isfinite(std::real(x)) && ::isfinite(std::imag(x)); } template <typename T> bool isinf(std::complex<T> x) { return ::isinf(std::real(x)) || ::isinf(std::imag(x)); } template <typename T> bool isreal(std::complex<T> x) { return std::imag(x) == 0; } #endif // __NVCC__
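// A quick host-side sanity check of the identity above: a minimal sketch that relies
// only on std::complex from the C++ standard library. It is independent of the
// device-side rsqrt definitions in this file, and the test value z is arbitrary.
#include <cmath>
#include <complex>
#include <cstdio>

int main() {
    // verify 1/sqrt(z) == (conj(z) + |z|) / (sqrt(|z|) * |z + |z||)
    std::complex<double> z(3.0, -4.0);   // |z| = 5
    double modz = std::abs(z);
    std::complex<double> lhs = 1.0 / std::sqrt(z);
    std::complex<double> rhs = (std::conj(z) + modz) /
                               (std::sqrt(modz) * std::abs(z + modz));
    std::printf("lhs = (% .12f, % .12f)\nrhs = (% .12f, % .12f)\n",
                lhs.real(), lhs.imag(), rhs.real(), rhs.imag());
    return 0;
}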
422de9fd5aa05d45f1d8cd232105b9d99282822e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "hipblas.h"

// print helper for a square matrix (declaration)
void print(double *C, int size);

int main(int argc, char const *argv[])
{
    // matrix order and number of streams, read from the command line
    int num_streams, N, size;
    N = atoi(argv[1]);
    num_streams = atoi(argv[2]);

    // matrix size in bytes
    size = N * N * sizeof(double);

    // create timing events
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // allocate pinned CPU memory
    double *A, *B, *C;
    hipHostMalloc((void **)&A, size, hipHostMallocDefault);
    hipHostMalloc((void **)&B, size, hipHostMallocDefault);
    hipHostMalloc((void **)&C, size, hipHostMallocDefault);

    // allocate GPU memory
    double *d_A, *d_B, *d_C;
    hipMalloc((void **)&d_A, size);
    hipMalloc((void **)&d_B, size);
    hipMalloc((void **)&d_C, size);

    // initialize A, B, C
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            A[i + j*N] = 0.5;
            B[i + j*N] = 0.25;
            C[i + j*N] = 0.0;
        }
    }

    // set alpha and beta
    const double alpha = 1.0;
    const double beta  = 0.0;

    // create handle
    hipblasHandle_t handle;
    hipblasCreate(&handle);

    // create streams
    hipStream_t streams[num_streams];

    // compute
    hipEventRecord(start, 0);
    for (int i = 0; i < num_streams; i++) {
        hipStreamCreate(&streams[i]);
        hipMemcpyAsync(d_A, A, size, hipMemcpyHostToDevice, streams[i]);
        hipMemcpyAsync(d_B, B, size, hipMemcpyHostToDevice, streams[i]);
        // hipMemcpyAsync(d_C, C, size, hipMemcpyHostToDevice, streams[i]);
        hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, N, N,
                     &alpha, d_A, N, d_B, N, &beta, d_C, N);
        hipMemcpyAsync(C, d_C, size, hipMemcpyDeviceToHost, streams[i]);
        hipStreamSynchronize(streams[i]);
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);

    // destroy handle
    hipblasDestroy(handle);

    // destroy events
    hipEventDestroy(stop);
    hipEventDestroy(start);

    // destroy streams
    for (int i = 0; i < num_streams; ++i) {
        hipStreamDestroy(streams[i]);
    }

    // print elapsed time and achieved GFLOP/s
    printf("%d %d% .6f %g \n", N, num_streams, elapsedTime / 1000.0f,
           num_streams * 2e-9 * N*N*N / (elapsedTime / 1000.0f));
    // print(C, 5);

    // free CPU memory
    hipHostFree(A);
    hipHostFree(B);
    hipHostFree(C);

    // free GPU memory
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    return 0;
}

// print results of the array
void print(double *C, int size)
{
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++)
            printf(" %f", C[size*i + j]);
        printf("\n");
    }
}
422de9fd5aa05d45f1d8cd232105b9d99282822e.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"

// print helper for a square matrix (declaration)
void print(double *C, int size);

int main(int argc, char const *argv[])
{
    // matrix order and number of streams, read from the command line
    int num_streams, N, size;
    N = atoi(argv[1]);
    num_streams = atoi(argv[2]);

    // matrix size in bytes
    size = N * N * sizeof(double);

    // create timing events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // allocate pinned CPU memory
    double *A, *B, *C;
    cudaHostAlloc((void **)&A, size, cudaHostAllocDefault);
    cudaHostAlloc((void **)&B, size, cudaHostAllocDefault);
    cudaHostAlloc((void **)&C, size, cudaHostAllocDefault);

    // allocate GPU memory
    double *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);

    // initialize A, B, C
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            A[i + j*N] = 0.5;
            B[i + j*N] = 0.25;
            C[i + j*N] = 0.0;
        }
    }

    // set alpha and beta
    const double alpha = 1.0;
    const double beta  = 0.0;

    // create handle
    cublasHandle_t handle;
    cublasCreate(&handle);

    // create streams
    cudaStream_t streams[num_streams];

    // compute
    cudaEventRecord(start, 0);
    for (int i = 0; i < num_streams; i++) {
        cudaStreamCreate(&streams[i]);
        cudaMemcpyAsync(d_A, A, size, cudaMemcpyHostToDevice, streams[i]);
        cudaMemcpyAsync(d_B, B, size, cudaMemcpyHostToDevice, streams[i]);
        // cudaMemcpyAsync(d_C, C, size, cudaMemcpyHostToDevice, streams[i]);
        cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N,
                    &alpha, d_A, N, d_B, N, &beta, d_C, N);
        cudaMemcpyAsync(C, d_C, size, cudaMemcpyDeviceToHost, streams[i]);
        cudaStreamSynchronize(streams[i]);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);

    // destroy handle
    cublasDestroy(handle);

    // destroy events
    cudaEventDestroy(stop);
    cudaEventDestroy(start);

    // destroy streams
    for (int i = 0; i < num_streams; ++i) {
        cudaStreamDestroy(streams[i]);
    }

    // print elapsed time and achieved GFLOP/s
    printf("%d %d% .6f %g \n", N, num_streams, elapsedTime / 1000.0f,
           num_streams * 2e-9 * N*N*N / (elapsedTime / 1000.0f));
    // print(C, 5);

    // free CPU memory
    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);

    // free GPU memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}

// print results of the array
void print(double *C, int size)
{
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++)
            printf(" %f", C[size*i + j]);
        printf("\n");
    }
}
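// cuBLAS launches its kernels on whatever stream is currently bound to the handle,
// so in the loop above the Dgemm runs on the default stream rather than streams[i]
// unless cublasSetStream is called first. Below is a minimal sketch (not part of
// the original program) of issuing the GEMM on a caller-supplied stream; the
// function name dgemm_on_stream and its parameter layout are illustrative only.
#include <cuda_runtime.h>
#include "cublas_v2.h"

static void dgemm_on_stream(cublasHandle_t handle, cudaStream_t stream,
                            const double *d_A, const double *d_B, double *d_C,
                            int N, const double *alpha, const double *beta)
{
    cublasSetStream(handle, stream);                 // bind the handle to this stream
    cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,    // the GEMM now launches on `stream`
                N, N, N, alpha, d_A, N, d_B, N, beta, d_C, N);
}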
dc243f13215261d21dcc2c878ea42b5ddfcf9393.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudaarithm.hpp" #include "opencv2/cudev.hpp" using namespace cv::cudev; namespace { template <typename T1, typename T2, typename D, typename S> struct AddWeightedOp : binary_function<T1, T2, D> { S alpha; S beta; S gamma; __device__ __forceinline__ D operator ()(T1 a, T2 b) const { return saturate_cast<D>(a * alpha + b * beta + gamma); } }; template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy { }; template <> struct TransformPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T1, typename T2, typename D> void addWeightedImpl(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream) { typedef typename LargerType<T1, T2>::type larger_type1; typedef typename LargerType<larger_type1, D>::type larger_type2; typedef typename LargerType<larger_type2, float>::type scalar_type; AddWeightedOp<T1, T2, D, scalar_type> op; op.alpha = static_cast<scalar_type>(alpha); op.beta = static_cast<scalar_type>(beta); op.gamma = static_cast<scalar_type>(gamma); gridTransformBinary_< TransformPolicy<scalar_type> >(globPtr<T1>(src1), globPtr<T2>(src2), globPtr<D>(dst), op, stream); } } void cv::cuda::addWeighted(InputArray _src1, double alpha, InputArray _src2, double beta, double gamma, OutputArray _dst, int ddepth, Stream& stream) { typedef void (*func_t)(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream); static const func_t funcs[7][7][7] = { { { addWeightedImpl<uchar, uchar, uchar >, addWeightedImpl<uchar, uchar, schar >, addWeightedImpl<uchar, uchar, ushort>, addWeightedImpl<uchar, uchar, short >, addWeightedImpl<uchar, uchar, int >, addWeightedImpl<uchar, uchar, float >, addWeightedImpl<uchar, uchar, double> }, { addWeightedImpl<uchar, schar, uchar >, addWeightedImpl<uchar, schar, schar >, addWeightedImpl<uchar, schar, ushort>, addWeightedImpl<uchar, schar, short >, addWeightedImpl<uchar, schar, int >, addWeightedImpl<uchar, schar, float >, addWeightedImpl<uchar, schar, double> }, { addWeightedImpl<uchar, ushort, uchar >, addWeightedImpl<uchar, ushort, schar >, addWeightedImpl<uchar, ushort, ushort>, addWeightedImpl<uchar, ushort, short >, addWeightedImpl<uchar, ushort, int >, addWeightedImpl<uchar, ushort, float >, addWeightedImpl<uchar, ushort, double> }, { addWeightedImpl<uchar, short, uchar >, addWeightedImpl<uchar, short, schar >, addWeightedImpl<uchar, short, ushort>, addWeightedImpl<uchar, short, short >, addWeightedImpl<uchar, short, int >, addWeightedImpl<uchar, short, float >, addWeightedImpl<uchar, short, double> }, { addWeightedImpl<uchar, int, uchar >, addWeightedImpl<uchar, int, schar >, addWeightedImpl<uchar, int, ushort>, addWeightedImpl<uchar, int, short >, addWeightedImpl<uchar, int, int >, addWeightedImpl<uchar, int, float >, addWeightedImpl<uchar, int, double> }, { addWeightedImpl<uchar, float, uchar >, addWeightedImpl<uchar, float, schar >, addWeightedImpl<uchar, float, ushort>, addWeightedImpl<uchar, float, short >, addWeightedImpl<uchar, float, int >, addWeightedImpl<uchar, float, float >, addWeightedImpl<uchar, float, double> }, { addWeightedImpl<uchar, double, uchar >, addWeightedImpl<uchar, double, schar >, addWeightedImpl<uchar, double, ushort>, addWeightedImpl<uchar, double, short >, addWeightedImpl<uchar, double, int >, addWeightedImpl<uchar, double, float >, addWeightedImpl<uchar, 
double, double> } }, { { 0/*addWeightedImpl<schar, uchar, uchar >*/, 0/*addWeightedImpl<schar, uchar, schar >*/, 0/*addWeightedImpl<schar, uchar, ushort>*/, 0/*addWeightedImpl<schar, uchar, short >*/, 0/*addWeightedImpl<schar, uchar, int >*/, 0/*addWeightedImpl<schar, uchar, float >*/, 0/*addWeightedImpl<schar, uchar, double>*/ }, { addWeightedImpl<schar, schar, uchar >, addWeightedImpl<schar, schar, schar >, addWeightedImpl<schar, schar, ushort>, addWeightedImpl<schar, schar, short >, addWeightedImpl<schar, schar, int >, addWeightedImpl<schar, schar, float >, addWeightedImpl<schar, schar, double> }, { addWeightedImpl<schar, ushort, uchar >, addWeightedImpl<schar, ushort, schar >, addWeightedImpl<schar, ushort, ushort>, addWeightedImpl<schar, ushort, short >, addWeightedImpl<schar, ushort, int >, addWeightedImpl<schar, ushort, float >, addWeightedImpl<schar, ushort, double> }, { addWeightedImpl<schar, short, uchar >, addWeightedImpl<schar, short, schar >, addWeightedImpl<schar, short, ushort>, addWeightedImpl<schar, short, short >, addWeightedImpl<schar, short, int >, addWeightedImpl<schar, short, float >, addWeightedImpl<schar, short, double> }, { addWeightedImpl<schar, int, uchar >, addWeightedImpl<schar, int, schar >, addWeightedImpl<schar, int, ushort>, addWeightedImpl<schar, int, short >, addWeightedImpl<schar, int, int >, addWeightedImpl<schar, int, float >, addWeightedImpl<schar, int, double> }, { addWeightedImpl<schar, float, uchar >, addWeightedImpl<schar, float, schar >, addWeightedImpl<schar, float, ushort>, addWeightedImpl<schar, float, short >, addWeightedImpl<schar, float, int >, addWeightedImpl<schar, float, float >, addWeightedImpl<schar, float, double> }, { addWeightedImpl<schar, double, uchar >, addWeightedImpl<schar, double, schar >, addWeightedImpl<schar, double, ushort>, addWeightedImpl<schar, double, short >, addWeightedImpl<schar, double, int >, addWeightedImpl<schar, double, float >, addWeightedImpl<schar, double, double> } }, { { 0/*addWeightedImpl<ushort, uchar, uchar >*/, 0/*addWeightedImpl<ushort, uchar, schar >*/, 0/*addWeightedImpl<ushort, uchar, ushort>*/, 0/*addWeightedImpl<ushort, uchar, short >*/, 0/*addWeightedImpl<ushort, uchar, int >*/, 0/*addWeightedImpl<ushort, uchar, float >*/, 0/*addWeightedImpl<ushort, uchar, double>*/ }, { 0/*addWeightedImpl<ushort, schar, uchar >*/, 0/*addWeightedImpl<ushort, schar, schar >*/, 0/*addWeightedImpl<ushort, schar, ushort>*/, 0/*addWeightedImpl<ushort, schar, short >*/, 0/*addWeightedImpl<ushort, schar, int >*/, 0/*addWeightedImpl<ushort, schar, float >*/, 0/*addWeightedImpl<ushort, schar, double>*/ }, { addWeightedImpl<ushort, ushort, uchar >, addWeightedImpl<ushort, ushort, schar >, addWeightedImpl<ushort, ushort, ushort>, addWeightedImpl<ushort, ushort, short >, addWeightedImpl<ushort, ushort, int >, addWeightedImpl<ushort, ushort, float >, addWeightedImpl<ushort, ushort, double> }, { addWeightedImpl<ushort, short, uchar >, addWeightedImpl<ushort, short, schar >, addWeightedImpl<ushort, short, ushort>, addWeightedImpl<ushort, short, short >, addWeightedImpl<ushort, short, int >, addWeightedImpl<ushort, short, float >, addWeightedImpl<ushort, short, double> }, { addWeightedImpl<ushort, int, uchar >, addWeightedImpl<ushort, int, schar >, addWeightedImpl<ushort, int, ushort>, addWeightedImpl<ushort, int, short >, addWeightedImpl<ushort, int, int >, addWeightedImpl<ushort, int, float >, addWeightedImpl<ushort, int, double> }, { addWeightedImpl<ushort, float, uchar >, addWeightedImpl<ushort, float, schar >, 
addWeightedImpl<ushort, float, ushort>, addWeightedImpl<ushort, float, short >, addWeightedImpl<ushort, float, int >, addWeightedImpl<ushort, float, float >, addWeightedImpl<ushort, float, double> }, { addWeightedImpl<ushort, double, uchar >, addWeightedImpl<ushort, double, schar >, addWeightedImpl<ushort, double, ushort>, addWeightedImpl<ushort, double, short >, addWeightedImpl<ushort, double, int >, addWeightedImpl<ushort, double, float >, addWeightedImpl<ushort, double, double> } }, { { 0/*addWeightedImpl<short, uchar, uchar >*/, 0/*addWeightedImpl<short, uchar, schar >*/, 0/*addWeightedImpl<short, uchar, ushort>*/, 0/*addWeightedImpl<short, uchar, short >*/, 0/*addWeightedImpl<short, uchar, int >*/, 0/*addWeightedImpl<short, uchar, float >*/, 0/*addWeightedImpl<short, uchar, double>*/ }, { 0/*addWeightedImpl<short, schar, uchar >*/, 0/*addWeightedImpl<short, schar, schar >*/, 0/*addWeightedImpl<short, schar, ushort>*/, 0/*addWeightedImpl<short, schar, short >*/, 0/*addWeightedImpl<short, schar, int >*/, 0/*addWeightedImpl<short, schar, float >*/, 0/*addWeightedImpl<short, schar, double>*/ }, { 0/*addWeightedImpl<short, ushort, uchar >*/, 0/*addWeightedImpl<short, ushort, schar >*/, 0/*addWeightedImpl<short, ushort, ushort>*/, 0/*addWeightedImpl<short, ushort, short >*/, 0/*addWeightedImpl<short, ushort, int >*/, 0/*addWeightedImpl<short, ushort, float >*/, 0/*addWeightedImpl<short, ushort, double>*/ }, { addWeightedImpl<short, short, uchar >, addWeightedImpl<short, short, schar >, addWeightedImpl<short, short, ushort>, addWeightedImpl<short, short, short >, addWeightedImpl<short, short, int >, addWeightedImpl<short, short, float >, addWeightedImpl<short, short, double> }, { addWeightedImpl<short, int, uchar >, addWeightedImpl<short, int, schar >, addWeightedImpl<short, int, ushort>, addWeightedImpl<short, int, short >, addWeightedImpl<short, int, int >, addWeightedImpl<short, int, float >, addWeightedImpl<short, int, double> }, { addWeightedImpl<short, float, uchar >, addWeightedImpl<short, float, schar >, addWeightedImpl<short, float, ushort>, addWeightedImpl<short, float, short >, addWeightedImpl<short, float, int >, addWeightedImpl<short, float, float >, addWeightedImpl<short, float, double> }, { addWeightedImpl<short, double, uchar >, addWeightedImpl<short, double, schar >, addWeightedImpl<short, double, ushort>, addWeightedImpl<short, double, short >, addWeightedImpl<short, double, int >, addWeightedImpl<short, double, float >, addWeightedImpl<short, double, double> } }, { { 0/*addWeightedImpl<int, uchar, uchar >*/, 0/*addWeightedImpl<int, uchar, schar >*/, 0/*addWeightedImpl<int, uchar, ushort>*/, 0/*addWeightedImpl<int, uchar, short >*/, 0/*addWeightedImpl<int, uchar, int >*/, 0/*addWeightedImpl<int, uchar, float >*/, 0/*addWeightedImpl<int, uchar, double>*/ }, { 0/*addWeightedImpl<int, schar, uchar >*/, 0/*addWeightedImpl<int, schar, schar >*/, 0/*addWeightedImpl<int, schar, ushort>*/, 0/*addWeightedImpl<int, schar, short >*/, 0/*addWeightedImpl<int, schar, int >*/, 0/*addWeightedImpl<int, schar, float >*/, 0/*addWeightedImpl<int, schar, double>*/ }, { 0/*addWeightedImpl<int, ushort, uchar >*/, 0/*addWeightedImpl<int, ushort, schar >*/, 0/*addWeightedImpl<int, ushort, ushort>*/, 0/*addWeightedImpl<int, ushort, short >*/, 0/*addWeightedImpl<int, ushort, int >*/, 0/*addWeightedImpl<int, ushort, float >*/, 0/*addWeightedImpl<int, ushort, double>*/ }, { 0/*addWeightedImpl<int, short, uchar >*/, 0/*addWeightedImpl<int, short, schar >*/, 0/*addWeightedImpl<int, short, ushort>*/, 
0/*addWeightedImpl<int, short, short >*/, 0/*addWeightedImpl<int, short, int >*/, 0/*addWeightedImpl<int, short, float >*/, 0/*addWeightedImpl<int, short, double>*/ }, { addWeightedImpl<int, int, uchar >, addWeightedImpl<int, int, schar >, addWeightedImpl<int, int, ushort>, addWeightedImpl<int, int, short >, addWeightedImpl<int, int, int >, addWeightedImpl<int, int, float >, addWeightedImpl<int, int, double> }, { addWeightedImpl<int, float, uchar >, addWeightedImpl<int, float, schar >, addWeightedImpl<int, float, ushort>, addWeightedImpl<int, float, short >, addWeightedImpl<int, float, int >, addWeightedImpl<int, float, float >, addWeightedImpl<int, float, double> }, { addWeightedImpl<int, double, uchar >, addWeightedImpl<int, double, schar >, addWeightedImpl<int, double, ushort>, addWeightedImpl<int, double, short >, addWeightedImpl<int, double, int >, addWeightedImpl<int, double, float >, addWeightedImpl<int, double, double> } }, { { 0/*addWeightedImpl<float, uchar, uchar >*/, 0/*addWeightedImpl<float, uchar, schar >*/, 0/*addWeightedImpl<float, uchar, ushort>*/, 0/*addWeightedImpl<float, uchar, short >*/, 0/*addWeightedImpl<float, uchar, int >*/, 0/*addWeightedImpl<float, uchar, float >*/, 0/*addWeightedImpl<float, uchar, double>*/ }, { 0/*addWeightedImpl<float, schar, uchar >*/, 0/*addWeightedImpl<float, schar, schar >*/, 0/*addWeightedImpl<float, schar, ushort>*/, 0/*addWeightedImpl<float, schar, short >*/, 0/*addWeightedImpl<float, schar, int >*/, 0/*addWeightedImpl<float, schar, float >*/, 0/*addWeightedImpl<float, schar, double>*/ }, { 0/*addWeightedImpl<float, ushort, uchar >*/, 0/*addWeightedImpl<float, ushort, schar >*/, 0/*addWeightedImpl<float, ushort, ushort>*/, 0/*addWeightedImpl<float, ushort, short >*/, 0/*addWeightedImpl<float, ushort, int >*/, 0/*addWeightedImpl<float, ushort, float >*/, 0/*addWeightedImpl<float, ushort, double>*/ }, { 0/*addWeightedImpl<float, short, uchar >*/, 0/*addWeightedImpl<float, short, schar >*/, 0/*addWeightedImpl<float, short, ushort>*/, 0/*addWeightedImpl<float, short, short >*/, 0/*addWeightedImpl<float, short, int >*/, 0/*addWeightedImpl<float, short, float >*/, 0/*addWeightedImpl<float, short, double>*/ }, { 0/*addWeightedImpl<float, int, uchar >*/, 0/*addWeightedImpl<float, int, schar >*/, 0/*addWeightedImpl<float, int, ushort>*/, 0/*addWeightedImpl<float, int, short >*/, 0/*addWeightedImpl<float, int, int >*/, 0/*addWeightedImpl<float, int, float >*/, 0/*addWeightedImpl<float, int, double>*/ }, { addWeightedImpl<float, float, uchar >, addWeightedImpl<float, float, schar >, addWeightedImpl<float, float, ushort>, addWeightedImpl<float, float, short >, addWeightedImpl<float, float, int >, addWeightedImpl<float, float, float >, addWeightedImpl<float, float, double> }, { addWeightedImpl<float, double, uchar >, addWeightedImpl<float, double, schar >, addWeightedImpl<float, double, ushort>, addWeightedImpl<float, double, short >, addWeightedImpl<float, double, int >, addWeightedImpl<float, double, float >, addWeightedImpl<float, double, double> } }, { { 0/*addWeightedImpl<double, uchar, uchar >*/, 0/*addWeightedImpl<double, uchar, schar >*/, 0/*addWeightedImpl<double, uchar, ushort>*/, 0/*addWeightedImpl<double, uchar, short >*/, 0/*addWeightedImpl<double, uchar, int >*/, 0/*addWeightedImpl<double, uchar, float >*/, 0/*addWeightedImpl<double, uchar, double>*/ }, { 0/*addWeightedImpl<double, schar, uchar >*/, 0/*addWeightedImpl<double, schar, schar >*/, 0/*addWeightedImpl<double, schar, ushort>*/, 0/*addWeightedImpl<double, schar, short >*/, 
0/*addWeightedImpl<double, schar, int >*/, 0/*addWeightedImpl<double, schar, float >*/, 0/*addWeightedImpl<double, schar, double>*/ }, { 0/*addWeightedImpl<double, ushort, uchar >*/, 0/*addWeightedImpl<double, ushort, schar >*/, 0/*addWeightedImpl<double, ushort, ushort>*/, 0/*addWeightedImpl<double, ushort, short >*/, 0/*addWeightedImpl<double, ushort, int >*/, 0/*addWeightedImpl<double, ushort, float >*/, 0/*addWeightedImpl<double, ushort, double>*/ }, { 0/*addWeightedImpl<double, short, uchar >*/, 0/*addWeightedImpl<double, short, schar >*/, 0/*addWeightedImpl<double, short, ushort>*/, 0/*addWeightedImpl<double, short, short >*/, 0/*addWeightedImpl<double, short, int >*/, 0/*addWeightedImpl<double, short, float >*/, 0/*addWeightedImpl<double, short, double>*/ }, { 0/*addWeightedImpl<double, int, uchar >*/, 0/*addWeightedImpl<double, int, schar >*/, 0/*addWeightedImpl<double, int, ushort>*/, 0/*addWeightedImpl<double, int, short >*/, 0/*addWeightedImpl<double, int, int >*/, 0/*addWeightedImpl<double, int, float >*/, 0/*addWeightedImpl<double, int, double>*/ }, { 0/*addWeightedImpl<double, float, uchar >*/, 0/*addWeightedImpl<double, float, schar >*/, 0/*addWeightedImpl<double, float, ushort>*/, 0/*addWeightedImpl<double, float, short >*/, 0/*addWeightedImpl<double, float, int >*/, 0/*addWeightedImpl<double, float, float >*/, 0/*addWeightedImpl<double, float, double>*/ }, { addWeightedImpl<double, double, uchar >, addWeightedImpl<double, double, schar >, addWeightedImpl<double, double, ushort>, addWeightedImpl<double, double, short >, addWeightedImpl<double, double, int >, addWeightedImpl<double, double, float >, addWeightedImpl<double, double, double> } } }; GpuMat src1 = _src1.getGpuMat(); GpuMat src2 = _src2.getGpuMat(); int sdepth1 = src1.depth(); int sdepth2 = src2.depth(); ddepth = ddepth >= 0 ? CV_MAT_DEPTH(ddepth) : ::max(sdepth1, sdepth2); const int cn = src1.channels(); CV_DbgAssert( src2.size() == src1.size() && src2.channels() == cn ); CV_DbgAssert( sdepth1 <= CV_64F && sdepth2 <= CV_64F && ddepth <= CV_64F ); _dst.create(src1.size(), CV_MAKE_TYPE(ddepth, cn)); GpuMat dst = _dst.getGpuMat(); GpuMat src1_ = src1.reshape(1); GpuMat src2_ = src2.reshape(1); GpuMat dst_ = dst.reshape(1); if (sdepth1 > sdepth2) { src1_.swap(src2_); std::swap(alpha, beta); std::swap(sdepth1, sdepth2); } const func_t func = funcs[sdepth1][sdepth2][ddepth]; if (!func) CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types"); func(src1_, alpha, src2_, beta, gamma, dst_, stream); } #endif
dc243f13215261d21dcc2c878ea42b5ddfcf9393.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include "opencv2/cudaarithm.hpp" #include "opencv2/cudev.hpp" using namespace cv::cudev; namespace { template <typename T1, typename T2, typename D, typename S> struct AddWeightedOp : binary_function<T1, T2, D> { S alpha; S beta; S gamma; __device__ __forceinline__ D operator ()(T1 a, T2 b) const { return saturate_cast<D>(a * alpha + b * beta + gamma); } }; template <typename ScalarDepth> struct TransformPolicy : DefaultTransformPolicy { }; template <> struct TransformPolicy<double> : DefaultTransformPolicy { enum { shift = 1 }; }; template <typename T1, typename T2, typename D> void addWeightedImpl(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream) { typedef typename LargerType<T1, T2>::type larger_type1; typedef typename LargerType<larger_type1, D>::type larger_type2; typedef typename LargerType<larger_type2, float>::type scalar_type; AddWeightedOp<T1, T2, D, scalar_type> op; op.alpha = static_cast<scalar_type>(alpha); op.beta = static_cast<scalar_type>(beta); op.gamma = static_cast<scalar_type>(gamma); gridTransformBinary_< TransformPolicy<scalar_type> >(globPtr<T1>(src1), globPtr<T2>(src2), globPtr<D>(dst), op, stream); } } void cv::cuda::addWeighted(InputArray _src1, double alpha, InputArray _src2, double beta, double gamma, OutputArray _dst, int ddepth, Stream& stream) { typedef void (*func_t)(const GpuMat& src1, double alpha, const GpuMat& src2, double beta, double gamma, GpuMat& dst, Stream& stream); static const func_t funcs[7][7][7] = { { { addWeightedImpl<uchar, uchar, uchar >, addWeightedImpl<uchar, uchar, schar >, addWeightedImpl<uchar, uchar, ushort>, addWeightedImpl<uchar, uchar, short >, addWeightedImpl<uchar, uchar, int >, addWeightedImpl<uchar, uchar, float >, addWeightedImpl<uchar, uchar, double> }, { addWeightedImpl<uchar, schar, uchar >, addWeightedImpl<uchar, schar, schar >, addWeightedImpl<uchar, schar, ushort>, addWeightedImpl<uchar, schar, short >, addWeightedImpl<uchar, schar, int >, addWeightedImpl<uchar, schar, float >, addWeightedImpl<uchar, schar, double> }, { addWeightedImpl<uchar, ushort, uchar >, addWeightedImpl<uchar, ushort, schar >, addWeightedImpl<uchar, ushort, ushort>, addWeightedImpl<uchar, ushort, short >, addWeightedImpl<uchar, ushort, int >, addWeightedImpl<uchar, ushort, float >, addWeightedImpl<uchar, ushort, double> }, { addWeightedImpl<uchar, short, uchar >, addWeightedImpl<uchar, short, schar >, addWeightedImpl<uchar, short, ushort>, addWeightedImpl<uchar, short, short >, addWeightedImpl<uchar, short, int >, addWeightedImpl<uchar, short, float >, addWeightedImpl<uchar, short, double> }, { addWeightedImpl<uchar, int, uchar >, addWeightedImpl<uchar, int, schar >, addWeightedImpl<uchar, int, ushort>, addWeightedImpl<uchar, int, short >, addWeightedImpl<uchar, int, int >, addWeightedImpl<uchar, int, float >, addWeightedImpl<uchar, int, double> }, { addWeightedImpl<uchar, float, uchar >, addWeightedImpl<uchar, float, schar >, addWeightedImpl<uchar, float, ushort>, addWeightedImpl<uchar, float, short >, addWeightedImpl<uchar, float, int >, addWeightedImpl<uchar, float, float >, addWeightedImpl<uchar, float, double> }, { addWeightedImpl<uchar, double, uchar >, addWeightedImpl<uchar, double, schar >, addWeightedImpl<uchar, double, ushort>, addWeightedImpl<uchar, double, short >, addWeightedImpl<uchar, double, int >, addWeightedImpl<uchar, double, float >, addWeightedImpl<uchar, 
double, double> } }, { { 0/*addWeightedImpl<schar, uchar, uchar >*/, 0/*addWeightedImpl<schar, uchar, schar >*/, 0/*addWeightedImpl<schar, uchar, ushort>*/, 0/*addWeightedImpl<schar, uchar, short >*/, 0/*addWeightedImpl<schar, uchar, int >*/, 0/*addWeightedImpl<schar, uchar, float >*/, 0/*addWeightedImpl<schar, uchar, double>*/ }, { addWeightedImpl<schar, schar, uchar >, addWeightedImpl<schar, schar, schar >, addWeightedImpl<schar, schar, ushort>, addWeightedImpl<schar, schar, short >, addWeightedImpl<schar, schar, int >, addWeightedImpl<schar, schar, float >, addWeightedImpl<schar, schar, double> }, { addWeightedImpl<schar, ushort, uchar >, addWeightedImpl<schar, ushort, schar >, addWeightedImpl<schar, ushort, ushort>, addWeightedImpl<schar, ushort, short >, addWeightedImpl<schar, ushort, int >, addWeightedImpl<schar, ushort, float >, addWeightedImpl<schar, ushort, double> }, { addWeightedImpl<schar, short, uchar >, addWeightedImpl<schar, short, schar >, addWeightedImpl<schar, short, ushort>, addWeightedImpl<schar, short, short >, addWeightedImpl<schar, short, int >, addWeightedImpl<schar, short, float >, addWeightedImpl<schar, short, double> }, { addWeightedImpl<schar, int, uchar >, addWeightedImpl<schar, int, schar >, addWeightedImpl<schar, int, ushort>, addWeightedImpl<schar, int, short >, addWeightedImpl<schar, int, int >, addWeightedImpl<schar, int, float >, addWeightedImpl<schar, int, double> }, { addWeightedImpl<schar, float, uchar >, addWeightedImpl<schar, float, schar >, addWeightedImpl<schar, float, ushort>, addWeightedImpl<schar, float, short >, addWeightedImpl<schar, float, int >, addWeightedImpl<schar, float, float >, addWeightedImpl<schar, float, double> }, { addWeightedImpl<schar, double, uchar >, addWeightedImpl<schar, double, schar >, addWeightedImpl<schar, double, ushort>, addWeightedImpl<schar, double, short >, addWeightedImpl<schar, double, int >, addWeightedImpl<schar, double, float >, addWeightedImpl<schar, double, double> } }, { { 0/*addWeightedImpl<ushort, uchar, uchar >*/, 0/*addWeightedImpl<ushort, uchar, schar >*/, 0/*addWeightedImpl<ushort, uchar, ushort>*/, 0/*addWeightedImpl<ushort, uchar, short >*/, 0/*addWeightedImpl<ushort, uchar, int >*/, 0/*addWeightedImpl<ushort, uchar, float >*/, 0/*addWeightedImpl<ushort, uchar, double>*/ }, { 0/*addWeightedImpl<ushort, schar, uchar >*/, 0/*addWeightedImpl<ushort, schar, schar >*/, 0/*addWeightedImpl<ushort, schar, ushort>*/, 0/*addWeightedImpl<ushort, schar, short >*/, 0/*addWeightedImpl<ushort, schar, int >*/, 0/*addWeightedImpl<ushort, schar, float >*/, 0/*addWeightedImpl<ushort, schar, double>*/ }, { addWeightedImpl<ushort, ushort, uchar >, addWeightedImpl<ushort, ushort, schar >, addWeightedImpl<ushort, ushort, ushort>, addWeightedImpl<ushort, ushort, short >, addWeightedImpl<ushort, ushort, int >, addWeightedImpl<ushort, ushort, float >, addWeightedImpl<ushort, ushort, double> }, { addWeightedImpl<ushort, short, uchar >, addWeightedImpl<ushort, short, schar >, addWeightedImpl<ushort, short, ushort>, addWeightedImpl<ushort, short, short >, addWeightedImpl<ushort, short, int >, addWeightedImpl<ushort, short, float >, addWeightedImpl<ushort, short, double> }, { addWeightedImpl<ushort, int, uchar >, addWeightedImpl<ushort, int, schar >, addWeightedImpl<ushort, int, ushort>, addWeightedImpl<ushort, int, short >, addWeightedImpl<ushort, int, int >, addWeightedImpl<ushort, int, float >, addWeightedImpl<ushort, int, double> }, { addWeightedImpl<ushort, float, uchar >, addWeightedImpl<ushort, float, schar >, 
addWeightedImpl<ushort, float, ushort>, addWeightedImpl<ushort, float, short >, addWeightedImpl<ushort, float, int >, addWeightedImpl<ushort, float, float >, addWeightedImpl<ushort, float, double> }, { addWeightedImpl<ushort, double, uchar >, addWeightedImpl<ushort, double, schar >, addWeightedImpl<ushort, double, ushort>, addWeightedImpl<ushort, double, short >, addWeightedImpl<ushort, double, int >, addWeightedImpl<ushort, double, float >, addWeightedImpl<ushort, double, double> } }, { { 0/*addWeightedImpl<short, uchar, uchar >*/, 0/*addWeightedImpl<short, uchar, schar >*/, 0/*addWeightedImpl<short, uchar, ushort>*/, 0/*addWeightedImpl<short, uchar, short >*/, 0/*addWeightedImpl<short, uchar, int >*/, 0/*addWeightedImpl<short, uchar, float >*/, 0/*addWeightedImpl<short, uchar, double>*/ }, { 0/*addWeightedImpl<short, schar, uchar >*/, 0/*addWeightedImpl<short, schar, schar >*/, 0/*addWeightedImpl<short, schar, ushort>*/, 0/*addWeightedImpl<short, schar, short >*/, 0/*addWeightedImpl<short, schar, int >*/, 0/*addWeightedImpl<short, schar, float >*/, 0/*addWeightedImpl<short, schar, double>*/ }, { 0/*addWeightedImpl<short, ushort, uchar >*/, 0/*addWeightedImpl<short, ushort, schar >*/, 0/*addWeightedImpl<short, ushort, ushort>*/, 0/*addWeightedImpl<short, ushort, short >*/, 0/*addWeightedImpl<short, ushort, int >*/, 0/*addWeightedImpl<short, ushort, float >*/, 0/*addWeightedImpl<short, ushort, double>*/ }, { addWeightedImpl<short, short, uchar >, addWeightedImpl<short, short, schar >, addWeightedImpl<short, short, ushort>, addWeightedImpl<short, short, short >, addWeightedImpl<short, short, int >, addWeightedImpl<short, short, float >, addWeightedImpl<short, short, double> }, { addWeightedImpl<short, int, uchar >, addWeightedImpl<short, int, schar >, addWeightedImpl<short, int, ushort>, addWeightedImpl<short, int, short >, addWeightedImpl<short, int, int >, addWeightedImpl<short, int, float >, addWeightedImpl<short, int, double> }, { addWeightedImpl<short, float, uchar >, addWeightedImpl<short, float, schar >, addWeightedImpl<short, float, ushort>, addWeightedImpl<short, float, short >, addWeightedImpl<short, float, int >, addWeightedImpl<short, float, float >, addWeightedImpl<short, float, double> }, { addWeightedImpl<short, double, uchar >, addWeightedImpl<short, double, schar >, addWeightedImpl<short, double, ushort>, addWeightedImpl<short, double, short >, addWeightedImpl<short, double, int >, addWeightedImpl<short, double, float >, addWeightedImpl<short, double, double> } }, { { 0/*addWeightedImpl<int, uchar, uchar >*/, 0/*addWeightedImpl<int, uchar, schar >*/, 0/*addWeightedImpl<int, uchar, ushort>*/, 0/*addWeightedImpl<int, uchar, short >*/, 0/*addWeightedImpl<int, uchar, int >*/, 0/*addWeightedImpl<int, uchar, float >*/, 0/*addWeightedImpl<int, uchar, double>*/ }, { 0/*addWeightedImpl<int, schar, uchar >*/, 0/*addWeightedImpl<int, schar, schar >*/, 0/*addWeightedImpl<int, schar, ushort>*/, 0/*addWeightedImpl<int, schar, short >*/, 0/*addWeightedImpl<int, schar, int >*/, 0/*addWeightedImpl<int, schar, float >*/, 0/*addWeightedImpl<int, schar, double>*/ }, { 0/*addWeightedImpl<int, ushort, uchar >*/, 0/*addWeightedImpl<int, ushort, schar >*/, 0/*addWeightedImpl<int, ushort, ushort>*/, 0/*addWeightedImpl<int, ushort, short >*/, 0/*addWeightedImpl<int, ushort, int >*/, 0/*addWeightedImpl<int, ushort, float >*/, 0/*addWeightedImpl<int, ushort, double>*/ }, { 0/*addWeightedImpl<int, short, uchar >*/, 0/*addWeightedImpl<int, short, schar >*/, 0/*addWeightedImpl<int, short, ushort>*/, 
0/*addWeightedImpl<int, short, short >*/, 0/*addWeightedImpl<int, short, int >*/, 0/*addWeightedImpl<int, short, float >*/, 0/*addWeightedImpl<int, short, double>*/ }, { addWeightedImpl<int, int, uchar >, addWeightedImpl<int, int, schar >, addWeightedImpl<int, int, ushort>, addWeightedImpl<int, int, short >, addWeightedImpl<int, int, int >, addWeightedImpl<int, int, float >, addWeightedImpl<int, int, double> }, { addWeightedImpl<int, float, uchar >, addWeightedImpl<int, float, schar >, addWeightedImpl<int, float, ushort>, addWeightedImpl<int, float, short >, addWeightedImpl<int, float, int >, addWeightedImpl<int, float, float >, addWeightedImpl<int, float, double> }, { addWeightedImpl<int, double, uchar >, addWeightedImpl<int, double, schar >, addWeightedImpl<int, double, ushort>, addWeightedImpl<int, double, short >, addWeightedImpl<int, double, int >, addWeightedImpl<int, double, float >, addWeightedImpl<int, double, double> } }, { { 0/*addWeightedImpl<float, uchar, uchar >*/, 0/*addWeightedImpl<float, uchar, schar >*/, 0/*addWeightedImpl<float, uchar, ushort>*/, 0/*addWeightedImpl<float, uchar, short >*/, 0/*addWeightedImpl<float, uchar, int >*/, 0/*addWeightedImpl<float, uchar, float >*/, 0/*addWeightedImpl<float, uchar, double>*/ }, { 0/*addWeightedImpl<float, schar, uchar >*/, 0/*addWeightedImpl<float, schar, schar >*/, 0/*addWeightedImpl<float, schar, ushort>*/, 0/*addWeightedImpl<float, schar, short >*/, 0/*addWeightedImpl<float, schar, int >*/, 0/*addWeightedImpl<float, schar, float >*/, 0/*addWeightedImpl<float, schar, double>*/ }, { 0/*addWeightedImpl<float, ushort, uchar >*/, 0/*addWeightedImpl<float, ushort, schar >*/, 0/*addWeightedImpl<float, ushort, ushort>*/, 0/*addWeightedImpl<float, ushort, short >*/, 0/*addWeightedImpl<float, ushort, int >*/, 0/*addWeightedImpl<float, ushort, float >*/, 0/*addWeightedImpl<float, ushort, double>*/ }, { 0/*addWeightedImpl<float, short, uchar >*/, 0/*addWeightedImpl<float, short, schar >*/, 0/*addWeightedImpl<float, short, ushort>*/, 0/*addWeightedImpl<float, short, short >*/, 0/*addWeightedImpl<float, short, int >*/, 0/*addWeightedImpl<float, short, float >*/, 0/*addWeightedImpl<float, short, double>*/ }, { 0/*addWeightedImpl<float, int, uchar >*/, 0/*addWeightedImpl<float, int, schar >*/, 0/*addWeightedImpl<float, int, ushort>*/, 0/*addWeightedImpl<float, int, short >*/, 0/*addWeightedImpl<float, int, int >*/, 0/*addWeightedImpl<float, int, float >*/, 0/*addWeightedImpl<float, int, double>*/ }, { addWeightedImpl<float, float, uchar >, addWeightedImpl<float, float, schar >, addWeightedImpl<float, float, ushort>, addWeightedImpl<float, float, short >, addWeightedImpl<float, float, int >, addWeightedImpl<float, float, float >, addWeightedImpl<float, float, double> }, { addWeightedImpl<float, double, uchar >, addWeightedImpl<float, double, schar >, addWeightedImpl<float, double, ushort>, addWeightedImpl<float, double, short >, addWeightedImpl<float, double, int >, addWeightedImpl<float, double, float >, addWeightedImpl<float, double, double> } }, { { 0/*addWeightedImpl<double, uchar, uchar >*/, 0/*addWeightedImpl<double, uchar, schar >*/, 0/*addWeightedImpl<double, uchar, ushort>*/, 0/*addWeightedImpl<double, uchar, short >*/, 0/*addWeightedImpl<double, uchar, int >*/, 0/*addWeightedImpl<double, uchar, float >*/, 0/*addWeightedImpl<double, uchar, double>*/ }, { 0/*addWeightedImpl<double, schar, uchar >*/, 0/*addWeightedImpl<double, schar, schar >*/, 0/*addWeightedImpl<double, schar, ushort>*/, 0/*addWeightedImpl<double, schar, short >*/, 
0/*addWeightedImpl<double, schar, int >*/, 0/*addWeightedImpl<double, schar, float >*/, 0/*addWeightedImpl<double, schar, double>*/ }, { 0/*addWeightedImpl<double, ushort, uchar >*/, 0/*addWeightedImpl<double, ushort, schar >*/, 0/*addWeightedImpl<double, ushort, ushort>*/, 0/*addWeightedImpl<double, ushort, short >*/, 0/*addWeightedImpl<double, ushort, int >*/, 0/*addWeightedImpl<double, ushort, float >*/, 0/*addWeightedImpl<double, ushort, double>*/ }, { 0/*addWeightedImpl<double, short, uchar >*/, 0/*addWeightedImpl<double, short, schar >*/, 0/*addWeightedImpl<double, short, ushort>*/, 0/*addWeightedImpl<double, short, short >*/, 0/*addWeightedImpl<double, short, int >*/, 0/*addWeightedImpl<double, short, float >*/, 0/*addWeightedImpl<double, short, double>*/ }, { 0/*addWeightedImpl<double, int, uchar >*/, 0/*addWeightedImpl<double, int, schar >*/, 0/*addWeightedImpl<double, int, ushort>*/, 0/*addWeightedImpl<double, int, short >*/, 0/*addWeightedImpl<double, int, int >*/, 0/*addWeightedImpl<double, int, float >*/, 0/*addWeightedImpl<double, int, double>*/ }, { 0/*addWeightedImpl<double, float, uchar >*/, 0/*addWeightedImpl<double, float, schar >*/, 0/*addWeightedImpl<double, float, ushort>*/, 0/*addWeightedImpl<double, float, short >*/, 0/*addWeightedImpl<double, float, int >*/, 0/*addWeightedImpl<double, float, float >*/, 0/*addWeightedImpl<double, float, double>*/ }, { addWeightedImpl<double, double, uchar >, addWeightedImpl<double, double, schar >, addWeightedImpl<double, double, ushort>, addWeightedImpl<double, double, short >, addWeightedImpl<double, double, int >, addWeightedImpl<double, double, float >, addWeightedImpl<double, double, double> } } }; GpuMat src1 = _src1.getGpuMat(); GpuMat src2 = _src2.getGpuMat(); int sdepth1 = src1.depth(); int sdepth2 = src2.depth(); ddepth = ddepth >= 0 ? CV_MAT_DEPTH(ddepth) : std::max(sdepth1, sdepth2); const int cn = src1.channels(); CV_DbgAssert( src2.size() == src1.size() && src2.channels() == cn ); CV_DbgAssert( sdepth1 <= CV_64F && sdepth2 <= CV_64F && ddepth <= CV_64F ); _dst.create(src1.size(), CV_MAKE_TYPE(ddepth, cn)); GpuMat dst = _dst.getGpuMat(); GpuMat src1_ = src1.reshape(1); GpuMat src2_ = src2.reshape(1); GpuMat dst_ = dst.reshape(1); if (sdepth1 > sdepth2) { src1_.swap(src2_); std::swap(alpha, beta); std::swap(sdepth1, sdepth2); } const func_t func = funcs[sdepth1][sdepth2][ddepth]; if (!func) CV_Error(cv::Error::StsUnsupportedFormat, "Unsupported combination of source and destination types"); func(src1_, alpha, src2_, beta, gamma, dst_, stream); } #endif
29033e8bfa0667c5d1572788552801a97085f965.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "../benchapi.h" #define FLT_MAX 3.40282347e+38 typedef struct { int *d_membership; float *d_clusters; float *d_features; float *d_features_flipped; } kmeans_conf_t; // t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]... texture<float, 1, hipReadModeElementType> t_features; // t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1] texture<float, 1, hipReadModeElementType> t_features_flipped; texture<float, 1, hipReadModeElementType> t_clusters; #define ASSUMED_NR_CLUSTERS 32 /* constant memory for cluster centers */ __constant__ float c_clusters[ASSUMED_NR_CLUSTERS * 34]; __device__ static void kmeansPoints(float *features, int nfeatures, int npoints, int nclusters, int niters, int *membership, float *clusters) { const unsigned block_id = get_gridDimX() * get_blockIdxY() + get_blockIdxX(); // point/thread ID const unsigned int point_id = block_id * get_blockDimX() * get_blockDimY() + get_threadIdxX(); int index = -1; int i, j; float min_dist = FLT_MAX; float dist; /* distance square between a point to cluster center */ for (int k = 0; k < niters; k++) { /* find the cluster center id with min distance to pt */ for (i = 0; i < nclusters; i++) { /* base index of cluster centers for inverted array */ int cluster_base_index = i * nfeatures; /* Euclidean distance sqaure */ float ans = 0.0; for (j = 0; j < nfeatures; j++) { /* appropriate index of data point */ int addr = point_id + j * npoints; /* distance between a data point to cluster centers */ float diff = (tex1Dfetch(t_features, addr) - c_clusters[cluster_base_index + j]); /* sum of squares */ ans += diff * diff; } dist = ans; /* see if distance is smaller than previous ones: * if so, change minimum distance and save index of cluster center */ if (dist < min_dist) { min_dist = dist; index = i; } } } if (point_id < npoints) { /* assign the membership to object point_id */ membership[point_id] = index; } } __device__ int kmeans(void *args[]) { int npoints_per_thread = (int)(long long)args[0]; int nclusters = (int)(long long)args[1]; int nfeatures = (int)(long long)args[2]; int niters = (int)(long long)args[3]; kmeans_conf_t *pkmc = (kmeans_conf_t *)args[4]; int npoints = npoints_per_thread * get_gridDimX() * get_gridDimY() * get_blockDimX() * get_blockDimY(); kmeansPoints(pkmc->d_features, nfeatures, npoints, nclusters, niters, pkmc->d_membership, pkmc->d_clusters); return 0; } static void init_membership(int npoints, kmeans_conf_t *pkmc) { int *membership; int i; membership = (int *)malloc(npoints * sizeof(int)); for (i = 0; i < npoints; i++) membership[i] = -1; hipMalloc(&pkmc->d_membership, npoints * sizeof(int)); hipMemcpy(pkmc->d_membership, membership, npoints * sizeof(int), hipMemcpyHostToDevice); free(membership); } static void invert_mapping(float *output, float *input, int npoints, int nfeatures) { int i; for (i = 0; i < npoints; i++) { int j; for (j = 0; j < nfeatures; j++) output[i + npoints * j] = input[i * nfeatures + j]; } } static float * setup_features(kmeans_conf_t *pkmc, int npoints, int nfeatures) { float *features, *features_inverted; int i; features = (float *)malloc(npoints * nfeatures * sizeof(float)); for (i = 0; i < npoints * nfeatures; i++) { features[i] = rand() / rand(); } features_inverted = (float *)malloc(npoints * nfeatures * sizeof(float)); invert_mapping(features_inverted, features, npoints, nfeatures); hipMalloc(&pkmc->d_features, npoints * nfeatures * sizeof(float)); 
hipMemcpy(pkmc->d_features, features_inverted, nfeatures * sizeof(float), hipMemcpyHostToDevice); hipMalloc(&pkmc->d_features_flipped, nfeatures * sizeof(float)); hipMemcpy(pkmc->d_features_flipped, features, nfeatures * sizeof(float), hipMemcpyHostToDevice); free(features_inverted); return features; } static void setup_clusters(kmeans_conf_t *pkmc, float *features, int npoints, int nclusters, int nfeatures) { float *clusters; int i; clusters = (float *)malloc(nclusters * nfeatures * sizeof(float)); hipMalloc(&pkmc->d_clusters, nclusters * nfeatures * sizeof(float)); /* copy clusters (host to device) */ hipMemcpy(pkmc->d_clusters, clusters, nclusters * nfeatures * sizeof(float), hipMemcpyHostToDevice); /* randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { int n = (int)rand() % npoints; int j; for (j = 0; j < nfeatures; j++) { clusters[i * nfeatures + j] = features[n * nfeatures + j]; // remapped } } free(clusters); } static int cookarg_kmeans(dim3 dimGrid, dim3 dimBlock, void *args[]) { int npoints_per_thread = (int)(long long)args[0]; int nclusters = (int)(long long)args[1]; int nfeatures = (int)(long long)args[2]; int npoints = npoints_per_thread * dimGrid.x * dimGrid.y * dimBlock.x * dimBlock.y; kmeans_conf_t kmc, *d_pkmc; float *features; init_membership(npoints, &kmc); features = setup_features(&kmc, npoints, nfeatures); setup_clusters(&kmc, features, npoints, nclusters, nfeatures); /* set up texture */ hipChannelFormatDesc chDesc0 = hipCreateChannelDesc<float>(); t_features.filterMode = hipFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; hipBindTexture(NULL, &t_features, kmc.d_features, &chDesc0, npoints * nfeatures * sizeof(float)); hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<float>(); t_features_flipped.filterMode = hipFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; hipBindTexture(NULL, &t_features_flipped, kmc.d_features_flipped, &chDesc1, npoints * nfeatures * sizeof(float)); hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<float>(); t_clusters.filterMode = hipFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; hipBindTexture(NULL, &t_clusters, kmc.d_clusters, &chDesc2, nclusters * nfeatures * sizeof(float)); free(features); hipMalloc(&d_pkmc, sizeof(kmeans_conf_t)); hipMemcpy(d_pkmc, &kmc, sizeof(kmeans_conf_t), hipMemcpyHostToDevice); args[4] = d_pkmc; return 0; } int bench_kmeans(dim3 dimGrid, dim3 dimBlock, void *args[]) { vstream_t strm; sk_t sk; int res; cookarg_kmeans(dimGrid, dimBlock, args); strm = create_vstream(); sk = launch_kernel(KMEANS, strm, dimGrid, dimBlock, args); wait_kernel(sk, strm, &res); destroy_vstream(strm); mtbs_cudaFree(args[4]); return res; }
29033e8bfa0667c5d1572788552801a97085f965.cu
#include <stdio.h> #include "../benchapi.h" #define FLT_MAX 3.40282347e+38 typedef struct { int *d_membership; float *d_clusters; float *d_features; float *d_features_flipped; } kmeans_conf_t; // t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]... texture<float, 1, cudaReadModeElementType> t_features; // t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1] texture<float, 1, cudaReadModeElementType> t_features_flipped; texture<float, 1, cudaReadModeElementType> t_clusters; #define ASSUMED_NR_CLUSTERS 32 /* constant memory for cluster centers */ __constant__ float c_clusters[ASSUMED_NR_CLUSTERS * 34]; __device__ static void kmeansPoints(float *features, int nfeatures, int npoints, int nclusters, int niters, int *membership, float *clusters) { const unsigned block_id = get_gridDimX() * get_blockIdxY() + get_blockIdxX(); // point/thread ID const unsigned int point_id = block_id * get_blockDimX() * get_blockDimY() + get_threadIdxX(); int index = -1; int i, j; float min_dist = FLT_MAX; float dist; /* distance square between a point to cluster center */ for (int k = 0; k < niters; k++) { /* find the cluster center id with min distance to pt */ for (i = 0; i < nclusters; i++) { /* base index of cluster centers for inverted array */ int cluster_base_index = i * nfeatures; /* Euclidean distance sqaure */ float ans = 0.0; for (j = 0; j < nfeatures; j++) { /* appropriate index of data point */ int addr = point_id + j * npoints; /* distance between a data point to cluster centers */ float diff = (tex1Dfetch(t_features, addr) - c_clusters[cluster_base_index + j]); /* sum of squares */ ans += diff * diff; } dist = ans; /* see if distance is smaller than previous ones: * if so, change minimum distance and save index of cluster center */ if (dist < min_dist) { min_dist = dist; index = i; } } } if (point_id < npoints) { /* assign the membership to object point_id */ membership[point_id] = index; } } __device__ int kmeans(void *args[]) { int npoints_per_thread = (int)(long long)args[0]; int nclusters = (int)(long long)args[1]; int nfeatures = (int)(long long)args[2]; int niters = (int)(long long)args[3]; kmeans_conf_t *pkmc = (kmeans_conf_t *)args[4]; int npoints = npoints_per_thread * get_gridDimX() * get_gridDimY() * get_blockDimX() * get_blockDimY(); kmeansPoints(pkmc->d_features, nfeatures, npoints, nclusters, niters, pkmc->d_membership, pkmc->d_clusters); return 0; } static void init_membership(int npoints, kmeans_conf_t *pkmc) { int *membership; int i; membership = (int *)malloc(npoints * sizeof(int)); for (i = 0; i < npoints; i++) membership[i] = -1; cudaMalloc(&pkmc->d_membership, npoints * sizeof(int)); cudaMemcpy(pkmc->d_membership, membership, npoints * sizeof(int), cudaMemcpyHostToDevice); free(membership); } static void invert_mapping(float *output, float *input, int npoints, int nfeatures) { int i; for (i = 0; i < npoints; i++) { int j; for (j = 0; j < nfeatures; j++) output[i + npoints * j] = input[i * nfeatures + j]; } } static float * setup_features(kmeans_conf_t *pkmc, int npoints, int nfeatures) { float *features, *features_inverted; int i; features = (float *)malloc(npoints * nfeatures * sizeof(float)); for (i = 0; i < npoints * nfeatures; i++) { features[i] = rand() / rand(); } features_inverted = (float *)malloc(npoints * nfeatures * sizeof(float)); invert_mapping(features_inverted, features, npoints, nfeatures); cudaMalloc(&pkmc->d_features, npoints * nfeatures * sizeof(float)); cudaMemcpy(pkmc->d_features, features_inverted, nfeatures * sizeof(float), 
cudaMemcpyHostToDevice); cudaMalloc(&pkmc->d_features_flipped, nfeatures * sizeof(float)); cudaMemcpy(pkmc->d_features_flipped, features, nfeatures * sizeof(float), cudaMemcpyHostToDevice); free(features_inverted); return features; } static void setup_clusters(kmeans_conf_t *pkmc, float *features, int npoints, int nclusters, int nfeatures) { float *clusters; int i; clusters = (float *)malloc(nclusters * nfeatures * sizeof(float)); cudaMalloc(&pkmc->d_clusters, nclusters * nfeatures * sizeof(float)); /* copy clusters (host to device) */ cudaMemcpy(pkmc->d_clusters, clusters, nclusters * nfeatures * sizeof(float), cudaMemcpyHostToDevice); /* randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { int n = (int)rand() % npoints; int j; for (j = 0; j < nfeatures; j++) { clusters[i * nfeatures + j] = features[n * nfeatures + j]; // remapped } } free(clusters); } static int cookarg_kmeans(dim3 dimGrid, dim3 dimBlock, void *args[]) { int npoints_per_thread = (int)(long long)args[0]; int nclusters = (int)(long long)args[1]; int nfeatures = (int)(long long)args[2]; int npoints = npoints_per_thread * dimGrid.x * dimGrid.y * dimBlock.x * dimBlock.y; kmeans_conf_t kmc, *d_pkmc; float *features; init_membership(npoints, &kmc); features = setup_features(&kmc, npoints, nfeatures); setup_clusters(&kmc, features, npoints, nclusters, nfeatures); /* set up texture */ cudaChannelFormatDesc chDesc0 = cudaCreateChannelDesc<float>(); t_features.filterMode = cudaFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; cudaBindTexture(NULL, &t_features, kmc.d_features, &chDesc0, npoints * nfeatures * sizeof(float)); cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<float>(); t_features_flipped.filterMode = cudaFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; cudaBindTexture(NULL, &t_features_flipped, kmc.d_features_flipped, &chDesc1, npoints * nfeatures * sizeof(float)); cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<float>(); t_clusters.filterMode = cudaFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; cudaBindTexture(NULL, &t_clusters, kmc.d_clusters, &chDesc2, nclusters * nfeatures * sizeof(float)); free(features); cudaMalloc(&d_pkmc, sizeof(kmeans_conf_t)); cudaMemcpy(d_pkmc, &kmc, sizeof(kmeans_conf_t), cudaMemcpyHostToDevice); args[4] = d_pkmc; return 0; } int bench_kmeans(dim3 dimGrid, dim3 dimBlock, void *args[]) { vstream_t strm; sk_t sk; int res; cookarg_kmeans(dimGrid, dimBlock, args); strm = create_vstream(); sk = launch_kernel(KMEANS, strm, dimGrid, dimBlock, args); wait_kernel(sk, strm, &res); destroy_vstream(strm); mtbs_cudaFree(args[4]); return res; }
ff920375bb7eb35c527e7882f04cc48208252705.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include "cudnnUtils.h" #include <ops/declarable/helpers/convolutions.h> namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// static void conv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { // cudnn support only two formats for weights {oC,iC,kH,kW} and {oC,kH,kW,iC} int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream()); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); // input descriptor cudnnTensorDescriptor_t x; cudnnCreateTensorDescriptor(&x); if(input->ews() == 1 && input->ordering() == 'c') err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err); // weights descriptor cudnnFilterDescriptor_t w; cudnnCreateFilterDescriptor(&w); err = cudnnSetFilter4dDescriptor(w, cudnnDataType(weights->dataType()), formatW, oC, iC, kH, kW); if(err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetFilter4dDescriptor failed", err); // output descriptor cudnnTensorDescriptor_t z; cudnnCreateTensorDescriptor(&z); if(output->ews() == 1 && output->ordering() == 'c') err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err); // description of convolution cudnnConvolutionDescriptor_t conv; cudnnCreateConvolutionDescriptor(&conv); err = cudnnSetConvolution2dDescriptor(conv, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetConvolution2dDescriptor failed", err); // algorithm description cudnnConvolutionFwdAlgo_t algo; cudnnConvolutionFwdAlgoPerf_t algoPerf; int count = 0; //err = cudnnGetConvolutionForwardAlgorithm(*handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo); err = cudnnFindConvolutionForwardAlgorithm(*handle, x, w, conv, z, 1, &count, &algoPerf); if (err != 0 || count == 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed", err); algo = algoPerf.algo; // allocate auxiliary device memory, abbreviation ws means workspace size_t wsSize; err = cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardWorkspaceSize failed", err); void* wsData; auto cudaErr = hipMalloc(&wsData, wsSize); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dCUDNN: hipMalloc for auxiliary workspace memory failed", cudaErr); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input, weights, bias}); // run calculation err = cudnnConvolutionForward(*handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnConvolutionForward failed", err); // add bias if it is present if (bias != nullptr) { cudnnTensorDescriptor_t b; cudnnCreateTensorDescriptor(&b); // err = cudnnSetTensor4dDescriptor(b, format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 1: bias->lengthOf()); err = cudnnSetTensor4dDescriptor(b, CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor for bias failed", err); err = cudnnAddTensor(*handle, alpha, b, bias->specialBuffer(), alpha, z, output->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnAddTensor bias failed", err); } // cudaErr = hipStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dCUDNN: hipStreamSynchronize failed !", cudaErr); cudaErr = hipFree(wsData); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dCUDNN: hipFree for auxiliary workspace memory failed", cudaErr); NDArray::registerSpecialUse({output}, {input, weights, bias}); } ////////////////////////////////////////////////////////////////////////// static void conv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: can't set stream for cuDNN", err); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); // input descriptor cudnnTensorDescriptor_t x; cudnnCreateTensorDescriptor(&x); if(input->ews() == 1 && input->ordering() == 'c') err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err); // gradO descriptor cudnnTensorDescriptor_t dz; cudnnCreateTensorDescriptor(&dz); if(gradO->ews() == 1 && gradO->ordering() == 'c') err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err); // gradI descriptor cudnnTensorDescriptor_t dx; cudnnCreateTensorDescriptor(&dx); if(gradI->ews() == 1 && gradI->ordering() == 'c') err = cudnnSetTensor4dDescriptor(dx, format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); else err = cudnnSetTensor4dDescriptorEx(dx, cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradI failed", err); // gradW descriptor cudnnFilterDescriptor_t dw; cudnnCreateFilterDescriptor(&dw); err = cudnnSetFilter4dDescriptor(dw, cudnnDataType(gradW->dataType()), formatW, oC, iC, kH, kW); if(err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetFilter4dDescriptor gradW failed", err); // description of convolution cudnnConvolutionDescriptor_t conv; cudnnCreateConvolutionDescriptor(&conv); err = cudnnSetConvolution2dDescriptor(conv, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetConvolution2dDescriptor failed", err); // gradW algorithm description cudnnConvolutionBwdFilterAlgo_t algoGradW; cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; int count = 0; //err = cudnnGetConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoGradW); err = cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf); if (err != 0 || count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed", err); algoGradW = algoGradWPerf.algo; // gradI algorithm description cudnnConvolutionBwdDataAlgo_t algoGradI; cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; //err = cudnnGetConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoGradI); err = cudnnFindConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, 1, &count, &algoGradIPerf); if (err != 0 || count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed", err); algoGradI = algoGradIPerf.algo; // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace size_t wsGradWSize; err = 
cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterWorkspaceSize failed", err); void* wsGradWData; auto cudaErr = hipMalloc(&wsGradWData, wsGradWSize); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: hipMalloc for auxiliary workspace memory wsGradWData failed", cudaErr); // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace size_t wsGradISize; err = cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataWorkspaceSize failed", err); void* wsGradIData; cudaErr = hipMalloc(&wsGradIData, wsGradISize); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: hipMalloc for auxiliary workspace memory wsGradIData failed", cudaErr); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); // run calculation for gradB (if not nullptr) if(gradB != nullptr) { cudnnTensorDescriptor_t db; cudnnCreateTensorDescriptor(&db); // err = cudnnSetTensor4dDescriptor(db, format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 1: gradB->lengthOf()); err = cudnnSetTensor4dDescriptor(db, CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor for gradB failed", err); err = cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardBias failed", err); } // run calculation for gradW err = cudnnConvolutionBackwardFilter(*handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardFilter failed", err); // run calculation for gradI err = cudnnConvolutionBackwardData(*handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardData failed", err); // cudaErr = hipStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dBpCUDNN: hipStreamSynchronize failed !", cudaErr); cudaErr = hipFree(wsGradWData); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: hipFree for auxiliary workspace memory wsGradWData failed", cudaErr); cudaErr = hipFree(wsGradIData); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: hipFree for auxiliary workspace memory wsGradIData failed", cudaErr); NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] 
(NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) { REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); REQUIRE_TRUE((bias->rankOf() == 1 && bias->strideAt(0) == 1) || (bias->rankOf() == 2 && bias->sizeAt(0) == 1 && bias->strideAt(1) == 1) || (bias->rankOf() == 2 && bias->sizeAt(1) == 1 && bias->strideAt(0) == 1), 0, "CUSTOM CONV2D CUDNN OP: bias array should be contiguous in memory !"); } NDArray* newWeights = weights; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { newWeights = new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext()); newWeights->assign(weights->permute(isNCHW ? 
std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } NDArray* newInput = input; NDArray* newGradI = nullptr; if(paddingMode == 1) // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings checkConv2dCUDNNPadAsymmetric(newInput, newGradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); conv2dCUDNN(block.launchContext(), newInput, newWeights, bias, output, kH,kW,sH,sW,pH,pW,dH,dW, paddingMode, isNCHW, wFormat); if(newInput != input) delete newInput; if(0 == wFormat) delete newWeights; return Status::OK(); } ////////////////////////////////////////////////////////////////////////// PLATFORM_CHECK(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); return paddingMode != 2 && !badInputType && !badWeightsType && !badBiasType; } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] int kH = INT_ARG(0); // filter(kernel) height int kW = INT_ARG(1); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); REQUIRE_TRUE(gradO->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of output's gradients (next epsilon) array must be equal to 4, but got %i instead !", gradO->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); int trueoH, trueoW; // true output height, width ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS,oC,trueoH,trueoW, 0,indIOioC,indOoH,indOoH+1}); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if(bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); NDArray *newWeights = weights, *newGradW = gradW; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { newGradW = new NDArray(gradW->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), gradW->dataType(), gradW->getContext()); newWeights = new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext()); newWeights->assign(weights->permute(isNCHW ? std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } NDArray* newInput = input; NDArray* newGradI = gradI; if(paddingMode == 1) // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings checkConv2dCUDNNPadAsymmetric(newInput, newGradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); conv2dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kH,kW,sH,sW,pH,pW,dH,dW,paddingMode,isNCHW,wFormat); if(0 == wFormat) { newGradW->permutei(isNCHW ? 
std::vector<int>({2,3,1,0}) : std::vector<int>({1,2,3,0})); // (oC, iC, kH, kW --> kH, kW, iC, oC) or (oC, kH, kW, iC --> kH, kW, iC, oC) gradW->assign(newGradW); } if(newInput != input) { if(isNCHW) gradI->assign((*newGradI)({0,0, 0,0, 0,gradI->sizeAt(2), 0,gradI->sizeAt(3)})); else gradI->assign((*newGradI)({0,0, 0,gradI->sizeAt(1), 0,gradI->sizeAt(2), 0,0})); delete newInput; delete newGradI; } if(0 == wFormat) { delete newWeights; delete newGradW; } return Status::OK(); } PLATFORM_CHECK(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; const bool badGradOType = gradO->dataType() != DataType::DOUBLE && gradO->dataType() != DataType::FLOAT32 && gradO->dataType() != DataType::HALF; const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); return isNCHW && paddingMode != 2 && !badInputType && !badWeightsType && !badGradOType && !badBiasType; } // PLATFORM_IMPL(conv2d, ENGINE_CUDA) { // auto handle = reinterpret_cast<cudnnHandle_t *>(block.launchContext()->getCuDnnHandle()); // auto res = cudnnSetStream(*handle, *block.launchContext()->getCudaStream()); // if (res != 0) // throw sd::cuda_exception::build("Can't set stream for cuDNN", res); // auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always // auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] // auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) // NDArray::prepareSpecialUse({output}, {input, weights, bias}); // int sH = INT_ARG(2); // strides height // int sW = INT_ARG(3); // strides width // int pH = INT_ARG(4); // paddings height // int pW = INT_ARG(5); // paddings width // int dH = INT_ARG(6); // dilations height // int dW = INT_ARG(7); // dilations width // int isSameMode = INT_ARG(8); // 0-VALID, 1-SAME // bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC // int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height // int kW = INT_ARG(1) > 0 ? 
INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width // int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; // int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes // ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); // ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, isSameMode); // auto dtype = cudnnDataType(input->dataType()); // cudnnTensorDescriptor_t src; // cudnnCreateTensorDescriptor(&src); // res = cudnnSetTensor4dDescriptorEx(src, dtype, input->sizeAt(0), input->sizeAt(1), input->sizeAt(2), input->sizeAt(3), input->strideAt(0), input->strideAt(1), input->strideAt(2), input->strideAt(3)); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx src failed", res); // // TODO: we definitely want NHWC here as well // cudnnFilterDescriptor_t wght; // cudnnCreateFilterDescriptor(&wght); // res = cudnnSetFilter4dDescriptor(wght, dtype, CUDNN_TENSOR_NCHW, oC, iC, kH, kW); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetFilter4dDescriptor failed", res); // cudnnConvolutionDescriptor_t cdc; // cudnnCreateConvolutionDescriptor(&cdc); // res = cudnnSetConvolution2dDescriptor(cdc, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, dtype); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetConvolution2dDescriptor failed", res); // cudnnTensorDescriptor_t dst; // cudnnCreateTensorDescriptor(&dst); // res = cudnnSetTensor4dDescriptorEx(dst, dtype, output->sizeAt(0), output->sizeAt(1), output->sizeAt(2), output->sizeAt(3), output->strideAt(0), output->strideAt(1), output->strideAt(2), output->strideAt(3)); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx dst failed", res); // // TODO: workspace algorithms are supposed to be faster, so we should use it here if we have enough memory // cudnnConvolutionFwdAlgo_t algo; // res = cudnnGetConvolutionForwardAlgorithm(*handle, src, wght, cdc, dst, CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, &algo); // if (res != 0) // throw sd::cuda_exception::build("cudnnGetConvolutionForwardAlgorithm failed", res); // // TODO: should be float if dtype is half/float, and double otherwise // float alpha = 1.0f; // float beta = 0.0f; // res = cudnnConvolutionForward(*handle, &alpha, src, input->specialBuffer(), wght, weights->specialBuffer(), cdc, algo, nullptr, 0, &beta, dst, output->specialBuffer()); // if (res != 0) // throw sd::cuda_exception::build("cudnnConvolutionForward failed", res); // if (bias != nullptr) { // cudnnTensorDescriptor_t bs; // cudnnCreateTensorDescriptor(&bs); // if (isNCHW) { // res = cudnnSetTensor4dDescriptor(bs, CUDNN_TENSOR_NCHW, dtype, 1, bias->lengthOf(), 1, 1); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx bias NHWC failed", res); // } else { // res = cudnnSetTensor4dDescriptor(bs, CUDNN_TENSOR_NHWC, dtype, 1, 1, 1, bias->lengthOf()); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx bias NHWC failed", res); // } // res = cudnnAddTensor(*handle, &alpha, bs, bias->specialBuffer(), &alpha, dst, output->specialBuffer()); // if (res != 0) // throw sd::cuda_exception::build("cudnnAddTensor failed", res); // } // NDArray::registerSpecialUse({output}, {input, weights, bias}); // return Status::OK(); // } } } }
ff920375bb7eb35c527e7882f04cc48208252705.cu
/******************************************************************************* * Copyright (c) 2019 Konduit K.K. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include "cudnnUtils.h" #include <ops/declarable/helpers/convolutions.h> namespace sd { namespace ops { namespace platforms { ////////////////////////////////////////////////////////////////////////// static void conv2dCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* bias, NDArray* output, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { // cudnn support only two formats for weights {oC,iC,kH,kW} and {oC,kH,kW,iC} int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream()); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: can't set stream for cuDNN", err); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); // input descriptor cudnnTensorDescriptor_t x; cudnnCreateTensorDescriptor(&x); if(input->ews() == 1 && input->ordering() == 'c') err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err); // weights descriptor cudnnFilterDescriptor_t w; cudnnCreateFilterDescriptor(&w); err = cudnnSetFilter4dDescriptor(w, cudnnDataType(weights->dataType()), formatW, oC, iC, kH, kW); if(err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetFilter4dDescriptor failed", err); // output descriptor cudnnTensorDescriptor_t z; cudnnCreateTensorDescriptor(&z); if(output->ews() == 1 && output->ordering() == 'c') err = cudnnSetTensor4dDescriptor(z, format, cudnnDataType(output->dataType()), bS, oC, oH, oW); else err = cudnnSetTensor4dDescriptorEx(z, cudnnDataType(output->dataType()), bS, oC, oH, oW, output->strideAt(0), output->strideAt(indIOioC), output->strideAt(indOoH), output->strideAt(indOoH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for output failed", err); // description of convolution cudnnConvolutionDescriptor_t conv; cudnnCreateConvolutionDescriptor(&conv); err = cudnnSetConvolution2dDescriptor(conv, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(output->dataType())); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetConvolution2dDescriptor failed", err); // algorithm description cudnnConvolutionFwdAlgo_t algo; cudnnConvolutionFwdAlgoPerf_t algoPerf; int count = 0; //err = cudnnGetConvolutionForwardAlgorithm(*handle, x, w, conv, z, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo); err = cudnnFindConvolutionForwardAlgorithm(*handle, x, w, conv, z, 1, &count, &algoPerf); if (err != 0 || count == 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardAlgorithm failed", err); algo = algoPerf.algo; // allocate auxiliary device memory, abbreviation ws means workspace size_t wsSize; err = cudnnGetConvolutionForwardWorkspaceSize(*handle, x, w, conv, z, algo, &wsSize); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnGetConvolutionForwardWorkspaceSize failed", err); void* wsData; auto cudaErr = cudaMalloc(&wsData, wsSize); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudaMalloc for auxiliary workspace memory failed", cudaErr); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = output->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = output->sizeOfT() <= 4 ? 
reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({output}, {input, weights, bias}); // run calculation err = cudnnConvolutionForward(*handle, alpha, x, input->specialBuffer(), w, weights->specialBuffer(), conv, algo, wsData, wsSize, beta, z, output->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnConvolutionForward failed", err); // add bias if it is present if (bias != nullptr) { cudnnTensorDescriptor_t b; cudnnCreateTensorDescriptor(&b); // err = cudnnSetTensor4dDescriptor(b, format, cudnnDataType(bias->dataType()), 1, isNCHW ? bias->lengthOf() : 1, 1, isNCHW ? 1: bias->lengthOf()); err = cudnnSetTensor4dDescriptor(b, CUDNN_TENSOR_NCHW, cudnnDataType(bias->dataType()), 1, oC, 1, 1); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnSetTensor4dDescriptor for bias failed", err); err = cudnnAddTensor(*handle, alpha, b, bias->specialBuffer(), alpha, z, output->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudnnAddTensor bias failed", err); } // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dCUDNN: cudaStreamSynchronize failed !", cudaErr); cudaErr = cudaFree(wsData); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dCUDNN: cudaFree for auxiliary workspace memory failed", cudaErr); NDArray::registerSpecialUse({output}, {input, weights, bias}); } ////////////////////////////////////////////////////////////////////////// static void conv2dBpCUDNN(const LaunchContext* context, const NDArray* input, const NDArray* weights, const NDArray* gradO, NDArray* gradI, NDArray* gradW, NDArray* gradB, const int kH, const int kW, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW, const int paddingMode, const bool isNCHW, const int wFormat) { int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); auto handle = reinterpret_cast<cudnnHandle_t *>(context->getCuDnnHandle()); cudnnStatus_t err = cudnnSetStream(*handle, *context->getCudaStream()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: can't set stream for cuDNN", err); cudnnTensorFormat_t format = isNCHW ? CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC; cudnnTensorFormat_t formatW = 0 == wFormat ? format : (1 == wFormat ? 
CUDNN_TENSOR_NCHW : CUDNN_TENSOR_NHWC); // input descriptor cudnnTensorDescriptor_t x; cudnnCreateTensorDescriptor(&x); if(input->ews() == 1 && input->ordering() == 'c') err = cudnnSetTensor4dDescriptor(x, format, cudnnDataType(input->dataType()), bS, iC, iH, iW); else err = cudnnSetTensor4dDescriptorEx(x, cudnnDataType(input->dataType()), bS, iC, iH, iW, input->strideAt(0), input->strideAt(indIOioC), input->strideAt(indIiH), input->strideAt(indIiH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for input failed", err); // gradO descriptor cudnnTensorDescriptor_t dz; cudnnCreateTensorDescriptor(&dz); if(gradO->ews() == 1 && gradO->ordering() == 'c') err = cudnnSetTensor4dDescriptor(dz, format, cudnnDataType(gradO->dataType()), bS, oC, oH, oW); else err = cudnnSetTensor4dDescriptorEx(dz, cudnnDataType(gradO->dataType()), bS, oC, oH, oW, gradO->strideAt(0), gradO->strideAt(indIOioC), gradO->strideAt(indOoH), gradO->strideAt(indOoH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradO failed", err); // gradI descriptor cudnnTensorDescriptor_t dx; cudnnCreateTensorDescriptor(&dx); if(gradI->ews() == 1 && gradI->ordering() == 'c') err = cudnnSetTensor4dDescriptor(dx, format, cudnnDataType(gradI->dataType()), bS, iC, iH, iW); else err = cudnnSetTensor4dDescriptorEx(dx, cudnnDataType(gradI->dataType()), bS, iC, iH, iW, gradI->strideAt(0), gradI->strideAt(indIOioC), gradI->strideAt(indIiH), gradI->strideAt(indIiH + 1)); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor/cudnnSetTensor4dDescriptorEx for gradI failed", err); // gradW descriptor cudnnFilterDescriptor_t dw; cudnnCreateFilterDescriptor(&dw); err = cudnnSetFilter4dDescriptor(dw, cudnnDataType(gradW->dataType()), formatW, oC, iC, kH, kW); if(err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetFilter4dDescriptor gradW failed", err); // description of convolution cudnnConvolutionDescriptor_t conv; cudnnCreateConvolutionDescriptor(&conv); err = cudnnSetConvolution2dDescriptor(conv, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, cudnnDataType(gradO->dataType())); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetConvolution2dDescriptor failed", err); // gradW algorithm description cudnnConvolutionBwdFilterAlgo_t algoGradW; cudnnConvolutionBwdFilterAlgoPerf_t algoGradWPerf; int count = 0; //err = cudnnGetConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST, 0, &algoGradW); err = cudnnFindConvolutionBackwardFilterAlgorithm(*handle, x, dz, conv, dw, 1, &count, &algoGradWPerf); if (err != 0 || count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterAlgorithm failed", err); algoGradW = algoGradWPerf.algo; // gradI algorithm description cudnnConvolutionBwdDataAlgo_t algoGradI; cudnnConvolutionBwdDataAlgoPerf_t algoGradIPerf; //err = cudnnGetConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST, 0, &algoGradI); err = cudnnFindConvolutionBackwardDataAlgorithm(*handle, dw, dz, conv, x, 1, &count, &algoGradIPerf); if (err != 0 || count == 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataAlgorithm failed", err); algoGradI = algoGradIPerf.algo; // allocate auxiliary device memory for gradW calculation, abbreviation ws means workspace size_t wsGradWSize; err = 
cudnnGetConvolutionBackwardFilterWorkspaceSize(*handle, x, dz, conv, dw, algoGradW, &wsGradWSize); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardFilterWorkspaceSize failed", err); void* wsGradWData; auto cudaErr = cudaMalloc(&wsGradWData, wsGradWSize); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudaMalloc for auxiliary workspace memory wsGradWData failed", cudaErr); // allocate auxiliary device memory for gradI calculation, abbreviation ws means workspace size_t wsGradISize; err = cudnnGetConvolutionBackwardDataWorkspaceSize(*handle, dw, dz, conv, dx, algoGradI, &wsGradISize); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnGetConvolutionBackwardDataWorkspaceSize failed", err); void* wsGradIData; cudaErr = cudaMalloc(&wsGradIData, wsGradISize); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudaMalloc for auxiliary workspace memory wsGradIData failed", cudaErr); // provide scaling parameters const float alpha32(1), beta32(0); const double alpha64(1), beta64(0); const void* alpha = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&alpha32) : reinterpret_cast<const void*>(&alpha64); const void* beta = gradO->sizeOfT() <= 4 ? reinterpret_cast<const void*>(&beta32) : reinterpret_cast<const void*>(&beta64); NDArray::prepareSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); // run calculation for gradB (if not nullptr) if(gradB != nullptr) { cudnnTensorDescriptor_t db; cudnnCreateTensorDescriptor(&db); // err = cudnnSetTensor4dDescriptor(db, format, cudnnDataType(gradB->dataType()), 1, isNCHW ? gradB->lengthOf() : 1, 1, isNCHW ? 1: gradB->lengthOf()); err = cudnnSetTensor4dDescriptor(db, CUDNN_TENSOR_NCHW, cudnnDataType(gradB->dataType()), 1, oC, 1, 1); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnSetTensor4dDescriptor for gradB failed", err); err = cudnnConvolutionBackwardBias(*handle, alpha, dz, gradO->specialBuffer(), beta, db, gradB->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardBias failed", err); } // run calculation for gradW err = cudnnConvolutionBackwardFilter(*handle, alpha, x, input->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradW, wsGradWData, wsGradWSize, beta, dw, gradW->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardFilter failed", err); // run calculation for gradI err = cudnnConvolutionBackwardData(*handle, alpha, dw, weights->specialBuffer(), dz, gradO->specialBuffer(), conv, algoGradI, wsGradIData, wsGradISize, beta, dx, gradI->specialBuffer()); if (err != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudnnConvolutionBackwardData failed", err); // cudaErr = cudaStreamSynchronize(*context->getCudaStream()); // if (cudaErr != 0) // throw cuda_exception::build("conv2dBpCUDNN: cudaStreamSynchronize failed !", cudaErr); cudaErr = cudaFree(wsGradWData); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudaFree for auxiliary workspace memory wsGradWData failed", cudaErr); cudaErr = cudaFree(wsGradIData); if (cudaErr != 0) throw sd::cuda_exception::build("conv2dBpCUDNN: cudaFree for auxiliary workspace memory wsGradIData failed", cudaErr); NDArray::registerSpecialUse({gradI, gradW, gradB}, {input, weights, gradO}); } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, 
iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height int kW = INT_ARG(1) > 0 ? INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if (bias) { REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); REQUIRE_TRUE((bias->rankOf() == 1 && bias->strideAt(0) == 1) || (bias->rankOf() == 2 && bias->sizeAt(0) == 1 && bias->strideAt(1) == 1) || (bias->rankOf() == 2 && bias->sizeAt(1) == 1 && bias->strideAt(0) == 1), 0, "CUSTOM CONV2D CUDNN OP: bias array should be contiguous in memory !"); } NDArray* newWeights = weights; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { newWeights = new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext()); newWeights->assign(weights->permute(isNCHW ? 
std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } NDArray* newInput = input; NDArray* newGradI = nullptr; if(paddingMode == 1) // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings checkConv2dCUDNNPadAsymmetric(newInput, newGradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); conv2dCUDNN(block.launchContext(), newInput, newWeights, bias, output, kH,kW,sH,sW,pH,pW,dH,dW, paddingMode, isNCHW, wFormat); if(newInput != input) delete newInput; if(0 == wFormat) delete newWeights; return Status::OK(); } ////////////////////////////////////////////////////////////////////////// PLATFORM_CHECK(conv2d, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); return paddingMode != 2 && !badInputType && !badWeightsType && !badBiasType; } ////////////////////////////////////////////////////////////////////////// PLATFORM_IMPL(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next auto gradI = OUTPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW), epsilon auto gradW = OUTPUT_VARIABLE(1); // [kH, kW, iC, oC], [oC, iC, kH, kW], [oC, kH, kW, iC] auto gradB = block.width() > 3 ? OUTPUT_VARIABLE(2) : nullptr; // [oC] int kH = INT_ARG(0); // filter(kernel) height int kW = INT_ARG(1); // filter(kernel) width int sH = INT_ARG(2); // strides height int sW = INT_ARG(3); // strides width int pH = INT_ARG(4); // paddings height int pW = INT_ARG(5); // paddings width int dH = INT_ARG(6); // dilations height int dW = INT_ARG(7); // dilations width int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC int wFormat = block.getIArguments()->size() > 10 ? 
INT_ARG(10) : 0; // 0 - [kH, kW, iC, oC], 1 - [oC, iC, kH, kW], 2 - [oC, kH, kW, iC] REQUIRE_TRUE(input->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of input array must be equal to 4, but got %i instead !", input->rankOf()); REQUIRE_TRUE(weights->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of weights array must be equal to 4, but got %i instead !", weights->rankOf()); REQUIRE_TRUE(gradO->rankOf() == 4, 0, "CUSTOM CONV2D_BP CUDNN OP: rank of output's gradients (next epsilon) array must be equal to 4, but got %i instead !", gradO->rankOf()); int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, wFormat, *input, *gradO, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); int trueoH, trueoW; // true output height, width ConvolutionUtils::calcOutSizePool2D(trueoH, trueoW, kH, kW, sH, sW, pH, pW, dH, dW, iH, iW, paddingMode); ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, paddingMode); std::vector<Nd4jLong> expectedGradOShape = ShapeUtils::composeShapeUsingDimsAndIdx({bS,oC,trueoH,trueoW, 0,indIOioC,indOoH,indOoH+1}); std::vector<Nd4jLong> expectedWeightsShape = ConvolutionUtils::expectWeightsShape(wFormat, kH, kW, iC, oC); REQUIRE_TRUE(gradO->isSameShape(expectedGradOShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of output gradients (next epsilon) array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedGradOShape).c_str(), ShapeUtils::shapeAsString(gradO).c_str()); REQUIRE_TRUE(weights->isSameShape(expectedWeightsShape), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of weights array, expected is %s, but got %s instead !", ShapeUtils::shapeAsString(expectedWeightsShape).c_str(), ShapeUtils::shapeAsString(weights).c_str()); if(bias) REQUIRE_TRUE(bias->rankOf() <= 2 && oC == bias->lengthOf(), 0, "CUSTOM CONV2D_BP CUDNN OP: wrong shape of array with biases, expected rank, length: <=2, %i, but got %i, %i instead !", oC, bias->rankOf(), bias->lengthOf()); NDArray *newWeights = weights, *newGradW = gradW; // cudnn support only two formats {oC,iC,kH,kW} and {oC,kH,kW,iC} if(0 == wFormat) { newGradW = new NDArray(gradW->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), gradW->dataType(), gradW->getContext()); newWeights = new NDArray(weights->ordering(), isNCHW ? std::vector<Nd4jLong>({oC, iC, kH, kW}) : std::vector<Nd4jLong>({oC, kH, kW, iC}), weights->dataType(), weights->getContext()); newWeights->assign(weights->permute(isNCHW ? std::vector<int>({3,2,0,1}) : std::vector<int>({3,0,1,2}))); // (kH, kW, iC, oC --> oC, iC, kH, kW) or (kH, kW, iC, oC --> oC, kH, kW, iC) } NDArray* newInput = input; NDArray* newGradI = gradI; if(paddingMode == 1) // in same paddingMode cudnn doesn't support asymmetric left/right top/bottopm paddings checkConv2dCUDNNPadAsymmetric(newInput, newGradI, iH, iW, oH, oW, kH, kW, sH, sW, pH, pW, dH, dW, isNCHW); conv2dBpCUDNN(block.launchContext(), newInput, newWeights, gradO, newGradI, newGradW, gradB, kH,kW,sH,sW,pH,pW,dH,dW,paddingMode,isNCHW,wFormat); if(0 == wFormat) { newGradW->permutei(isNCHW ? 
std::vector<int>({2,3,1,0}) : std::vector<int>({1,2,3,0})); // (oC, iC, kH, kW --> kH, kW, iC, oC) or (oC, kH, kW, iC --> kH, kW, iC, oC) gradW->assign(newGradW); } if(newInput != input) { if(isNCHW) gradI->assign((*newGradI)({0,0, 0,0, 0,gradI->sizeAt(2), 0,gradI->sizeAt(3)})); else gradI->assign((*newGradI)({0,0, 0,gradI->sizeAt(1), 0,gradI->sizeAt(2), 0,0})); delete newInput; delete newGradI; } if(0 == wFormat) { delete newWeights; delete newGradW; } return Status::OK(); } PLATFORM_CHECK(conv2d_bp, ENGINE_CUDA) { auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always auto bias = block.width() > 3 ? INPUT_VARIABLE(2) : nullptr; // [oC] auto gradO = block.width() > 3 ? INPUT_VARIABLE(3) : INPUT_VARIABLE(2); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW), epsilon_next const int paddingMode = INT_ARG(8); // 0-VALID, 1-SAME, 2-CAUSAL const int isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC const bool badInputType = input->dataType() != DataType::DOUBLE && input->dataType() != DataType::FLOAT32 && input->dataType() != DataType::HALF; const bool badWeightsType = weights->dataType() != DataType::DOUBLE && weights->dataType() != DataType::FLOAT32 && weights->dataType() != DataType::HALF; const bool badGradOType = gradO->dataType() != DataType::DOUBLE && gradO->dataType() != DataType::FLOAT32 && gradO->dataType() != DataType::HALF; const bool badBiasType = bias == nullptr ? false : (bias->dataType() != DataType::DOUBLE && bias->dataType() != DataType::FLOAT32 && bias->dataType() != DataType::HALF); return isNCHW && paddingMode != 2 && !badInputType && !badWeightsType && !badGradOType && !badBiasType; } // PLATFORM_IMPL(conv2d, ENGINE_CUDA) { // auto handle = reinterpret_cast<cudnnHandle_t *>(block.launchContext()->getCuDnnHandle()); // auto res = cudnnSetStream(*handle, *block.launchContext()->getCudaStream()); // if (res != 0) // throw sd::cuda_exception::build("Can't set stream for cuDNN", res); // auto input = INPUT_VARIABLE(0); // [bS, iH, iW, iC] (NHWC) or [bS, iC, iH, iW] (NCHW) // auto weights = INPUT_VARIABLE(1); // [kH, kW, iC, oC] always // auto bias = block.width() > 2 ? INPUT_VARIABLE(2) : nullptr; // [oC] // auto output = OUTPUT_VARIABLE(0); // [bS, oH, oW, oC] (NHWC) or [bS, oC, oH, oW] (NCHW) // NDArray::prepareSpecialUse({output}, {input, weights, bias}); // int sH = INT_ARG(2); // strides height // int sW = INT_ARG(3); // strides width // int pH = INT_ARG(4); // paddings height // int pW = INT_ARG(5); // paddings width // int dH = INT_ARG(6); // dilations height // int dW = INT_ARG(7); // dilations width // int isSameMode = INT_ARG(8); // 0-VALID, 1-SAME // bool isNCHW = block.getIArguments()->size() > 9 ? !INT_ARG(9) : 1; // INT_ARG(9): 0-NCHW, 1-NHWC // int kH = INT_ARG(0) > 0 ? INT_ARG(0) : static_cast<int>(weights->sizeAt(0)); // filter(kernel) height // int kW = INT_ARG(1) > 0 ? 
INT_ARG(1) : static_cast<int>(weights->sizeAt(1)); // filter(kernel) width // int bS, iC, iH, iW, oC, oH, oW; // batch size, input channels, input height/width, output channels, output height/width; // int indIOioC, indIiH, indWoC, indWiC, indWkH, indOoH; // corresponding indexes // ConvolutionUtils::getSizesAndIndexesConv2d(isNCHW, *input, *output, bS, iC, iH, iW, oC, oH, oW, indIOioC, indIiH, indWiC, indWoC, indWkH, indOoH); // ConvolutionUtils::calcPadding2D(pH, pW, oH, oW, iH, iW, kH, kW, sH, sW, dH, dW, isSameMode); // auto dtype = cudnnDataType(input->dataType()); // cudnnTensorDescriptor_t src; // cudnnCreateTensorDescriptor(&src); // res = cudnnSetTensor4dDescriptorEx(src, dtype, input->sizeAt(0), input->sizeAt(1), input->sizeAt(2), input->sizeAt(3), input->strideAt(0), input->strideAt(1), input->strideAt(2), input->strideAt(3)); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx src failed", res); // // TODO: we definitely want NHWC here as well // cudnnFilterDescriptor_t wght; // cudnnCreateFilterDescriptor(&wght); // res = cudnnSetFilter4dDescriptor(wght, dtype, CUDNN_TENSOR_NCHW, oC, iC, kH, kW); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetFilter4dDescriptor failed", res); // cudnnConvolutionDescriptor_t cdc; // cudnnCreateConvolutionDescriptor(&cdc); // res = cudnnSetConvolution2dDescriptor(cdc, pH, pW, sH, sW, dH, dW, CUDNN_CROSS_CORRELATION, dtype); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetConvolution2dDescriptor failed", res); // cudnnTensorDescriptor_t dst; // cudnnCreateTensorDescriptor(&dst); // res = cudnnSetTensor4dDescriptorEx(dst, dtype, output->sizeAt(0), output->sizeAt(1), output->sizeAt(2), output->sizeAt(3), output->strideAt(0), output->strideAt(1), output->strideAt(2), output->strideAt(3)); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx dst failed", res); // // TODO: workspace algorithms are supposed to be faster, so we should use it here if we have enough memory // cudnnConvolutionFwdAlgo_t algo; // res = cudnnGetConvolutionForwardAlgorithm(*handle, src, wght, cdc, dst, CUDNN_CONVOLUTION_FWD_NO_WORKSPACE, 0, &algo); // if (res != 0) // throw sd::cuda_exception::build("cudnnGetConvolutionForwardAlgorithm failed", res); // // TODO: should be float if dtype is half/float, and double otherwise // float alpha = 1.0f; // float beta = 0.0f; // res = cudnnConvolutionForward(*handle, &alpha, src, input->specialBuffer(), wght, weights->specialBuffer(), cdc, algo, nullptr, 0, &beta, dst, output->specialBuffer()); // if (res != 0) // throw sd::cuda_exception::build("cudnnConvolutionForward failed", res); // if (bias != nullptr) { // cudnnTensorDescriptor_t bs; // cudnnCreateTensorDescriptor(&bs); // if (isNCHW) { // res = cudnnSetTensor4dDescriptor(bs, CUDNN_TENSOR_NCHW, dtype, 1, bias->lengthOf(), 1, 1); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx bias NHWC failed", res); // } else { // res = cudnnSetTensor4dDescriptor(bs, CUDNN_TENSOR_NHWC, dtype, 1, 1, 1, bias->lengthOf()); // if (res != 0) // throw sd::cuda_exception::build("cudnnSetTensor4dDescriptorEx bias NHWC failed", res); // } // res = cudnnAddTensor(*handle, &alpha, bs, bias->specialBuffer(), &alpha, dst, output->specialBuffer()); // if (res != 0) // throw sd::cuda_exception::build("cudnnAddTensor failed", res); // } // NDArray::registerSpecialUse({output}, {input, weights, bias}); // return Status::OK(); // } } } }
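The pair above shows what the hipify conversion changes for a cuDNN-based operator: every cuDNN call (cudnnSetTensor4dDescriptor, cudnnConvolutionForward, cudnnConvolutionBackwardFilter/Data, ...) is kept verbatim, and only the CUDA runtime calls are renamed (cudaMalloc → hipMalloc, cudaFree → hipFree, cudaStreamSynchronize → hipStreamSynchronize). The snippet below is a minimal standalone sketch of the workspace allocate/use/free pattern both versions follow; wsSize is a stand-in for the value cudnnGetConvolution*WorkspaceSize would return, and no cuDNN call is actually made here.

#include <hip/hip_runtime.h>   // the .cu original includes the CUDA runtime instead
#include <cstdio>

int main() {
    // Stand-in for the size reported by cudnnGetConvolutionForwardWorkspaceSize /
    // cudnnGetConvolutionBackward*WorkspaceSize in the files above.
    size_t wsSize = 1 << 20;
    void* wsData = nullptr;

    // hipMalloc here; the CUDA source uses cudaMalloc with the same arguments.
    hipError_t err = hipMalloc(&wsData, wsSize);
    if (err != hipSuccess) { std::printf("workspace allocation failed\n"); return 1; }

    // ... cudnnConvolutionForward / BackwardFilter / BackwardData would consume
    //     wsData and wsSize at this point; those cuDNN calls are identical in
    //     the .hip and .cu versions above ...

    // hipFree here; cudaFree in the CUDA source.
    err = hipFree(wsData);
    if (err != hipSuccess) { std::printf("workspace free failed\n"); return 1; }
    return 0;
}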
d10896807bd1e3258dc33def988af2085d521f02.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MatrixMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *A = NULL;
            hipMalloc(&A, XSIZE*YSIZE);
            float *B = NULL;
            hipMalloc(&B, XSIZE*YSIZE);
            float *C = NULL;
            hipMalloc(&C, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( MatrixMul), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( MatrixMul), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( MatrixMul), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
d10896807bd1e3258dc33def988af2085d521f02.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MatrixMul.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            float *A = NULL;
            cudaMalloc(&A, XSIZE*YSIZE);
            float *B = NULL;
            cudaMalloc(&B, XSIZE*YSIZE);
            float *C = NULL;
            cudaMalloc(&C, XSIZE*YSIZE);
            int n = XSIZE*YSIZE;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            MatrixMul<<<gridBlock,threadBlock>>>(A,B,C,n);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                MatrixMul<<<gridBlock,threadBlock>>>(A,B,C,n);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                MatrixMul<<<gridBlock,threadBlock>>>(A,B,C,n);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
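Comparing this benchmark with its .hip counterpart above, the hipify rewrite touches only the runtime API names (cudaSetDevice/cudaMalloc/cudaFree/cudaDeviceSynchronize become their hip* equivalents) and the kernel launch syntax: the triple-chevron MatrixMul<<<gridBlock,threadBlock>>>(A,B,C,n) becomes hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments. The sketch below illustrates only that mapping; dummyKernel is a hypothetical placeholder, not the MatrixMul kernel from the included MatrixMul.cu, which is not part of this pair.

#include <hip/hip_runtime.h>

// Hypothetical placeholder kernel, used only to make the launch sketch compile.
__global__ void dummyKernel(float* A, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) A[i] = 0.0f;
}

int main() {
    int n = 1024;
    float* A = NULL;
    hipMalloc(&A, n * sizeof(float));          // cudaMalloc in the CUDA version
    dim3 gridBlock(n / 256), threadBlock(256);

    // CUDA launch syntax:  dummyKernel<<<gridBlock, threadBlock>>>(A, n);
    // HIP equivalent (grid, block, dynamic shared memory, stream, kernel args):
    hipLaunchKernelGGL(dummyKernel, gridBlock, threadBlock, 0, 0, A, n);

    hipDeviceSynchronize();                    // cudaDeviceSynchronize in CUDA
    hipFree(A);                                // cudaFree in CUDA
    return 0;
}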
32a44ad24e470c8848d0461e633232b62d4a7378.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ #include <cfloat> #include <cmath> /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper */ extern "C" __global__ void slice_sparse_dense(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu) { int index = blockIdx.x * blockDim.x + threadIdx.x; int rowIndex = index + rl; if (rowIndex <= ru){ int retClen = cu - cl + 1; // Iterate over elements of the row 'rowIndex'. 
for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) { // Only slice if the index falls into the given range if(cl <= colInd[i] && colInd[i] <= cu) { ret[ index*retClen + (colInd[i] - cl) ] = inVal[i]; } } } } /** * Does a copy of upper to lower triangle of the given matrix * @param ret the input and output array allocated on the GPU * @param dim the number of rows of the square matrix ret * @param N total number of elements of the matrix */ extern "C" __global__ void copy_u2l_dense(double* ret, int dim, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / dim; int iy = tid % dim; int id_dest = iy * dim + ix; if(iy > ix && id_dest < N) { // TODO: Potential to reduce the number of threads by half int id_src = tid; ret[id_dest] = ret[id_src]; } } extern "C" __forceinline__ __device__ double getBoolean(int val) { if(val == 0) return 0.0; else return 1.0; } // op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power, // 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal, // 11=min, 12=max, 13=and, 14=or, 15=minus1multiply, 16=minusnz, // 17=modulus, 18=integer division} extern "C" __forceinline__ __device__ double binaryOp(double x, double y, int op) { switch(op) { case 0 : return x + y; case 1 : return x - y; case 2 : return x * y; case 3 : return x / y; case 4 : return pow(x, y); case 5 : return getBoolean(x < y); case 6 : return getBoolean(x <= y); case 7 : return getBoolean(x > y); case 8 : return getBoolean(x >= y); case 9 : return getBoolean(x == y); case 10 : return getBoolean(x != y); case 11 : return min(x, y); case 12 : return max(x, y); case 13 : return getBoolean((int)llrint(x) & (int)llrint(y)); case 14 : return getBoolean((int)llrint(x) | (int)llrint(y)); case 15 : return 1 - x * y; case 16 : return (x != 0.0 ? x - y : 0.0); case 17 : { if (y == 0.0 || y == -0.0){ return nan(""); } double v = x / y; // Check for v being NaN (v != v) or if it is infinity if (isnan(v) || isinf(v)){ return v; } else { v = floor(v); } return x - v * y; } case 18:{ double v = x / y; if (isnan(v) || isinf(v)){ return v; } else { return floor(v); } } default : return DBL_MAX; } } extern "C" __global__ void relu(double* A, double* ret, int rlen, int clen) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; ret[index] = max(0.0, A[index]); } } // This method computes the backpropagation errors for previous layer of relu operation extern "C" __global__ void relu_backward(double* X, double* dout, double* ret, int rlen, int clen) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; ret[index] = X[index] > 0 ? 
dout[index] : 0; } } /** * Performs inplace addition: ret += input * * @param input rhs input array allocated on the GPU * @param ret the input and output array allocated on the GPU * @param rlen the number of rows * @param clen the number of columns */ extern "C" __global__ void inplace_add(double* input, double* ret, int rlen, int clen) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; ret[index] += input[index]; } } // Performs the operation corresponding to the DML script: // ones = matrix(1, rows=1, cols=Hout*Wout) // output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout) // This operation is often followed by conv2d and hence we have introduced bias_add(input, bias) built-in function extern "C" __global__ void bias_add(double* input, double* bias, double* ret, int rlen, int clen, int PQ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; int biasIndex = iy / PQ; ret[index] = input[index] + bias[biasIndex]; } } // Performs the operation "ret <- A + alpha*B", where B is a vector extern "C" __global__ void daxpy_matrix_vector(double* A, double* B, double alpha, double* ret, int rlenA, int clenA, int rlenB, int clenB) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clenA; int iy = tid % clenA; if(ix < rlenA && iy < clenA) { int index = ix * clenA + iy; if(rlenB == 1) { ret[index] = A[index] + alpha*B[iy]; } else { ret[index] = A[index] + alpha*B[ix]; } } } // Performs similar operation as bias_add except elementwise multiplication instead of add extern "C" __global__ void bias_multiply(double* input, double* bias, double* ret, int rlen, int clen, int PQ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; int biasIndex = iy / PQ; ret[index] = input[index] * bias[biasIndex]; } } // Compares the value and set extern "C" __global__ void compare_and_set(double* A, double* ret, int rlen, int clen, double compareVal, double tol, double ifEqualsVal, double ifLessThanVal, double ifGreaterThanVal) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; int index = ix * clen + iy; if(ix < rlen && iy < clen) { if(abs(A[index]-compareVal) < tol) ret[index] = ifEqualsVal; else if(A[index] < compareVal) ret[index] = ifLessThanVal; else ret[index] = ifGreaterThanVal; } } /** * Performs a binary cellwise arithmetic operation on 2 matrices. * Either both matrices are of equal size or one of them is a vector or both are. 
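 * (Concretely, as implemented below: vectorAStatus == 1 means A is a column vector (clen == 1) and A[ix] is reused for every column of output row ix; vectorAStatus == 2 means A is a row vector (rlen == 1) and A[iy] is reused for every row of output column iy. B is broadcast the same way via vectorBStatus.)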
* @param A first input matrix allocated on GPU * @param B second input matrix allocated on GPU * @param C output allocated on GPU * @param maxRlen maximum of the row lengths of A and B * @param maxClen maximum of the column lengths of A and B * @param vectorAStatus if A is a row vector, column vector or neither * @param vectorBStatus if B is a row vector, column vector or neither * @param op the numeric code of the arithmetic operation to perform * */ extern "C" __global__ void matrix_matrix_cellwise_op(double* A, double* B, double* C, int maxRlen, int maxClen, int vectorAStatus, int vectorBStatus, int op) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / maxClen; int iy = tid % maxClen; if(ix < maxRlen && iy < maxClen) { int outIndex = ix * maxClen + iy; int aIndex = outIndex; int bIndex = outIndex; if(vectorAStatus == 1) aIndex = ix; // clen == 1 else if(vectorAStatus == 2) aIndex = iy; // rlen == 1 if(vectorBStatus == 1) bIndex = ix; // clen == 1 else if(vectorBStatus == 2) bIndex = iy; // rlen == 1 C[outIndex] = binaryOp(A[aIndex], B[bIndex], op); //printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex, A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1)); __syncthreads(); } } /** * Performs an arithmetic operation between a matrix and a scalar. * C = s op A or C = A op s (where A is the matrix, s is the scalar and op is the operation) * @param A input matrix allocated on GPU * @param scalar scalar input * @param C output matrix allocated on GPU * @param size number of elements in matrix A * @param op number code of the arithmetic operation to perform * @param isLeftScalar whether the scalar is on the left side */ extern "C" __global__ void matrix_scalar_op(double* A, double scalar, double* C, int size, int op, int isLeftScalar) { int index = blockIdx.x *blockDim.x + threadIdx.x; if(index < size) { if(isLeftScalar) { C[index] = binaryOp(scalar, A[index], op); } else { C[index] = binaryOp(A[index], scalar, op); } } __syncthreads(); } /** * Sets all elements (fills) of a double array of given length with a given scalar value * @param A array to be filled * @param scalar value to fill array with * @param lenA length of array A */ extern "C" __global__ void fill(double* A, double scalar, int lenA) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < lenA){ A[index] = scalar; } } /** * Appends Matrix B to the right side of Matrix A into a new matrix C * | 1 2 3 4 | | 8 8 8 | | 1 2 3 4 8 8 8 | * cbind ( | 9 8 7 6 | , | 7 7 7 | ) = | 9 8 7 6 7 7 7 | * | 4 3 2 1 | | 9 9 9 | | 4 3 2 1 9 9 9 | * @param A input matrix A allocated on the GPU * @param B input matrix B allocated on the GPU * @param C input matrix C allocated on the GPU * @param rowsA rows in A * @param colsA columns in A * @param rowsB rows in B * @param colsB columns in B */ extern "C" __global__ void cbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) { int maxClen = max(colsA, colsB); int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / maxClen; int iy = tid % maxClen; int colsC = colsA + colsB; int rowsC = rowsA; // Copy an element of A into C into the appropriate location if (ix < rowsA && iy < colsA) { double elemA = A[ix * colsA + iy]; C[ix * colsC + iy] = elemA; } // Copy an element of B into C into the appropriate location if (ix < rowsB && iy < colsB) { double elemB = B[ix * colsB + iy]; C[ix * colsC + (iy + colsA)] = elemB; } } /** * Appends Matrix B to the bottom of Matrix A into a new matrix C * | 2 3 4 | | 8 8 8 | | 2 3 4 | * rbind ( | 8 7 6 | , 
| 7 7 7 | ) = | 8 7 6 | * | 3 2 1 | | 3 2 1 | | 8 8 8 | | 7 7 7 | * @param A input matrix A allocated on the GPU * @param B input matrix B allocated on the GPU * @param C input matrix C allocated on the GPU * @param rowsA rows in A * @param colsA columns in A * @param rowsB rows in B * @param colsB columns in B */ extern "C" __global__ void rbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) { int maxClen = max(colsA, colsB); int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / maxClen; int iy = tid % maxClen; int rowsC = rowsA + rowsB; int colsC = colsA; // Copy an element of A into C into the appropriate location if (ix < rowsA && iy < colsA) { double elemA = A[ix * colsA + iy]; C[ix * colsC + iy] = elemA; } // Copy an element of B into C into the appropriate location if (ix < rowsB && iy < colsB) { double elemB = B[ix * colsB + iy]; C[(ix + rowsA) * colsC + iy] = elemB; } } /** * Does a reduce operation over all elements of the array. * This method has been adapted from the Reduction sample in the NVIDIA CUDA Samples (v8.0) * and the Reduction example available through jcuda.org * When invoked initially, all blocks partly compute the reduction operation over the entire array * and writes it to the output/temporary array. A second invokation needs to happen to get the * reduced value. * The number of threads, blocks and amount of shared memory is calculated in a specific way. * Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this method to see * how its done. * The template-ized version of this function is similar to what is found in NVIDIA CUB * * @param ReductionOp Type of the functor object that implements the reduction operation */ template <typename ReductionOp> __device__ void reduce( double *g_idata, ///< input data stored in device memory (of size n) double *g_odata, ///< output/temporary array stored in device memory (of size n) unsigned int n, ///< size of the input and temporary/output arrays ReductionOp reduction_op, ///< Reduction operation to perform (functor object) double initialValue) ///< initial value for the reduction variable { extern __shared__ double sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; double v = initialValue; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { v = reduction_op(v, g_idata[i]); // ensure we don't read out of bounds if (i + blockDim.x < n) v = reduction_op(v, g_idata[i+blockDim.x]); i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = v; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile double* smem = sdata; if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); } if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); } if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); } if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); } if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); } if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /** * Does a reduce (sum) over each row of the array. * This kernel must be launched with as many blocks as there are rows. * The intuition for this kernel is that each block does a reduction over a single row. * The maximum number of blocks that can launched (as of compute capability 3.0) is 2^31 - 1 * This works out fine for SystemML, since the maximum elements in a Java array can be 2^31 - c (some small constant) * If the matrix is "fat" and "short", i.e. there are small number of rows and a large number of columns, * there could be under-utilization of the hardware. 
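 * Note: the shared-memory tree reduction below assumes a power-of-two block size, and each block needs blockDim.x * sizeof(double) bytes of dynamic shared memory for sdata[]; the actual launch configuration is computed by the SystemML host code that invokes the wrapping kernels.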
* The template-ized version of this function is similar to what is found in NVIDIA CUB * @param ReductionOp Type of the functor object that implements the reduction operation * @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each row */ template <typename ReductionOp, typename AssignmentOp> __device__ void reduce_row( double *g_idata, ///< input data stored in device memory (of size rows*cols) double *g_odata, ///< output/temporary array store in device memory (of size rows*cols) unsigned int rows, ///< rows in input and temporary/output arrays unsigned int cols, ///< columns in input and temporary/output arrays ReductionOp reduction_op, ///< Reduction operation to perform (functor object) AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each row double initialValue){ ///< initial value for the reduction variable extern __shared__ double sdata[]; // one block per row if (blockIdx.x >= rows) { return; } unsigned int block = blockIdx.x; unsigned int tid = threadIdx.x; unsigned int i = tid; unsigned int block_offset = block * cols; double v = initialValue; while (i < cols){ v = reduction_op(v, g_idata[block_offset + i]); i += blockDim.x; } // each thread puts its local sum into shared memory sdata[tid] = v; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile double* smem = sdata; if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); } if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); } if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); } if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); } if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); } if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); } } // write result for this block to global mem, modify it with assignment op if (tid == 0) g_odata[block] = assignment_op(sdata[0]); } /** * Does a column wise reduction. * The intuition is that there are as many global threads as there are columns * Each global thread is responsible for a single element in the output vector * This of course leads to a under-utilization of the GPU resources. 
* For cases, where the number of columns is small, there can be unused SMs * * The template-ized version of this function is similar to what is found in NVIDIA CUB * @param ReductionOp Type of the functor object that implements the reduction operation * @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each column */ template <typename ReductionOp, typename AssignmentOp> __device__ void reduce_col( double *g_idata, ///< input data stored in device memory (of size rows*cols) double *g_odata, ///< output/temporary array store in device memory (of size rows*cols) unsigned int rows, ///< rows in input and temporary/output arrays unsigned int cols, ///< columns in input and temporary/output arrays ReductionOp reduction_op, ///< Reduction operation to perform (functor object) AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each column double initialValue) ///< initial value for the reduction variable { unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x; if (global_tid >= cols) { return; } unsigned int i = global_tid; unsigned int grid_size = cols; double val = initialValue; while (i < rows * cols) { val = reduction_op(val, g_idata[i]); i += grid_size; } g_odata[global_tid] = assignment_op(val); } /** * Functor op for assignment op. This is a dummy/identity op. */ typedef struct { __device__ __forceinline__ double operator()(double a) const { return a; } } IdentityOp; /** * Functor op for summation operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return a + b; } } SumOp; /** * Do a summation over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stored in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern "C" __global__ void reduce_sum(double *g_idata, double *g_odata, unsigned int n){ SumOp op; reduce<SumOp>(g_idata, g_odata, n, op, 0.0); } /** * Do a summation over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; IdentityOp aop; reduce_row<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Do a summation over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; IdentityOp aop; reduce_col<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Functor op for max operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return fmax(a, b); } } MaxOp; /** * Do a max over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stode in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern 
"C" __global__ void reduce_max(double *g_idata, double *g_odata, unsigned int n){ MaxOp op; reduce<MaxOp>(g_idata, g_odata, n, op, -DBL_MAX); } /** * Do a max over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MaxOp op; IdentityOp aop; reduce_row<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX); } /** * Do a max over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MaxOp op; IdentityOp aop; reduce_col<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX); } /** * Functor op for min operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return fmin(a, b); } } MinOp; /** * Do a min over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stode in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern "C" __global__ void reduce_min(double *g_idata, double *g_odata, unsigned int n){ MinOp op; reduce<MinOp>(g_idata, g_odata, n, op, DBL_MAX); } /** * Do a min over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MinOp op; IdentityOp aop; reduce_row<MinOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX); } /** * Do a min over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MinOp op; IdentityOp aop; reduce_col<MinOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX); } /** * Functor op for product operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return a * b; } } ProductOp; /** * Do a product over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stode in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern "C" __global__ void reduce_prod(double *g_idata, double *g_odata, unsigned int n){ ProductOp op; reduce<ProductOp>(g_idata, g_odata, n, op, 1.0); } /** * Functor op for mean operation */ struct MeanOp { const long _size; ///< Number of elements by which to divide to calculate mean __device__ __forceinline__ MeanOp(long size): _size(size) {} __device__ __forceinline__ double operator()(double total) const { 
return total / _size; } }; /** * Do a mean over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; MeanOp aop(cols); reduce_row<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Do a mean over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; MeanOp aop(rows); reduce_col<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Do an exp over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_exp(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = exp(A[index]); } } /** * Do an sqrt over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_sqrt(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = sqrt(A[index]); } } /** * Do an round over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_round(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = (double)llround(A[index]); } } /** * Do an abs over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_abs(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = (double)fabs(A[index]); } } /** * Do an log over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_log(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = log(A[index]); } } /** * Do an floor over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_floor(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = floor(A[index]); } } /** * Do an ceil over all the elements of a matrix * @param A the input matrix (of 
length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_ceil(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = ceil(A[index]); } } /** * Do an sin over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_sin(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = sin(A[index]); } } /** * Do an cos over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_cos(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = cos(A[index]); } } /** * Do an tan over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_tan(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = tan(A[index]); } } /** * Do an asin over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_asin(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = asin(A[index]); } } /** * Do an acos over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_acos(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = acos(A[index]); } } /** * Do an atan over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_atan(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = atan(A[index]); } } /** * Do an sign over all the elements of a matrix * Assign -1, 0 or 1 depending on the element being negative, 0 or positive * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_sign(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ if (A[index] == 0.0) { C[index] = 0.0; } else { C[index] = copysign(1.0, A[index]); } } }
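// ---------------------------------------------------------------------------------------
// Hedged usage sketch (editorial addition, not part of SystemML.cu): the reduce() comment
// above notes that a second invocation is needed to obtain the final reduced value. The
// helper below illustrates that two-pass pattern from HIP host code. SystemML itself
// launches these kernels from Java via the compiled PTX, so the helper name (sum_all),
// the block size and the minimal error handling are assumptions for illustration only.
// ---------------------------------------------------------------------------------------
double sum_all(double *d_in, unsigned int n) {
  if (n == 0) return 0.0;
  const unsigned int threads = 256;                             // power-of-two block size
  unsigned int blocks = (n + threads * 2 - 1) / (threads * 2);  // each block covers 2*threads elements per stride
  const size_t shmem = threads * sizeof(double);                // sdata[] holds one partial value per thread
  double *d_tmp = NULL;
  hipMalloc((void **)&d_tmp, blocks * sizeof(double));
  // pass 1: every block writes one partial sum into d_tmp
  hipLaunchKernelGGL(reduce_sum, dim3(blocks), dim3(threads), shmem, 0, d_in, d_tmp, n);
  // pass 2: a single block folds the partial sums into d_tmp[0]
  // (safe in place: the block reads all inputs before thread 0 writes the result)
  hipLaunchKernelGGL(reduce_sum, dim3(1), dim3(threads), shmem, 0, d_tmp, d_tmp, blocks);
  double result = 0.0;
  hipMemcpy(&result, d_tmp, sizeof(double), hipMemcpyDeviceToHost); // blocking copy syncs the null stream
  hipFree(d_tmp);
  return result;
}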
32a44ad24e470c8848d0461e633232b62d4a7378.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ #include <cfloat> #include <cmath> /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper */ extern "C" __global__ void slice_sparse_dense(double* inVal, int* inRowPtr, int* colInd, double* ret, int rl, int ru, int cl, int cu) { int index = blockIdx.x * blockDim.x + threadIdx.x; int rowIndex = index + rl; if (rowIndex <= ru){ int retClen = cu - cl + 1; // Iterate over elements of the row 'rowIndex'. for(int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex+1]; i++) { // Only slice if the index falls into the given range if(cl <= colInd[i] && colInd[i] <= cu) { ret[ index*retClen + (colInd[i] - cl) ] = inVal[i]; } } } } /** * Does a copy of upper to lower triangle of the given matrix * @param ret the input and output array allocated on the GPU * @param dim the number of rows of the square matrix ret * @param N total number of elements of the matrix */ extern "C" __global__ void copy_u2l_dense(double* ret, int dim, int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / dim; int iy = tid % dim; int id_dest = iy * dim + ix; if(iy > ix && id_dest < N) { // TODO: Potential to reduce the number of threads by half int id_src = tid; ret[id_dest] = ret[id_src]; } } extern "C" __forceinline__ __device__ double getBoolean(int val) { if(val == 0) return 0.0; else return 1.0; } // op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power, // 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal, // 11=min, 12=max, 13=and, 14=or, 15=minus1multiply, 16=minusnz, // 17=modulus, 18=integer division} extern "C" __forceinline__ __device__ double binaryOp(double x, double y, int op) { switch(op) { case 0 : return x + y; case 1 : return x - y; case 2 : return x * y; case 3 : return x / y; case 4 : return pow(x, y); case 5 : return getBoolean(x < y); case 6 : return getBoolean(x <= y); case 7 : return getBoolean(x > y); case 8 : return getBoolean(x >= y); case 9 : return getBoolean(x == y); case 10 : return getBoolean(x != y); case 11 : return min(x, y); case 12 : return max(x, y); case 13 : return getBoolean((int)llrint(x) & (int)llrint(y)); case 14 : return getBoolean((int)llrint(x) | (int)llrint(y)); case 15 : return 1 - x * y; case 16 : return (x != 0.0 ? 
x - y : 0.0); case 17 : { if (y == 0.0 || y == -0.0){ return nan(""); } double v = x / y; // Check for v being NaN (v != v) or if it is infinity if (isnan(v) || isinf(v)){ return v; } else { v = floor(v); } return x - v * y; } case 18:{ double v = x / y; if (isnan(v) || isinf(v)){ return v; } else { return floor(v); } } default : return DBL_MAX; } } extern "C" __global__ void relu(double* A, double* ret, int rlen, int clen) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; ret[index] = max(0.0, A[index]); } } // This method computes the backpropagation errors for previous layer of relu operation extern "C" __global__ void relu_backward(double* X, double* dout, double* ret, int rlen, int clen) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; ret[index] = X[index] > 0 ? dout[index] : 0; } } /** * Performs inplace addition: ret += input * * @param input rhs input array allocated on the GPU * @param ret the input and output array allocated on the GPU * @param rlen the number of rows * @param clen the number of columns */ extern "C" __global__ void inplace_add(double* input, double* ret, int rlen, int clen) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; ret[index] += input[index]; } } // Performs the operation corresponding to the DML script: // ones = matrix(1, rows=1, cols=Hout*Wout) // output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout) // This operation is often followed by conv2d and hence we have introduced bias_add(input, bias) built-in function extern "C" __global__ void bias_add(double* input, double* bias, double* ret, int rlen, int clen, int PQ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; int biasIndex = iy / PQ; ret[index] = input[index] + bias[biasIndex]; } } // Performs the operation "ret <- A + alpha*B", where B is a vector extern "C" __global__ void daxpy_matrix_vector(double* A, double* B, double alpha, double* ret, int rlenA, int clenA, int rlenB, int clenB) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clenA; int iy = tid % clenA; if(ix < rlenA && iy < clenA) { int index = ix * clenA + iy; if(rlenB == 1) { ret[index] = A[index] + alpha*B[iy]; } else { ret[index] = A[index] + alpha*B[ix]; } } } // Performs similar operation as bias_add except elementwise multiplication instead of add extern "C" __global__ void bias_multiply(double* input, double* bias, double* ret, int rlen, int clen, int PQ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int index = ix * clen + iy; int biasIndex = iy / PQ; ret[index] = input[index] * bias[biasIndex]; } } // Compares the value and set extern "C" __global__ void compare_and_set(double* A, double* ret, int rlen, int clen, double compareVal, double tol, double ifEqualsVal, double ifLessThanVal, double ifGreaterThanVal) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; int index = ix * clen + iy; if(ix < rlen && iy < clen) { if(abs(A[index]-compareVal) < tol) ret[index] = ifEqualsVal; else if(A[index] < compareVal) ret[index] = ifLessThanVal; else ret[index] = ifGreaterThanVal; } } /** * Performs 
a binary cellwise arithmetic operation on 2 matrices. * Either both matrices are of equal size or one of them is a vector or both are. * @param A first input matrix allocated on GPU * @param B second input matrix allocated on GPU * @param C output allocated on GPU * @param maxRlen maximum of the row lengths of A and B * @param maxClen maximum of the column lengths of A and B * @param vectorAStatus if A is a row vector, column vector or neither * @param vectorBStatus if B is a row vector, column vector or neither * @param op the numeric code of the arithmetic operation to perform * */ extern "C" __global__ void matrix_matrix_cellwise_op(double* A, double* B, double* C, int maxRlen, int maxClen, int vectorAStatus, int vectorBStatus, int op) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / maxClen; int iy = tid % maxClen; if(ix < maxRlen && iy < maxClen) { int outIndex = ix * maxClen + iy; int aIndex = outIndex; int bIndex = outIndex; if(vectorAStatus == 1) aIndex = ix; // clen == 1 else if(vectorAStatus == 2) aIndex = iy; // rlen == 1 if(vectorBStatus == 1) bIndex = ix; // clen == 1 else if(vectorBStatus == 2) bIndex = iy; // rlen == 1 C[outIndex] = binaryOp(A[aIndex], B[bIndex], op); //printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex, A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1)); __syncthreads(); } } /** * Performs an arithmetic operation between a matrix and a scalar. * C = s op A or C = A op s (where A is the matrix, s is the scalar and op is the operation) * @param A input matrix allocated on GPU * @param scalar scalar input * @param C output matrix allocated on GPU * @param size number of elements in matrix A * @param op number code of the arithmetic operation to perform * @param isLeftScalar whether the scalar is on the left side */ extern "C" __global__ void matrix_scalar_op(double* A, double scalar, double* C, int size, int op, int isLeftScalar) { int index = blockIdx.x *blockDim.x + threadIdx.x; if(index < size) { if(isLeftScalar) { C[index] = binaryOp(scalar, A[index], op); } else { C[index] = binaryOp(A[index], scalar, op); } } __syncthreads(); } /** * Sets all elements (fills) of a double array of given length with a given scalar value * @param A array to be filled * @param scalar value to fill array with * @param lenA length of array A */ extern "C" __global__ void fill(double* A, double scalar, int lenA) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < lenA){ A[index] = scalar; } } /** * Appends Matrix B to the right side of Matrix A into a new matrix C * | 1 2 3 4 | | 8 8 8 | | 1 2 3 4 8 8 8 | * cbind ( | 9 8 7 6 | , | 7 7 7 | ) = | 9 8 7 6 7 7 7 | * | 4 3 2 1 | | 9 9 9 | | 4 3 2 1 9 9 9 | * @param A input matrix A allocated on the GPU * @param B input matrix B allocated on the GPU * @param C input matrix C allocated on the GPU * @param rowsA rows in A * @param colsA columns in A * @param rowsB rows in B * @param colsB columns in B */ extern "C" __global__ void cbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) { int maxClen = max(colsA, colsB); int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / maxClen; int iy = tid % maxClen; int colsC = colsA + colsB; int rowsC = rowsA; // Copy an element of A into C into the appropriate location if (ix < rowsA && iy < colsA) { double elemA = A[ix * colsA + iy]; C[ix * colsC + iy] = elemA; } // Copy an element of B into C into the appropriate location if (ix < rowsB && iy < colsB) { double elemB = B[ix * colsB + iy]; C[ix * colsC + (iy + colsA)] = 
elemB; } } /** * Appends Matrix B to the bottom of Matrix A into a new matrix C * | 2 3 4 | | 8 8 8 | | 2 3 4 | * rbind ( | 8 7 6 | , | 7 7 7 | ) = | 8 7 6 | * | 3 2 1 | | 3 2 1 | | 8 8 8 | | 7 7 7 | * @param A input matrix A allocated on the GPU * @param B input matrix B allocated on the GPU * @param C input matrix C allocated on the GPU * @param rowsA rows in A * @param colsA columns in A * @param rowsB rows in B * @param colsB columns in B */ extern "C" __global__ void rbind(double *A, double *B, double *C, int rowsA, int colsA, int rowsB, int colsB) { int maxClen = max(colsA, colsB); int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / maxClen; int iy = tid % maxClen; int rowsC = rowsA + rowsB; int colsC = colsA; // Copy an element of A into C into the appropriate location if (ix < rowsA && iy < colsA) { double elemA = A[ix * colsA + iy]; C[ix * colsC + iy] = elemA; } // Copy an element of B into C into the appropriate location if (ix < rowsB && iy < colsB) { double elemB = B[ix * colsB + iy]; C[(ix + rowsA) * colsC + iy] = elemB; } } /** * Does a reduce operation over all elements of the array. * This method has been adapted from the Reduction sample in the NVIDIA CUDA Samples (v8.0) * and the Reduction example available through jcuda.org * When invoked initially, all blocks partly compute the reduction operation over the entire array * and writes it to the output/temporary array. A second invokation needs to happen to get the * reduced value. * The number of threads, blocks and amount of shared memory is calculated in a specific way. * Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this method to see * how its done. * The template-ized version of this function is similar to what is found in NVIDIA CUB * * @param ReductionOp Type of the functor object that implements the reduction operation */ template <typename ReductionOp> __device__ void reduce( double *g_idata, ///< input data stored in device memory (of size n) double *g_odata, ///< output/temporary array stored in device memory (of size n) unsigned int n, ///< size of the input and temporary/output arrays ReductionOp reduction_op, ///< Reduction operation to perform (functor object) double initialValue) ///< initial value for the reduction variable { extern __shared__ double sdata[]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x*2 + threadIdx.x; unsigned int gridSize = blockDim.x*2*gridDim.x; double v = initialValue; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). 
More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { v = reduction_op(v, g_idata[i]); // ensure we don't read out of bounds if (i + blockDim.x < n) v = reduction_op(v, g_idata[i+blockDim.x]); i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = v; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile double* smem = sdata; if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); } if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); } if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); } if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); } if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); } if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /** * Does a reduce (sum) over each row of the array. * This kernel must be launched with as many blocks as there are rows. * The intuition for this kernel is that each block does a reduction over a single row. * The maximum number of blocks that can launched (as of compute capability 3.0) is 2^31 - 1 * This works out fine for SystemML, since the maximum elements in a Java array can be 2^31 - c (some small constant) * If the matrix is "fat" and "short", i.e. there are small number of rows and a large number of columns, * there could be under-utilization of the hardware. 
* The template-ized version of this function is similar to what is found in NVIDIA CUB * @param ReductionOp Type of the functor object that implements the reduction operation * @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each row */ template <typename ReductionOp, typename AssignmentOp> __device__ void reduce_row( double *g_idata, ///< input data stored in device memory (of size rows*cols) double *g_odata, ///< output/temporary array store in device memory (of size rows*cols) unsigned int rows, ///< rows in input and temporary/output arrays unsigned int cols, ///< columns in input and temporary/output arrays ReductionOp reduction_op, ///< Reduction operation to perform (functor object) AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each row double initialValue){ ///< initial value for the reduction variable extern __shared__ double sdata[]; // one block per row if (blockIdx.x >= rows) { return; } unsigned int block = blockIdx.x; unsigned int tid = threadIdx.x; unsigned int i = tid; unsigned int block_offset = block * cols; double v = initialValue; while (i < cols){ v = reduction_op(v, g_idata[block_offset + i]); i += blockDim.x; } // each thread puts its local sum into shared memory sdata[tid] = v; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 1024){ if (tid < 512) { sdata[tid] = v = reduction_op(v, sdata[tid + 512]); } __syncthreads(); } if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = v = reduction_op(v, sdata[tid + 256]); } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = v = reduction_op(v, sdata[tid + 128]); } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = v = reduction_op(v, sdata[tid + 64]); } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile double* smem = sdata; if (blockDim.x >= 64) { smem[tid] = v = reduction_op(v, smem[tid + 32]); } if (blockDim.x >= 32) { smem[tid] = v = reduction_op(v, smem[tid + 16]); } if (blockDim.x >= 16) { smem[tid] = v = reduction_op(v, smem[tid + 8]); } if (blockDim.x >= 8) { smem[tid] = v = reduction_op(v, smem[tid + 4]); } if (blockDim.x >= 4) { smem[tid] = v = reduction_op(v, smem[tid + 2]); } if (blockDim.x >= 2) { smem[tid] = v = reduction_op(v, smem[tid + 1]); } } // write result for this block to global mem, modify it with assignment op if (tid == 0) g_odata[block] = assignment_op(sdata[0]); } /** * Does a column wise reduction. * The intuition is that there are as many global threads as there are columns * Each global thread is responsible for a single element in the output vector * This of course leads to a under-utilization of the GPU resources. 
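 * Unlike the row-wise kernels, no shared memory is used here: each thread simply strides through its own column of the row-major input. An illustrative launch (the real configuration is computed by the invoking SystemML host code) is a 1-D grid with at least cols threads, e.g. reduce_col_sum<<<(cols + 255) / 256, 256>>>(d_in, d_colSums, rows, cols);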
* For cases, where the number of columns is small, there can be unused SMs * * The template-ized version of this function is similar to what is found in NVIDIA CUB * @param ReductionOp Type of the functor object that implements the reduction operation * @param AssignmentOp Type of the functor object that is used to modify the value before writing it to its final location in global memory for each column */ template <typename ReductionOp, typename AssignmentOp> __device__ void reduce_col( double *g_idata, ///< input data stored in device memory (of size rows*cols) double *g_odata, ///< output/temporary array store in device memory (of size rows*cols) unsigned int rows, ///< rows in input and temporary/output arrays unsigned int cols, ///< columns in input and temporary/output arrays ReductionOp reduction_op, ///< Reduction operation to perform (functor object) AssignmentOp assignment_op, ///< Operation to perform before assigning this to its final location in global memory for each column double initialValue) ///< initial value for the reduction variable { unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x; if (global_tid >= cols) { return; } unsigned int i = global_tid; unsigned int grid_size = cols; double val = initialValue; while (i < rows * cols) { val = reduction_op(val, g_idata[i]); i += grid_size; } g_odata[global_tid] = assignment_op(val); } /** * Functor op for assignment op. This is a dummy/identity op. */ typedef struct { __device__ __forceinline__ double operator()(double a) const { return a; } } IdentityOp; /** * Functor op for summation operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return a + b; } } SumOp; /** * Do a summation over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stored in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern "C" __global__ void reduce_sum(double *g_idata, double *g_odata, unsigned int n){ SumOp op; reduce<SumOp>(g_idata, g_odata, n, op, 0.0); } /** * Do a summation over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; IdentityOp aop; reduce_row<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Do a summation over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_sum(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; IdentityOp aop; reduce_col<SumOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Functor op for max operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return fmax(a, b); } } MaxOp; /** * Do a max over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stode in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern 
"C" __global__ void reduce_max(double *g_idata, double *g_odata, unsigned int n){ MaxOp op; reduce<MaxOp>(g_idata, g_odata, n, op, -DBL_MAX); } /** * Do a max over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MaxOp op; IdentityOp aop; reduce_row<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX); } /** * Do a max over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_max(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MaxOp op; IdentityOp aop; reduce_col<MaxOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, -DBL_MAX); } /** * Functor op for min operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return fmin(a, b); } } MinOp; /** * Do a min over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stode in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern "C" __global__ void reduce_min(double *g_idata, double *g_odata, unsigned int n){ MinOp op; reduce<MinOp>(g_idata, g_odata, n, op, DBL_MAX); } /** * Do a min over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MinOp op; IdentityOp aop; reduce_row<MinOp, IdentityOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX); } /** * Do a min over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_min(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ MinOp op; IdentityOp aop; reduce_col<MinOp>(g_idata, g_odata, rows, cols, op, aop, DBL_MAX); } /** * Functor op for product operation */ typedef struct { __device__ __forceinline__ double operator()(double a, double b) const { return a * b; } } ProductOp; /** * Do a product over all elements of an array/matrix * @param g_idata input data stored in device memory (of size n) * @param g_odata output/temporary array stode in device memory (of size n) * @param n size of the input and temporary/output arrays */ extern "C" __global__ void reduce_prod(double *g_idata, double *g_odata, unsigned int n){ ProductOp op; reduce<ProductOp>(g_idata, g_odata, n, op, 1.0); } /** * Functor op for mean operation */ struct MeanOp { const long _size; ///< Number of elements by which to divide to calculate mean __device__ __forceinline__ MeanOp(long size): _size(size) {} __device__ __forceinline__ double operator()(double total) const { 
return total / _size; } }; /** * Do a mean over all rows of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size rows) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_row_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; MeanOp aop(cols); reduce_row<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Do a mean over all columns of a matrix * @param g_idata input matrix stored in device memory (of size rows * cols) * @param g_odata output vector stored in device memory (of size cols) * @param rows number of rows in input matrix * @param cols number of columns in input matrix */ extern "C" __global__ void reduce_col_mean(double *g_idata, double *g_odata, unsigned int rows, unsigned int cols){ SumOp op; MeanOp aop(rows); reduce_col<SumOp, MeanOp>(g_idata, g_odata, rows, cols, op, aop, 0.0); } /** * Do an exp over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_exp(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = exp(A[index]); } } /** * Do an sqrt over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_sqrt(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = sqrt(A[index]); } } /** * Do an round over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_round(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = (double)llround(A[index]); } } /** * Do an abs over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_abs(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = (double)fabs(A[index]); } } /** * Do an log over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_log(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = log(A[index]); } } /** * Do an floor over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_floor(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = floor(A[index]); } } /** * Do an ceil over all the elements of a matrix * @param A the input matrix (of 
length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_ceil(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = ceil(A[index]); } } /** * Do an sin over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_sin(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = sin(A[index]); } } /** * Do an cos over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_cos(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = cos(A[index]); } } /** * Do an tan over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_tan(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = tan(A[index]); } } /** * Do an asin over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_asin(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = asin(A[index]); } } /** * Do an acos over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_acos(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = acos(A[index]); } } /** * Do an atan over all the elements of a matrix * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_atan(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ C[index] = atan(A[index]); } } /** * Do an sign over all the elements of a matrix * Assign -1, 0 or 1 depending on the element being negative, 0 or positive * @param A the input matrix (of length = size) * @param C the pre-allocated output matrix (of length = size) * @param siz the length of the input and output matrices */ extern "C" __global__ void matrix_sign(double *A, double *C, unsigned int size) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size){ if (A[index] == 0.0) { C[index] = 0.0; } else { C[index] = copysign(1.0, A[index]); } } }
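The elementwise kernels above all follow the same one-thread-per-element pattern, so a single host-side launch recipe covers them. Below is a minimal sketch of how matrix_exp might be invoked from CUDA host code; the wrapper name run_matrix_exp, the block size, and the error handling are illustrative assumptions, and the kernel is assumed to be compiled in the same translation unit or linked with relocatable device code.

#include <cuda_runtime.h>

// Kernel defined in the file above (exp applied to every element).
extern "C" __global__ void matrix_exp(double *A, double *C, unsigned int size);

// Hypothetical host wrapper: copy in, launch one thread per element, copy out.
void run_matrix_exp(const double *hA, double *hC, unsigned int size) {
    double *dA = nullptr, *dC = nullptr;
    cudaMalloc(&dA, size * sizeof(double));
    cudaMalloc(&dC, size * sizeof(double));
    cudaMemcpy(dA, hA, size * sizeof(double), cudaMemcpyHostToDevice);

    const unsigned int threads = 256;
    const unsigned int blocks = (size + threads - 1) / threads;  // round up
    matrix_exp<<<blocks, threads>>>(dA, dC, size);

    cudaMemcpy(hC, dC, size * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(dA);
    cudaFree(dC);
}

The same launch shape works for matrix_sqrt, matrix_log, and the other unary kernels, since they share the signature (double *A, double *C, unsigned int size).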
ad2fd13a8a60b90e824dbc45eb915449b159bc29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common_cuda_helper.hpp" #include "trt_cuda_helper.cuh" #include "trt_plugin_helper.hpp" template <typename scalar_t> __global__ void top_bottom_pool_kernel(const scalar_t *input, scalar_t *output, const int batch_size, const int channels, const int height, const int width, const int pool_type) { const int nthreads = batch_size * channels * width; CUDA_1D_KERNEL_LOOP(index, nthreads) { int n_idx = index / (channels * width); // batch int w_idx = index % width; // width int c_idx = (index / width) % channels; // channels int offset_n = n_idx * channels * width * height; int offset_n_c = offset_n + c_idx * width * height; int direction = -1; // in [-1, 1], default for TopPool int index_start = height - 2; // default for TopPool // pool_type in [0, 1] if (pool_type == 0) { // TopPool // directly copy the most bottom value from input to output output[offset_n_c + (height - 1) * width + w_idx] = input[offset_n_c + (height - 1) * width + w_idx]; } else { // BottomPool // directly copy the most top value from input to output output[offset_n_c + w_idx] = input[offset_n_c + w_idx]; index_start = 1; direction = 1; } // do pool for (int h = index_start; h >= 0 && h < height; h += direction) { output[offset_n_c + h * width + w_idx] = max(output[offset_n_c + (h - direction) * width + w_idx], input[offset_n_c + h * width + w_idx]); } } } template <typename scalar_t> __global__ void left_right_pool_kernel(const scalar_t *input, scalar_t *output, const int batch_size, const int channels, const int height, const int width, const int pool_type) { const int nthreads = batch_size * channels * height; CUDA_1D_KERNEL_LOOP(index, nthreads) { int n_idx = index / (channels * height); // batch int h_idx = index % height; // height int c_idx = (index / height) % channels; // channels int offset_n = n_idx * channels * width * height; int offset_n_c = offset_n + c_idx * width * height; int offset_n_c_h = offset_n_c + h_idx * width; int direction = -1; // in [-1, 1], default for LeftPool int index_start = width - 2; // default for LeftPool // pool_type in [2, 3] if (pool_type == 2) { // LeftPool // directly copy the most right value from input to output output[offset_n_c_h + width - 1] = input[offset_n_c_h + width - 1]; } else { // RightPool // directly copy the most left value from input to output output[offset_n_c_h] = input[offset_n_c_h]; index_start = 1; direction = 1; } // do pool for (int w = index_start; w >= 0 && w < width; w += direction) { output[offset_n_c_h + w] = max(output[offset_n_c_h + w - direction], input[offset_n_c_h + w]); } } } template <typename scalar_t> void CornerPoolForwardLauncher(const scalar_t *input, scalar_t *output, const int batch_size, const int channels, const int height, const int width, const int pool_type, hipStream_t stream) { int nthreads = -1, col_block = -1; switch (pool_type) { case 0: case 1: nthreads = batch_size * channels * width; col_block = DIVUP(nthreads, THREADS_PER_BLOCK); hipLaunchKernelGGL(( top_bottom_pool_kernel<scalar_t>) , dim3(col_block), dim3(THREADS_PER_BLOCK), 0, stream, input, output, batch_size, channels, height, width, pool_type); break; case 2: case 3: nthreads = batch_size * channels * height; col_block = DIVUP(nthreads, THREADS_PER_BLOCK); hipLaunchKernelGGL(( left_right_pool_kernel<scalar_t>) , dim3(col_block), dim3(THREADS_PER_BLOCK), 0, stream, input, output, batch_size, channels, height, width, pool_type); break; } } void 
CornerPoolForwardLauncher_float(const float *input, float *output, const int batch_size, const int channels, const int height, const int width, const int pool_type, hipStream_t stream) { CornerPoolForwardLauncher<float>(input, output, batch_size, channels, height, width, pool_type, stream); }
ad2fd13a8a60b90e824dbc45eb915449b159bc29.cu
#include "common_cuda_helper.hpp" #include "trt_cuda_helper.cuh" #include "trt_plugin_helper.hpp" template <typename scalar_t> __global__ void top_bottom_pool_kernel(const scalar_t *input, scalar_t *output, const int batch_size, const int channels, const int height, const int width, const int pool_type) { const int nthreads = batch_size * channels * width; CUDA_1D_KERNEL_LOOP(index, nthreads) { int n_idx = index / (channels * width); // batch int w_idx = index % width; // width int c_idx = (index / width) % channels; // channels int offset_n = n_idx * channels * width * height; int offset_n_c = offset_n + c_idx * width * height; int direction = -1; // in [-1, 1], default for TopPool int index_start = height - 2; // default for TopPool // pool_type in [0, 1] if (pool_type == 0) { // TopPool // directly copy the most bottom value from input to output output[offset_n_c + (height - 1) * width + w_idx] = input[offset_n_c + (height - 1) * width + w_idx]; } else { // BottomPool // directly copy the most top value from input to output output[offset_n_c + w_idx] = input[offset_n_c + w_idx]; index_start = 1; direction = 1; } // do pool for (int h = index_start; h >= 0 && h < height; h += direction) { output[offset_n_c + h * width + w_idx] = max(output[offset_n_c + (h - direction) * width + w_idx], input[offset_n_c + h * width + w_idx]); } } } template <typename scalar_t> __global__ void left_right_pool_kernel(const scalar_t *input, scalar_t *output, const int batch_size, const int channels, const int height, const int width, const int pool_type) { const int nthreads = batch_size * channels * height; CUDA_1D_KERNEL_LOOP(index, nthreads) { int n_idx = index / (channels * height); // batch int h_idx = index % height; // height int c_idx = (index / height) % channels; // channels int offset_n = n_idx * channels * width * height; int offset_n_c = offset_n + c_idx * width * height; int offset_n_c_h = offset_n_c + h_idx * width; int direction = -1; // in [-1, 1], default for LeftPool int index_start = width - 2; // default for LeftPool // pool_type in [2, 3] if (pool_type == 2) { // LeftPool // directly copy the most right value from input to output output[offset_n_c_h + width - 1] = input[offset_n_c_h + width - 1]; } else { // RightPool // directly copy the most left value from input to output output[offset_n_c_h] = input[offset_n_c_h]; index_start = 1; direction = 1; } // do pool for (int w = index_start; w >= 0 && w < width; w += direction) { output[offset_n_c_h + w] = max(output[offset_n_c_h + w - direction], input[offset_n_c_h + w]); } } } template <typename scalar_t> void CornerPoolForwardLauncher(const scalar_t *input, scalar_t *output, const int batch_size, const int channels, const int height, const int width, const int pool_type, cudaStream_t stream) { int nthreads = -1, col_block = -1; switch (pool_type) { case 0: case 1: nthreads = batch_size * channels * width; col_block = DIVUP(nthreads, THREADS_PER_BLOCK); top_bottom_pool_kernel<scalar_t> <<<col_block, THREADS_PER_BLOCK, 0, stream>>>( input, output, batch_size, channels, height, width, pool_type); break; case 2: case 3: nthreads = batch_size * channels * height; col_block = DIVUP(nthreads, THREADS_PER_BLOCK); left_right_pool_kernel<scalar_t> <<<col_block, THREADS_PER_BLOCK, 0, stream>>>( input, output, batch_size, channels, height, width, pool_type); break; } } void CornerPoolForwardLauncher_float(const float *input, float *output, const int batch_size, const int channels, const int height, const int width, const int pool_type, cudaStream_t 
stream) { CornerPoolForwardLauncher<float>(input, output, batch_size, channels, height, width, pool_type, stream); }
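A usage sketch for the launcher defined above, assuming the input and output are NCHW float tensors already resident in device memory. The tensor dimensions and the choice of pool_type are illustrative; per the kernel comments, pool_type 0/1 select TopPool/BottomPool and 2/3 select LeftPool/RightPool.

#include <cuda_runtime.h>

// Declared in the file above.
void CornerPoolForwardLauncher_float(const float *input, float *output,
                                     const int batch_size, const int channels,
                                     const int height, const int width,
                                     const int pool_type, cudaStream_t stream);

// d_input and d_output must each hold n * c * h * w floats on the device.
void corner_pool_example(const float *d_input, float *d_output, cudaStream_t stream) {
    const int n = 2, c = 64, h = 96, w = 128;  // hypothetical feature-map shape
    const int pool_type = 0;                   // 0 = TopPool
    CornerPoolForwardLauncher_float(d_input, d_output, n, c, h, w, pool_type, stream);
    cudaStreamSynchronize(stream);             // wait for the pooling kernel to finish
}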
a13d8b34ef2c6ab3de14b04aa0d94ea50cd7d74a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" // CUDA #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "rocblas.h" #include "cudaCommon.h" // static parameters void printAboutDevice(hipDeviceProp_t info, int devno) { printf("Information on device %i (%s):\n", devno, info.name); printf("\tCompute capability: %i.%i\n", info.major, info.minor); printf("\tGlobal mem = %iMB\n", (int)(info.totalGlobalMem/1048576)); printf("\tConstant mem = %iKB\n", (int)(info.totalConstMem/1024)); printf("\tShared mem/block = %iKB\n", (int)(info.sharedMemPerBlock/1024)); printf("\tRegisters/block = %i\n", info.regsPerBlock); printf("\tWarp size = %i\n", info.warpSize); printf("\tMemory pitch = %i\n", (int)info.memPitch); printf("\tMax threads/block = %i\n", info.maxThreadsPerBlock); printf("\tMax block dim = %ix%ix%i\n", info.maxThreadsDim[0], info.maxThreadsDim[1], info.maxThreadsDim[2]); printf("\tMax grid dim = %ix%ix%i\n", info.maxGridSize[0], info.maxGridSize[1], info.maxGridSize[2]); printf("\tClock rate = %iMhz\n",(int)((float)info.clockRate / 1000.0)); printf("\t# of multiprocs = %i\n", info.multiProcessorCount); printf("\tExecution timeout? %s\n", info.kernelExecTimeoutEnabled == 1 ? "yes" : "no"); printf("\tSupports host mmap? %s\n", info.canMapHostMemory == 1? "yes" : "no"); // size_t textureAlignment; // int deviceOverlap; // int integrated; // int computeMode; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if(nrhs < 1) mexErrMsgTxt("Error: needs arguments; GPU_ctrl('help') for help.\n"); int nDevices; hipGetDeviceCount(&nDevices); int returnCode = CHECK_CUDA_ERROR("GPU_ctrl(): hipGetDeviceCount doesn't even work.\nIf occurring apropos of nothing, this appears to occur if Matlab was open,\nhad acquired a context, and the system was ACPI suspended.\n"); if(returnCode != SUCCESSFUL) DROP_MEX_ERROR("GPU_ctrl failed at entry."); mexLock(); // It would be Bad if this disappeared on us at any point. 
mxClassID argtype = mxGetClassID(prhs[0]); if(argtype != mxCHAR_CLASS) mexErrMsgTxt("See GPU_ctrl('help').\n"); int slen = mxGetNumberOfElements(prhs[0]); char c[slen+1]; int stat = mxGetString(prhs[0], &c[0], slen+1); if(strcmp(c, "help") == 0) { printf("\%=== GPU_ctrl commands ===\%\n GPU_ctrl('help'): This message\n\ GPU_ctrl('info, [device #s]'): print device information (about specific devices)\n\ GPU_ctrl('peers', [1/0]): Print matrix of hipDeviceCanAccessPeer results, 1 or 0 to enable/disable it.\n\ GPU_ctrl('reset'): If CUDA crashes, reset it so we can reinitialize without restarting Matlab.\n\ m = GPU_ctrl('memory'): m[i,:] = [free, total] on device i.\n"); return; } if(strcmp(c, "info") == 0) { if(nlhs > 0) mexErrMsgTxt("Error: No return value from GPU_ctrl('info')\n"); int nDevices; hipGetDeviceCount(&nDevices); hipDeviceProp_t devprops; int q; for(q = 0; q < nDevices; q++) { hipGetDeviceProperties(&devprops, q); printAboutDevice(devprops, q); } } if(strcmp(c, "peers") == 0) { // print accessibility matrix int nDevices; hipGetDeviceCount(&nDevices); hipError_t goofed; // turn it on/off if given to if(nrhs > 1) { mxArray *getGM[1]; int stupid = mexCallMATLAB(1, &getGM[0], 0, NULL, "GPUManager.getInstance"); double *f = mxGetPr(prhs[1]); int doit = (int)*f; // FIXME: loop over only gm.deviceList devices int u, v, IseeU; mxArray *mxDevList = mxGetProperty(getGM[0], 0, "deviceList"); double *devList = mxGetPr(mxDevList); int numList = mxGetNumberOfElements(mxDevList); for(u = 0; u < numList; u++) { for(v = 0; v < numList; v++) { if(u == v) continue; int uhat = (int)devList[u]; int vhat = (int)devList[v]; hipDeviceCanAccessPeer(&IseeU, uhat, vhat); if(IseeU) { hipSetDevice(uhat); if(doit == 1) hipDeviceEnablePeerAccess(vhat, 0); if(doit == 0) hipDeviceDisablePeerAccess(vhat); goofed = hipGetLastError(); if(goofed == hipErrorPeerAccessAlreadyEnabled) { printf("Oops: Peer access apparently already on. Returning...\n"); return; } if(goofed == hipErrorPeerAccessNotEnabled) { printf("Oops: Peer access already disabled. Returning...\n"); return; } } } } } else { // We have no request to turn it on/off, just gather information int IseeU; int u,v; int justprint = (nlhs == 0); double *outputMatrix = NULL; if(justprint) { printf("Device accessibility: symmetric matrix M_ij = device i can access device j\n"); printf(" | "); for(u = 0; u < nDevices; u++) { printf("%i ", u); } printf("\n"); } else { mwSize matSize = nDevices; plhs[0] = mxCreateDoubleMatrix(matSize, matSize, mxREAL); outputMatrix = mxGetPr(plhs[0]); } for(u = 0; u < nDevices; u++) { if(justprint) printf("%i | ", u); for(v = 0; v < nDevices; v++) { hipDeviceCanAccessPeer(&IseeU, u, v); if(justprint) { if(u == v) { printf("x "); } else { printf("%i ", IseeU); } } else { outputMatrix[u+nDevices*v] = (double)IseeU; } } printf("\n"); } } } if(strcmp(c, "reset") == 0) { int nDevices; hipGetDeviceCount(&nDevices); int i; for(i = 0; i < nDevices; i++) { printf("Resetting device %i... 
", i); hipSetDevice(i); hipDeviceReset(); printf("Done.\n"); returnCode = CHECK_CUDA_ERROR("device reset"); if(returnCode != SUCCESSFUL) break; } } if(strcmp(c, "memory") == 0) { size_t freemem; size_t totalmem; //hipError_t fail; mwSize dims[3]; dims[0] = nDevices; dims[1] = 2; dims[2] = 1; plhs[0] = mxCreateNumericArray(2, dims, mxDOUBLE_CLASS, mxREAL); double *d = mxGetPr(plhs[0]); int i; for(i = 0; i < nDevices; i++) { hipSetDevice(i); returnCode = CHECK_CUDA_ERROR("hipSetDevice()"); if(returnCode != SUCCESSFUL) break; hipMemGetInfo(&freemem, &totalmem); returnCode = CHECK_CUDA_ERROR("hipMemGetInfo()"); if(returnCode != SUCCESSFUL) break; d[i] = freemem; d[i+nDevices] = totalmem; } } if(strcmp(c,"createStreams") == 0) { if(nrhs < 2) { printf("Must receive list of devices to get new streams: streams = GPU_ctrl('createStreams',[0 1]) e.g."); return; } if(nlhs < 1) { printf("Must be able to return hipStream_t *: streams = GPU_ctrl('createStreams',[0 1]) e.g."); return; } double *d = mxGetPr(prhs[1]); int i; int imax = mxGetNumberOfElements(prhs[1]); mwSize dims[2]; dims[0] = imax; dims[1] = 1; plhs[0] = mxCreateNumericArray(2, dims, mxINT64_CLASS, mxREAL); int64_t *out = (int64_t *)mxGetData(plhs[0]); hipStream_t pstream; for(i = 0; i < imax; i++) { hipSetDevice((int)d[i]); hipStreamCreate(&pstream); out[i] = (uint64_t)pstream; } } if(strcmp(c,"destroyStreams") == 0) { if(nrhs < 3) { printf("Call is: GPU_ctrl('destroyStreams', [device list], streams"); return; } hipStream_t *streams = (hipStream_t *)mxGetData(prhs[0]); int numd; double *d = mxGetPr(prhs[1]); numd = (int)mxGetNumberOfElements(prhs[1]); uint64_t *s = (uint64_t *)mxGetData(prhs[2]); int nums = (int)mxGetNumberOfElements(prhs[2]); int nspd = nums / numd; int i, j; for(i = 0; i < numd; i++) { hipSetDevice((int)d[i]); for(j = 0; j < nspd; j++) { hipStreamDestroy((hipStream_t)s[numd*j + i]); } } } if(returnCode != SUCCESSFUL) { DROP_MEX_ERROR("Operation of GPU_ctrl failed: Causing interpreter error."); } }
a13d8b34ef2c6ab3de14b04aa0d94ea50cd7d74a.cu
#include <stdio.h> #include <string.h> #include <stdarg.h> #ifdef UNIX #include <stdint.h> #include <unistd.h> #endif #include "mex.h" // CUDA #include "cuda.h" #include "cuda_runtime.h" #include "cublas.h" #include "cudaCommon.h" // static parameters void printAboutDevice(cudaDeviceProp info, int devno) { printf("Information on device %i (%s):\n", devno, info.name); printf("\tCompute capability: %i.%i\n", info.major, info.minor); printf("\tGlobal mem = %iMB\n", (int)(info.totalGlobalMem/1048576)); printf("\tConstant mem = %iKB\n", (int)(info.totalConstMem/1024)); printf("\tShared mem/block = %iKB\n", (int)(info.sharedMemPerBlock/1024)); printf("\tRegisters/block = %i\n", info.regsPerBlock); printf("\tWarp size = %i\n", info.warpSize); printf("\tMemory pitch = %i\n", (int)info.memPitch); printf("\tMax threads/block = %i\n", info.maxThreadsPerBlock); printf("\tMax block dim = %ix%ix%i\n", info.maxThreadsDim[0], info.maxThreadsDim[1], info.maxThreadsDim[2]); printf("\tMax grid dim = %ix%ix%i\n", info.maxGridSize[0], info.maxGridSize[1], info.maxGridSize[2]); printf("\tClock rate = %iMhz\n",(int)((float)info.clockRate / 1000.0)); printf("\t# of multiprocs = %i\n", info.multiProcessorCount); printf("\tExecution timeout? %s\n", info.kernelExecTimeoutEnabled == 1 ? "yes" : "no"); printf("\tSupports host mmap? %s\n", info.canMapHostMemory == 1? "yes" : "no"); // size_t textureAlignment; // int deviceOverlap; // int integrated; // int computeMode; } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { if(nrhs < 1) mexErrMsgTxt("Error: needs arguments; GPU_ctrl('help') for help.\n"); int nDevices; cudaGetDeviceCount(&nDevices); int returnCode = CHECK_CUDA_ERROR("GPU_ctrl(): cudaGetDeviceCount doesn't even work.\nIf occurring apropos of nothing, this appears to occur if Matlab was open,\nhad acquired a context, and the system was ACPI suspended.\n"); if(returnCode != SUCCESSFUL) DROP_MEX_ERROR("GPU_ctrl failed at entry."); mexLock(); // It would be Bad if this disappeared on us at any point. 
mxClassID argtype = mxGetClassID(prhs[0]); if(argtype != mxCHAR_CLASS) mexErrMsgTxt("See GPU_ctrl('help').\n"); int slen = mxGetNumberOfElements(prhs[0]); char c[slen+1]; int stat = mxGetString(prhs[0], &c[0], slen+1); if(strcmp(c, "help") == 0) { printf("\%=== GPU_ctrl commands ===\%\n GPU_ctrl('help'): This message\n\ GPU_ctrl('info, [device #s]'): print device information (about specific devices)\n\ GPU_ctrl('peers', [1/0]): Print matrix of cudaDeviceCanAccessPeer results, 1 or 0 to enable/disable it.\n\ GPU_ctrl('reset'): If CUDA crashes, reset it so we can reinitialize without restarting Matlab.\n\ m = GPU_ctrl('memory'): m[i,:] = [free, total] on device i.\n"); return; } if(strcmp(c, "info") == 0) { if(nlhs > 0) mexErrMsgTxt("Error: No return value from GPU_ctrl('info')\n"); int nDevices; cudaGetDeviceCount(&nDevices); cudaDeviceProp devprops; int q; for(q = 0; q < nDevices; q++) { cudaGetDeviceProperties(&devprops, q); printAboutDevice(devprops, q); } } if(strcmp(c, "peers") == 0) { // print accessibility matrix int nDevices; cudaGetDeviceCount(&nDevices); cudaError_t goofed; // turn it on/off if given to if(nrhs > 1) { mxArray *getGM[1]; int stupid = mexCallMATLAB(1, &getGM[0], 0, NULL, "GPUManager.getInstance"); double *f = mxGetPr(prhs[1]); int doit = (int)*f; // FIXME: loop over only gm.deviceList devices int u, v, IseeU; mxArray *mxDevList = mxGetProperty(getGM[0], 0, "deviceList"); double *devList = mxGetPr(mxDevList); int numList = mxGetNumberOfElements(mxDevList); for(u = 0; u < numList; u++) { for(v = 0; v < numList; v++) { if(u == v) continue; int uhat = (int)devList[u]; int vhat = (int)devList[v]; cudaDeviceCanAccessPeer(&IseeU, uhat, vhat); if(IseeU) { cudaSetDevice(uhat); if(doit == 1) cudaDeviceEnablePeerAccess(vhat, 0); if(doit == 0) cudaDeviceDisablePeerAccess(vhat); goofed = cudaGetLastError(); if(goofed == cudaErrorPeerAccessAlreadyEnabled) { printf("Oops: Peer access apparently already on. Returning...\n"); return; } if(goofed == cudaErrorPeerAccessNotEnabled) { printf("Oops: Peer access already disabled. Returning...\n"); return; } } } } } else { // We have no request to turn it on/off, just gather information int IseeU; int u,v; int justprint = (nlhs == 0); double *outputMatrix = NULL; if(justprint) { printf("Device accessibility: symmetric matrix M_ij = device i can access device j\n"); printf(" | "); for(u = 0; u < nDevices; u++) { printf("%i ", u); } printf("\n"); } else { mwSize matSize = nDevices; plhs[0] = mxCreateDoubleMatrix(matSize, matSize, mxREAL); outputMatrix = mxGetPr(plhs[0]); } for(u = 0; u < nDevices; u++) { if(justprint) printf("%i | ", u); for(v = 0; v < nDevices; v++) { cudaDeviceCanAccessPeer(&IseeU, u, v); if(justprint) { if(u == v) { printf("x "); } else { printf("%i ", IseeU); } } else { outputMatrix[u+nDevices*v] = (double)IseeU; } } printf("\n"); } } } if(strcmp(c, "reset") == 0) { int nDevices; cudaGetDeviceCount(&nDevices); int i; for(i = 0; i < nDevices; i++) { printf("Resetting device %i... 
", i); cudaSetDevice(i); cudaDeviceReset(); printf("Done.\n"); returnCode = CHECK_CUDA_ERROR("device reset"); if(returnCode != SUCCESSFUL) break; } } if(strcmp(c, "memory") == 0) { size_t freemem; size_t totalmem; //cudaError_t fail; mwSize dims[3]; dims[0] = nDevices; dims[1] = 2; dims[2] = 1; plhs[0] = mxCreateNumericArray(2, dims, mxDOUBLE_CLASS, mxREAL); double *d = mxGetPr(plhs[0]); int i; for(i = 0; i < nDevices; i++) { cudaSetDevice(i); returnCode = CHECK_CUDA_ERROR("cudaSetDevice()"); if(returnCode != SUCCESSFUL) break; cudaMemGetInfo(&freemem, &totalmem); returnCode = CHECK_CUDA_ERROR("cudaMemGetInfo()"); if(returnCode != SUCCESSFUL) break; d[i] = freemem; d[i+nDevices] = totalmem; } } if(strcmp(c,"createStreams") == 0) { if(nrhs < 2) { printf("Must receive list of devices to get new streams: streams = GPU_ctrl('createStreams',[0 1]) e.g."); return; } if(nlhs < 1) { printf("Must be able to return cudaStream_t *: streams = GPU_ctrl('createStreams',[0 1]) e.g."); return; } double *d = mxGetPr(prhs[1]); int i; int imax = mxGetNumberOfElements(prhs[1]); mwSize dims[2]; dims[0] = imax; dims[1] = 1; plhs[0] = mxCreateNumericArray(2, dims, mxINT64_CLASS, mxREAL); int64_t *out = (int64_t *)mxGetData(plhs[0]); cudaStream_t pstream; for(i = 0; i < imax; i++) { cudaSetDevice((int)d[i]); cudaStreamCreate(&pstream); out[i] = (uint64_t)pstream; } } if(strcmp(c,"destroyStreams") == 0) { if(nrhs < 3) { printf("Call is: GPU_ctrl('destroyStreams', [device list], streams"); return; } cudaStream_t *streams = (cudaStream_t *)mxGetData(prhs[0]); int numd; double *d = mxGetPr(prhs[1]); numd = (int)mxGetNumberOfElements(prhs[1]); uint64_t *s = (uint64_t *)mxGetData(prhs[2]); int nums = (int)mxGetNumberOfElements(prhs[2]); int nspd = nums / numd; int i, j; for(i = 0; i < numd; i++) { cudaSetDevice((int)d[i]); for(j = 0; j < nspd; j++) { cudaStreamDestroy((cudaStream_t)s[numd*j + i]); } } } if(returnCode != SUCCESSFUL) { DROP_MEX_ERROR("Operation of GPU_ctrl failed: Causing interpreter error."); } }
77acb528a9b1bceae71174219cd569d4fd3a81d5.hip
// !!! This is a file automatically generated by hipify!!! /**************************************************************************** Similar to factorise_3_0 but solves the problem with 4 threads using a block method for search space partitioning. It is included here to accompany a CUDA version of the program. Compile with: nvcc -o pswcuda pswcuda.cu Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> __device__ int is_a_match(char *password){ char pw1[]="BI1100"; char pw2[]="KR2200"; char pw3[]="RA4540"; char pw4[]="PU6080"; char *p1 = password; char *p2 = password; char *p3 = password; char *p4 = password; char *w1 = pw1; char *w2 = pw2; char *w3 = pw3; char *w4 = pw4; while(*p1 == *w1){ if(*p1 == '\0'){ printf("Password found : %s\n", pw1); return 1; } p1++; w1++; } while(*p2 == *w2){ if(*p2 == '\0'){ printf("Password found : %s\n", pw2); return 1; } p2++; w2++; } while(*p3 == *w3){ if(*p3 == '\0'){ printf("Password found : %s\n", pw3); return 1; } p3++; w3++; } while(*p4 == *w4){ if(*p4 == '\0'){ printf("Password found : %s\n", pw4); return 1; } p4++; w4++; } return 0; } __global__ void kernel(){ char alphabet[26] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'S', 'T','U', 'V', 'W', 'X', 'Y', 'Z'}; char numbers[10] = {'0','1','2','3','4','5','6','7','8','9'}; char password[7]; password[6] = '\0'; int a, b, c, d; for(a=0;a<10;a++){ for(b=0;b<10;b++){ for(c=0;c<10;c++){ for(d=0;d<10;d++){ password[0] = alphabet[blockIdx.x]; password[1] = alphabet[threadIdx.x]; password[2] = numbers[a]; password[3] = numbers[b]; password[4] = numbers[c]; password[5] = numbers[d]; if(is_a_match(password)){ printf("Password found: %s\n", password); } } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char *argv[]) { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( kernel) , dim3(26), dim3(26), 0, 0, ); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
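main() above times the launch with clock_gettime around a device-wide synchronize. An alternative sketch using CUDA events, which measure only the device-side duration of work on the stream, is shown below; dummy_kernel is a stand-in for the password-search kernel, not part of the original file.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel() {}  // stand-in for the search kernel above

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    dummy_kernel<<<26, 26>>>();   // same 26x26 launch shape as the file above
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("Kernel time: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}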
77acb528a9b1bceae71174219cd569d4fd3a81d5.cu
/**************************************************************************** Similar to factorise_3_0 but solves the problem with 4 threads using a block method for search space partitioning. It is included here to accompany a CUDA version of the program. Compile with: nvcc -o pswcuda pswcuda.cu Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ #include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> __device__ int is_a_match(char *password){ char pw1[]="BI1100"; char pw2[]="KR2200"; char pw3[]="RA4540"; char pw4[]="PU6080"; char *p1 = password; char *p2 = password; char *p3 = password; char *p4 = password; char *w1 = pw1; char *w2 = pw2; char *w3 = pw3; char *w4 = pw4; while(*p1 == *w1){ if(*p1 == '\0'){ printf("Password found : %s\n", pw1); return 1; } p1++; w1++; } while(*p2 == *w2){ if(*p2 == '\0'){ printf("Password found : %s\n", pw2); return 1; } p2++; w2++; } while(*p3 == *w3){ if(*p3 == '\0'){ printf("Password found : %s\n", pw3); return 1; } p3++; w3++; } while(*p4 == *w4){ if(*p4 == '\0'){ printf("Password found : %s\n", pw4); return 1; } p4++; w4++; } return 0; } __global__ void kernel(){ char alphabet[26] = {'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'S', 'T','U', 'V', 'W', 'X', 'Y', 'Z'}; char numbers[10] = {'0','1','2','3','4','5','6','7','8','9'}; char password[7]; password[6] = '\0'; int a, b, c, d; for(a=0;a<10;a++){ for(b=0;b<10;b++){ for(c=0;c<10;c++){ for(d=0;d<10;d++){ password[0] = alphabet[blockIdx.x]; password[1] = alphabet[threadIdx.x]; password[2] = numbers[a]; password[3] = numbers[b]; password[4] = numbers[c]; password[5] = numbers[d]; if(is_a_match(password)){ printf("Password found: %s\n", password); } } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char *argv[]) { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); kernel <<<26, 26>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
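In the kernel above, blockIdx.x selects the first character, threadIdx.x the second, and the four nested loops enumerate the digits, for 26 * 26 * 10^4 = 6,760,000 candidates. Note that the 26-element alphabet array is initialized with only 25 letters ('R' is missing), so candidates containing 'R', including two of the four hard-coded targets, are never generated. Below is a host-side sketch of the same index-to-candidate mapping with the full alphabet, handy for checking which launch coordinates produce a given password; the function name is illustrative.

#include <cstdio>

// Reconstruct the candidate that block `blk`, thread `thr`, and digit indices a, b2, c, d
// would test, using the full 26-letter alphabet.
void candidate_for(int blk, int thr, int a, int b2, int c, int d, char out[7]) {
    const char alphabet[27] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
    const char digits[11]   = "0123456789";
    out[0] = alphabet[blk];
    out[1] = alphabet[thr];
    out[2] = digits[a];
    out[3] = digits[b2];
    out[4] = digits[c];
    out[5] = digits[d];
    out[6] = '\0';
}

int main() {
    char pw[7];
    candidate_for(10, 17, 2, 2, 0, 0, pw);  // block 'K', thread 'R', digits 2200
    printf("%s\n", pw);                     // prints KR2200
    return 0;
}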
28012bd77f906b1c5ced9a51b661e67f76c86828.hip
// !!! This is a file automatically generated by hipify!!! #include "Convolution.cuh" // Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT #ifndef WIN64 __device__ cufftCallbackLoadC myOwnCallbackPtr = cbComplexPointwiseMul; #endif void findBlockSize(long long iFrames, int M, size_t *blockSize, int *blockNum) { /*Finding block size/number*/ int myExp = ceil(log2((float)(iFrames + M))); while (pow(2, myExp) > INT_MAX) { myExp--; } size_t smallerBlockSize = pow(2, myExp); *blockNum = 1; size_t workspace; CHECK_CUFFT_ERRORS(hipfftEstimate1d(smallerBlockSize, HIPFFT_R2C, 2, &workspace)); /*Look for block size worth with 2 complex arrays Multiply by 4 to leave some room*/ while (getFreeSize() < workspace + (smallerBlockSize / 2 + 1) * 8L * 4L) { myExp--; smallerBlockSize = pow(2, myExp); (*blockNum)++; CHECK_CUFFT_ERRORS(hipfftEstimate1d(smallerBlockSize, HIPFFT_R2C, 2, &workspace)); } fprintf(stderr, "blockSize: %i\t numBlocks: %i\n", smallerBlockSize, *blockNum); *blockSize = smallerBlockSize; } void mismatchedConvolve(passable *p) { flags flag = p->type; long long paddedSize = p->paddedSize; float *d_ibuf = p->input->d_buf; float *d_rbuf = p->reverb->d_buf; /*Create forward FFT plan*/ hipfftHandle plan; CHECK_CUFFT_ERRORS(hipfftCreate(&plan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&plan, paddedSize, HIPFFT_R2C, 1)); /*Create inverse FFT plan*/ hipfftHandle outplan; CHECK_CUFFT_ERRORS(hipfftCreate(&outplan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&outplan, paddedSize, HIPFFT_C2R, 1)); /*Transform Input Signal*/ CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal *)d_ibuf, (hipfftComplex*)d_ibuf)); if (flag == stereo_mono) { Print("Transforming Ch 2 of input\n"); CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal *)d_ibuf + paddedSize, (hipfftComplex*)d_ibuf + paddedSize / 2 + 1)); } /*Transform Filter Signal*/ CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal*)d_rbuf, (hipfftComplex*)d_rbuf)); if (flag == mono_stereo) { Print("Transforming Ch 2 of reverb\n"); CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal *)d_rbuf + paddedSize, (hipfftComplex*)d_rbuf + paddedSize / 2 + 1)); } #if defined WIN64 || CB == 0 /*NO CB VERSION*/ /*CONVOLUTION*/ int blockSize = 256; int numBlocks = (paddedSize / 2 + 1 + blockSize - 1) / blockSize; if (flag == mono_stereo) { Print("Convolving & Inverse Transforming for stereo reverb\n"); ComplexPointwiseMul << < numBlocks, blockSize >> > ((hipfftComplex*)d_rbuf, (hipfftComplex*)d_ibuf, paddedSize / 2 + 1); ComplexPointwiseMul << < numBlocks, blockSize >> > ((hipfftComplex*)d_rbuf + paddedSize / 2 + 1, (hipfftComplex*)d_ibuf, paddedSize / 2 + 1); } else { ComplexPointwiseMul << < numBlocks, blockSize >> > ((hipfftComplex*)d_ibuf, (hipfftComplex*)d_rbuf, paddedSize / 2 + 1); ComplexPointwiseMul << < numBlocks, blockSize >> > ((hipfftComplex*)d_ibuf + paddedSize / 2 + 1, (hipfftComplex*)d_rbuf, paddedSize / 2 + 1); } #else /*Copy over the host copy of callback function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ if (flag == stereo_mono) { CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_rbuf)); } else { CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_ibuf)); } #endif if (flag == stereo_mono) { CHECK_CUFFT_ERRORS(hipfftExecC2R(outplan, 
(hipfftComplex*)d_ibuf, d_ibuf)); CHECK_CUFFT_ERRORS(hipfftExecC2R(outplan, (hipfftComplex*)d_ibuf + paddedSize / 2 + 1, d_ibuf + paddedSize)); } else { CHECK_CUFFT_ERRORS(hipfftExecC2R(outplan, (hipfftComplex*)d_rbuf, d_rbuf)); CHECK_CUFFT_ERRORS(hipfftExecC2R(outplan, (hipfftComplex*)d_rbuf + paddedSize / 2 + 1, d_rbuf + paddedSize)); } checkCudaErrors(hipfftDestroy(plan)); checkCudaErrors(hipfftDestroy(outplan)); } void convolve(float *d_ibuf, float *d_rbuf, long long paddedSize) { /*Create forward FFT plan*/ hipfftHandle plan; CHECK_CUFFT_ERRORS(hipfftCreate(&plan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&plan, paddedSize, HIPFFT_R2C, 1)); /*Create inverse FFT plan*/ hipfftHandle outplan; CHECK_CUFFT_ERRORS(hipfftCreate(&outplan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&outplan, paddedSize, HIPFFT_C2R, 1)); /*Transform Complex Signal*/ CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal*)d_ibuf, (hipfftComplex*)d_ibuf)); /*Transform Filter Signal*/ CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal*)d_rbuf, (hipfftComplex*)d_rbuf)); #if defined WIN64 || CB == 0 /*NO CB VERSION*/ /*CONVOLUTION*/ int blockSize = 256; int numBlocks = (paddedSize + blockSize - 1) / blockSize; ComplexPointwiseMul << < numBlocks, blockSize >> > ((hipfftComplex*)d_ibuf, (hipfftComplex*)d_rbuf, paddedSize / 2 + 1); getLastCudaError("Kernel execution failed [ ComplexPointwiseMul]"); #else /*Copy over the host copy of callback function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_rbuf)); #endif CHECK_CUFFT_ERRORS(hipfftExecC2R(outplan, (hipfftComplex*)d_ibuf, (hipfftReal*)d_ibuf)); checkCudaErrors(hipfftDestroy(plan)); checkCudaErrors(hipfftDestroy(outplan)); } /*Assumes that d_buf contains paddedSize * 2 elements. Input is in first half, filter is in second half, and both are padded*/ void convolveBatched(float *d_buf, long long paddedSize) { float *d_rbuf = d_buf + paddedSize + 2; /*Create forward FFT plan*/ hipfftHandle plan; CHECK_CUFFT_ERRORS(hipfftCreate(&plan)); /*hipfftResult hipfftPlanMany(hipfftHandle *plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, hipfftType type, int batch);*/ /*stride = skip length. 
Ex 1 = every element, 2 = every other element*/ /*use for interleaving???*/ /*idist/odist is space between batches of transforms*/ /*need to check if odist is in terms of complex numbers or floats*/ /*inembed/onembed are for 2D/3D, num elements per dimension*/ int n = paddedSize; CHECK_CUFFT_ERRORS( hipfftPlanMany(&plan, 1, &n, &n, 1, n + 2, &n, 1, n / 2 + 1, HIPFFT_R2C, 2) ) /*Create inverse FFT plan*/ hipfftHandle outplan; CHECK_CUFFT_ERRORS(hipfftCreate(&outplan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&outplan, paddedSize, HIPFFT_C2R, 1)); /*Transform Complex Signal*/ CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal*)d_buf, (hipfftComplex*)d_buf)); #if defined WIN64 || CB == 0 /*NO CB VERSION*/ /*CONVOLUTION*/ int blockSize = 256; int numBlocks = (paddedSize + blockSize - 1) / blockSize; ComplexPointwiseMul << < numBlocks, blockSize >> > ((hipfftComplex*)d_buf, (hipfftComplex*)d_rbuf, paddedSize / 2 + 1); getLastCudaError("Kernel execution failed [ ComplexPointwiseMul]"); #else /*Copy over the host copy of callback function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_rbuf)); #endif CHECK_CUFFT_ERRORS(hipfftExecC2R(outplan, (hipfftComplex*)d_buf, (hipfftReal*)d_buf)); checkCudaErrors(hipfftDestroy(plan)); checkCudaErrors(hipfftDestroy(outplan)); } void overlapAdd(float *d_ibuf, hipfftComplex *d_rbuf, long long iFrames, long long M, long long blockSize, int blockNum, hipfftHandle plan, hipfftHandle outplan) { float *d_block; long long L = blockSize - M; int numThreads = 256; int numBlocks = (M + numThreads - 1) / numThreads; checkCudaErrors(hipMalloc(&d_block, (blockSize / 2 + 1) * sizeof(hipfftComplex))); for (int blockNo = 0; blockNo < blockNum; blockNo++) { long long cpyAmount = L; if (blockNo == blockNum && iFrames != cpyAmount) { cpyAmount = iFrames % L; } /*1/5/11/17 - Copy buf(N * L, L) -> sig[0]. cpyAmount becomes R at the end. 
N = 0 initially*/ //fprintf(stderr, "Copy(block, obuf[%'i], %'i)\n", L * blockNo, cpyAmount); checkCudaErrors(hipMemcpy(d_block, &d_ibuf[L * blockNo], cpyAmount * sizeof(float), hipMemcpyDeviceToDevice)); if (blockNo != 0) { /*6/12/18 - Copy sig(L, M) -> buf[N * L]*/ //fprintf(stderr, "Copy(obuf[%'i], block[%'i], %'i)\n", L * blockNo, L, M); checkCudaErrors(hipMemcpy(&d_ibuf[L * blockNo], &d_block[L], M * sizeof(float), hipMemcpyDeviceToDevice)); } /*2/7/13/19 - Pad sig(L, M) with 0's, cpyAmount becomes R at the end*/ fillWithZeroes(&d_block, cpyAmount, blockSize); /*Transform signal*/ CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal *)d_block, (hipfftComplex*)d_block)); #if defined WIN64 || CB == 0 /*CONVOLUTION*/ /*3/8/14/20*/ numBlocks = (blockSize / 2 + numThreads) / numThreads; ComplexPointwiseMul << < numBlocks, numThreads >> > ((hipfftComplex*)d_block, (hipfftComplex*)d_rbuf, blockSize / 2 + 1); getLastCudaError("Kernel execution failed [ ComplexPointwiseMul]"); #endif /*IFFT*/ CHECK_CUFFT_ERRORS(hipfftExecC2R(outplan, (hipfftComplex*)d_block, (hipfftReal*)d_block)); if (blockNo != 0) { /* 9/15/21 - Point-wise add sig(0,M) + buf[N*L]*/ PointwiseAdd << <numBlocks, numThreads >> > ((float*)d_block, &d_ibuf[blockNo * L], M); } checkCudaErrors(hipDeviceSynchronize()); /*Corner case where only one block*/ if (blockNo == 0 && blockNo == blockNum - 1) { checkCudaErrors(hipMemcpy(d_ibuf, d_block, (cpyAmount + M) * sizeof(float), hipMemcpyDeviceToDevice)); break; } /*Initial case*/ if (blockNo == 0) { /*4 - Copy sig(0,L) -> buf[0]*/ checkCudaErrors(hipMemcpy(d_ibuf, d_block, L * sizeof(float), hipMemcpyDeviceToDevice)); } /*Last case*/ if (blockNo == blockNum - 1) { //fprintf(stderr, "Copy(obuf[%'i], block[%'i], %'i)\n", blockNo * L + M, M, cpyAmount); checkCudaErrors(hipMemcpy(&d_ibuf[blockNo * L + M], &d_block[M], cpyAmount * sizeof(float), hipMemcpyDeviceToDevice)); } /*Every other case*/ if (blockNo != 0 && blockNo < blockNum) { /*10/16 - Copy sig(M, L-M) -> buf[N * L + M]*/ checkCudaErrors(hipMemcpy(&d_ibuf[blockNo * L + M], &d_block[M], (L - M) * sizeof(float), hipMemcpyDeviceToDevice)); } } checkCudaErrors(hipFree(d_block)); } float *blockConvolution(passable *p) { float *d_ibuf = p->input->d_buf; float *rbuf = p->reverb->buf; hipfftComplex *d_filter_complex; float *d_obuf = d_ibuf, *obuf; long long rFrames = p->reverb->frames; long long iFrames = p->input->frames; long long oFrames = rFrames + iFrames - 1; flags flag = p->type; int oCh = flag == mono_mono ? 
1 : 2; float minmax, minmax2; hipEvent_t start, stop; int M = rFrames - 1; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); size_t blockSize = iFrames; int blockNum = 0; /*Find block size and store in blockSize and blockNum*/ findBlockSize(iFrames, M, &blockSize, &blockNum); /*Allocating memory for output*/ Print("Allocating memory for output\n"); checkCudaErrors(hipHostMalloc((void**)&obuf, oFrames * oCh * sizeof(float))); /*Find peak of input signal*/ Print("Finding peak of input signal\n"); minmax = DExtrema(d_ibuf, oFrames * p->input->channels); /*TRANSFORMING FILTER*/ /*Allocating Memory*/ Print("Allocating memory\n"); int ch = p->reverb->channels; checkCudaErrors(hipMalloc(&d_filter_complex, (blockSize / 2 + 1) * ch * sizeof(hipfftComplex))); /*Block/Thread sizes for kernels*/ int numThreads = 256; int numBlocks = (blockSize + 2 - rFrames + numThreads - 1) / numThreads; hipStream_t stream[4]; for (int i = 0; i < 4; i++) { checkCudaErrors(hipStreamCreate(&stream[i])); } /* Copy over filter */ Print("Copying over filter\n"); FillWithZeros << <numBlocks, numThreads, 0, stream[0] >> > ((float*)d_filter_complex, rFrames, blockSize + 2); if (ch == 2) { FillWithZeros << <numBlocks, numThreads, 0, stream[1] >> > ((float*)d_filter_complex + blockSize + 2, rFrames, blockSize * 2 + 4); checkCudaErrors(hipMemcpyAsync((float*)d_filter_complex + blockSize + 2, rbuf + rFrames, rFrames * sizeof(float), hipMemcpyHostToDevice, stream[2])); } checkCudaErrors(hipMemcpyAsync((float*)d_filter_complex, rbuf, rFrames * sizeof(float), hipMemcpyHostToDevice, stream[3])); /*Create cuFFT plan*/ Print("Creating FFT plans\n"); hipfftHandle plan; CHECK_CUFFT_ERRORS(hipfftCreate(&plan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&plan, blockSize, HIPFFT_R2C, 1)); /*Plans*/ hipfftHandle outplan; CHECK_CUFFT_ERRORS(hipfftCreate(&outplan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&outplan, blockSize, HIPFFT_C2R, 1)); #if defined WIN64 || CB == 0 #else /*Create host pointer to CB Function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_filter_complex)); #endif for (int i = 0; i < 4; i++) { checkCudaErrors(hipStreamSynchronize(stream[i])); } checkCudaErrors(hipHostFree(rbuf)); /*Transform Filter*/ Print("Transforming filter\n"); CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal *)d_filter_complex, (hipfftComplex*)d_filter_complex)); if (ch == 2) { CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal *)d_filter_complex + blockSize + 2, (hipfftComplex*)d_filter_complex + blockSize / 2 + 1)); } /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_mono) { Print("stereo_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else { Print("mono_stereo Convolving\n"); checkCudaErrors(hipMemcpy(d_obuf + oFrames, d_obuf, 
oFrames * sizeof(float), hipMemcpyDeviceToDevice)); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } checkCudaErrors(hipFree(d_filter_complex)); CHECK_CUFFT_ERRORS(hipfftDestroy(plan)); CHECK_CUFFT_ERRORS(hipfftDestroy(outplan)); /*Find peak of output*/ Print("Find peak of output\n"); minmax2 = DExtrema(d_obuf, oFrames * oCh); float scale = minmax / minmax2; long long end = oFrames * oCh; fprintf(stderr, "end: %lli\n", end); /*Block/Thread sizes for kernels*/ blockSize = 512; numBlocks = (end + blockSize - 1) / blockSize; // RealFloatScale << < numBlocks, blockSize>> > (d_obuf, end, scale); // checkCudaErrors(hipMemcpy(obuf, d_obuf, end * sizeof(float), hipMemcpyDeviceToHost)); /*Asynchronous copy & scale */ const int nStreams = 4; int streamSize = (end + nStreams - 1) / nStreams; int streamBytes = streamSize * sizeof(float); numBlocks = (streamSize + blockSize - 1) / blockSize; Print("Scaling and copying\n"); for (int i = 0; i < nStreams; ++i) { long long offset = i * streamSize; /*Run scale kernel*/ RealFloatScaleConcurrent << < numBlocks, blockSize, 0, stream[i] >> > (d_obuf, end, streamSize, scale, offset); /*Copy device memory to host asynchronously*/ if (i == nStreams - 1) { streamBytes = sizeof(float) * (end - offset); } checkCudaErrors(hipMemcpyAsync(&obuf[offset], &d_obuf[offset], streamBytes, hipMemcpyDeviceToHost, stream[i])); } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); fprintf(stderr, "Time for GPU convolution: %f ms\n", milliseconds); checkCudaErrors(hipFree(d_obuf)); return obuf; } float *convolution(passable *p) { float *d_ibuf = p->input->d_buf; float *d_rbuf = p->reverb->d_buf; float *d_obuf = d_ibuf; float *obuf; flags flag = p->type; int oCh = flag == mono_mono ? 
1 : 2; long long paddedSize = p->paddedSize; float minmax, minmax2; hipEvent_t start, stop; //printMe(p); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); /*Allocating host memory for output*/ Print("Allocating host memory for output\n"); checkCudaErrors(hipHostMalloc((void**)&obuf, paddedSize * oCh * sizeof(float))); /*Find peak of input signal*/ Print("Finding peak of input signal\n"); minmax = DExtrema(d_ibuf, paddedSize * p->input->channels); /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); convolve(d_ibuf, d_rbuf, paddedSize); //convolveBatched(d_ibuf, paddedSize); // not doing batched because it is very slightly slower (~20 ms) } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); convolve(d_ibuf, d_rbuf, paddedSize); convolve(d_ibuf + paddedSize, d_rbuf + paddedSize, paddedSize); } else { mismatchedConvolve(p); if (flag == mono_stereo) { d_obuf = d_rbuf; } } /*Find peak of output*/ Print("Find peak of output\n"); minmax2 = DExtrema(d_obuf, paddedSize * oCh); float scale = minmax / minmax2; long long end = paddedSize * oCh; /*Block/Thread sizes for kernels*/ int blockSize = 512; int numBlocks = (end + blockSize - 1) / blockSize; /*Asynchronous copy & scale */ const int nStreams = 4; int streamSize = (end + nStreams - 1) / nStreams; int streamBytes = streamSize * sizeof(float); numBlocks = (streamSize + blockSize - 1) / blockSize; /*Create streams*/ Print("Creating streams\n"); hipStream_t stream[nStreams]; for (int i = 0; i < nStreams; ++i) { checkCudaErrors(hipStreamCreate(&stream[i])); } Print("Scaling and copying\n"); for (int i = 0; i < nStreams; ++i) { long long offset = i * streamSize; /*Run scale kernel*/ RealFloatScaleConcurrent << < numBlocks, blockSize, 0, stream[i] >> > (d_obuf, end, streamSize, scale, offset); /*Copy device memory to host asynchronously*/ if (i == nStreams - 1) { streamBytes = sizeof(float) * (end - offset); } checkCudaErrors(hipMemcpyAsync(&obuf[offset], &d_obuf[offset], streamBytes, hipMemcpyDeviceToHost, stream[i])); } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); fprintf(stderr, "Time for GPU convolution: %f ms\n", milliseconds); checkCudaErrors(hipFree(d_ibuf)); checkCudaErrors(hipFree(d_rbuf)); return obuf; } void blockProcess(passable* p) { float* d_ibuf = p->input->d_buf; float* rbuf = p->reverb->buf; hipfftComplex* d_filter_complex; float* d_obuf = d_ibuf; long long rFrames = p->reverb->frames; long long iFrames = p->input->frames; long long oFrames = rFrames + iFrames - 1; flags flag = p->type; int M = rFrames - 1; size_t blockSize = iFrames; int blockNum = 0; /*Find block size and store in blockSize and blockNum*/ findBlockSize(iFrames, M, &blockSize, &blockNum); /*TRANSFORMING FILTER*/ /*Allocating Memory*/ Print("Allocating memory\n"); int ch = p->reverb->channels; checkCudaErrors(hipMalloc(&d_filter_complex, (blockSize / 2 + 1) * ch * sizeof(hipfftComplex))); /*Block/Thread sizes for kernels*/ int numThreads = 256; int numBlocks = (blockSize + 2 - rFrames + numThreads - 1) / numThreads; hipStream_t stream[4]; for (int i = 0; i < 4; i++) { checkCudaErrors(hipStreamCreate(&stream[i])); } /* Copy over filter */ Print("Copying over filter\n"); FillWithZeros << <numBlocks, numThreads, 0, stream[0] >> > ((float*)d_filter_complex, rFrames, blockSize + 2); if (ch == 2) { FillWithZeros << <numBlocks, numThreads, 0, stream[1] >> > ((float*)d_filter_complex + blockSize + 2, rFrames, blockSize * 2 + 4); 
checkCudaErrors(hipMemcpyAsync((float*)d_filter_complex + blockSize + 2, rbuf + rFrames, rFrames * sizeof(float), hipMemcpyHostToDevice, stream[2])); } checkCudaErrors(hipMemcpyAsync((float*)d_filter_complex, rbuf, rFrames * sizeof(float), hipMemcpyHostToDevice, stream[3])); /*Create cuFFT plan*/ Print("Creating FFT plans\n"); hipfftHandle plan; CHECK_CUFFT_ERRORS(hipfftCreate(&plan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&plan, blockSize, HIPFFT_R2C, 1)); /*Plans*/ hipfftHandle outplan; CHECK_CUFFT_ERRORS(hipfftCreate(&outplan)); CHECK_CUFFT_ERRORS(hipfftPlan1d(&outplan, blockSize, HIPFFT_C2R, 1)); #if defined WIN64 || CB == 0 #else /*Create host pointer to CB Function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void**)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void**)&d_filter_complex)); #endif for (int i = 0; i < 4; i++) { checkCudaErrors(hipStreamSynchronize(stream[i])); } checkCudaErrors(hipHostFree(rbuf)); /*Transform Filter*/ Print("Transforming filter\n"); CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal*)d_filter_complex, (hipfftComplex*)d_filter_complex)); if (ch == 2) { CHECK_CUFFT_ERRORS(hipfftExecR2C(plan, (hipfftReal*)d_filter_complex + blockSize + 2, (hipfftComplex*)d_filter_complex + blockSize / 2 + 1)); } /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_mono) { Print("stereo_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else { Print("mono_stereo Convolving\n"); checkCudaErrors(hipMemcpy(d_obuf + oFrames, d_obuf, oFrames * sizeof(float), hipMemcpyDeviceToDevice)); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } checkCudaErrors(hipFree(d_filter_complex)); CHECK_CUFFT_ERRORS(hipfftDestroy(plan)); CHECK_CUFFT_ERRORS(hipfftDestroy(outplan)); } void convolutionPicker(passable* p) { } void process(passable* p) { float* d_ibuf = p->input->d_buf; float* d_rbuf = p->reverb->d_buf; float* d_obuf = d_ibuf; long long paddedSize = p->paddedSize; flags flag = p->type; /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); //convolve(d_ibuf, d_rbuf, paddedSize); convolveBatched(d_ibuf, paddedSize); } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); convolve(d_ibuf, d_rbuf, paddedSize); convolve(d_ibuf + paddedSize, d_rbuf + paddedSize, paddedSize); } else { mismatchedConvolve(p); if (flag == mono_stereo) { d_obuf = d_rbuf; } } } void asyncCopyScale(passable* p, float *obuf, long long end, float scale) { float* d_obuf = p->input->d_buf; /*Block/Thread sizes for kernels*/ int blockSize = 512; int numBlocks = (end + blockSize - 1) / blockSize; /*Asynchronous copy & scale */ const int nStreams = 4; int streamSize = (end + nStreams - 1) / 
nStreams; int streamBytes = streamSize * sizeof(float); numBlocks = (streamSize + blockSize - 1) / blockSize; /*Create streams*/ Print("Creating streams\n"); hipStream_t stream[nStreams]; for (int i = 0; i < nStreams; ++i) { checkCudaErrors(hipStreamCreate(&stream[i])); } Print("Scaling and copying\n"); for (int i = 0; i < nStreams; ++i) { long long offset = i * streamSize; /*Run scale kernel*/ RealFloatScaleConcurrent << < numBlocks, blockSize, 0, stream[i] >> > (d_obuf, end, streamSize, scale, offset); /*Copy device memory to host asynchronously*/ if (i == nStreams - 1) { streamBytes = sizeof(float) * (end - offset); } checkCudaErrors(hipMemcpyAsync(&obuf[offset], &d_obuf[offset], streamBytes, hipMemcpyDeviceToHost, stream[i])); } } float* convolutionWrapper(passable* p, bool blockProcessingOn) { float* d_ibuf = p->input->d_buf; float* d_rbuf = p->reverb->d_buf; float* d_obuf = d_ibuf; float* obuf; flags flag = p->type; int oCh = flag == mono_mono ? 1 : 2; long long paddedSize = p->paddedSize; float minmax, minmax2; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); /*Allocating host memory for output*/ Print("Allocating host memory for output\n"); checkCudaErrors(hipHostMalloc((void**)&obuf, paddedSize * oCh * sizeof(float))); /*Find peak of input signal*/ Print("Finding peak of input signal\n"); minmax = DExtrema(d_ibuf, paddedSize * oCh); /*Performing Convolution*/ if (blockProcessingOn) { blockProcess(p); } else { process(p); } /*Find peak of output*/ Print("Find peak of output\n"); minmax2 = DExtrema(d_obuf, paddedSize * oCh); float scale = minmax / minmax2; long long end = paddedSize * oCh; asyncCopyScale(p, obuf, end, scale); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); fprintf(stderr, "Time for GPU convolution: %f ms\n", milliseconds); checkCudaErrors(hipFree(d_ibuf)); checkCudaErrors(hipFree(d_rbuf)); return obuf; }
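findBlockSize and overlapAdd above partition the signal into power-of-two FFT blocks in which L = blockSize - M samples carry fresh input and M = rFrames - 1 samples overlap with the previous block. A small host-side sketch of that arithmetic follows; it also computes the length of the final partial block directly (the remainder branch inside overlapAdd is guarded by blockNo == blockNum, which never holds for blockNo < blockNum, so the remainder is easiest to see this way). Names are illustrative.

#include <cstdio>

// Given input length iFrames and filter length rFrames, describe the overlap-add
// partition used by the block convolution path above. blockSize is assumed to be
// a power of two strictly greater than M.
void describe_partition(long long iFrames, long long rFrames, long long blockSize) {
    const long long M = rFrames - 1;           // overlap carried between blocks
    const long long L = blockSize - M;         // fresh input samples per block
    const long long fullBlocks = iFrames / L;
    const long long remainder  = iFrames % L;  // frames in the final partial block, if any
    const long long numBlocks  = fullBlocks + (remainder ? 1 : 0);

    printf("M=%lld L=%lld blocks=%lld (last block carries %lld frames)\n",
           M, L, numBlocks, remainder ? remainder : L);
}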
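asyncCopyScale above overlaps the peak-normalization kernel with the device-to-host copy by splitting the output across four streams. The sketch below isolates that pattern; scale_chunk is a simplified stand-in for RealFloatScaleConcurrent (whose definition is outside this file), and pinned host memory for h_buf, as allocated with hipHostMalloc above, is assumed.

#include <cuda_runtime.h>

// Scale the elements in [offset, offset + chunk) of buf, clamped to total.
__global__ void scale_chunk(float *buf, long long total, long long chunk,
                            float scale, long long offset) {
    long long i = offset + blockIdx.x * (long long)blockDim.x + threadIdx.x;
    if (i < total && i < offset + chunk) buf[i] *= scale;
}

// Scale d_buf by `scale` and copy it into pinned host memory h_buf, one chunk per stream,
// so the copy of one chunk can overlap the scaling of the next.
void scale_and_copy(float *d_buf, float *h_buf, long long total, float scale) {
    const int nStreams = 4, threads = 512;
    const long long chunk = (total + nStreams - 1) / nStreams;
    cudaStream_t streams[nStreams];
    for (int i = 0; i < nStreams; ++i) cudaStreamCreate(&streams[i]);

    for (int i = 0; i < nStreams; ++i) {
        const long long offset = (long long)i * chunk;
        const long long len = (offset + chunk > total) ? total - offset : chunk;
        if (len <= 0) continue;  // nothing left for this stream
        const int blocks = (int)((len + threads - 1) / threads);
        scale_chunk<<<blocks, threads, 0, streams[i]>>>(d_buf, total, chunk, scale, offset);
        cudaMemcpyAsync(h_buf + offset, d_buf + offset, len * sizeof(float),
                        cudaMemcpyDeviceToHost, streams[i]);
    }
    for (int i = 0; i < nStreams; ++i) {
        cudaStreamSynchronize(streams[i]);
        cudaStreamDestroy(streams[i]);
    }
}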
28012bd77f906b1c5ced9a51b661e67f76c86828.cu
#include "Convolution.cuh" // Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT #ifndef WIN64 __device__ cufftCallbackLoadC myOwnCallbackPtr = cbComplexPointwiseMul; #endif void findBlockSize(long long iFrames, int M, size_t *blockSize, int *blockNum) { /*Finding block size/number*/ int myExp = ceil(log2((float)(iFrames + M))); while (pow(2, myExp) > INT_MAX) { myExp--; } size_t smallerBlockSize = pow(2, myExp); *blockNum = 1; size_t workspace; CHECK_CUFFT_ERRORS(cufftEstimate1d(smallerBlockSize, CUFFT_R2C, 2, &workspace)); /*Look for block size worth with 2 complex arrays Multiply by 4 to leave some room*/ while (getFreeSize() < workspace + (smallerBlockSize / 2 + 1) * 8L * 4L) { myExp--; smallerBlockSize = pow(2, myExp); (*blockNum)++; CHECK_CUFFT_ERRORS(cufftEstimate1d(smallerBlockSize, CUFFT_R2C, 2, &workspace)); } fprintf(stderr, "blockSize: %i\t numBlocks: %i\n", smallerBlockSize, *blockNum); *blockSize = smallerBlockSize; } void mismatchedConvolve(passable *p) { flags flag = p->type; long long paddedSize = p->paddedSize; float *d_ibuf = p->input->d_buf; float *d_rbuf = p->reverb->d_buf; /*Create forward FFT plan*/ cufftHandle plan; CHECK_CUFFT_ERRORS(cufftCreate(&plan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&plan, paddedSize, CUFFT_R2C, 1)); /*Create inverse FFT plan*/ cufftHandle outplan; CHECK_CUFFT_ERRORS(cufftCreate(&outplan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&outplan, paddedSize, CUFFT_C2R, 1)); /*Transform Input Signal*/ CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal *)d_ibuf, (cufftComplex*)d_ibuf)); if (flag == stereo_mono) { Print("Transforming Ch 2 of input\n"); CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal *)d_ibuf + paddedSize, (cufftComplex*)d_ibuf + paddedSize / 2 + 1)); } /*Transform Filter Signal*/ CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal*)d_rbuf, (cufftComplex*)d_rbuf)); if (flag == mono_stereo) { Print("Transforming Ch 2 of reverb\n"); CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal *)d_rbuf + paddedSize, (cufftComplex*)d_rbuf + paddedSize / 2 + 1)); } #if defined WIN64 || CB == 0 /*NO CB VERSION*/ /*CONVOLUTION*/ int blockSize = 256; int numBlocks = (paddedSize / 2 + 1 + blockSize - 1) / blockSize; if (flag == mono_stereo) { Print("Convolving & Inverse Transforming for stereo reverb\n"); ComplexPointwiseMul << < numBlocks, blockSize >> > ((cufftComplex*)d_rbuf, (cufftComplex*)d_ibuf, paddedSize / 2 + 1); ComplexPointwiseMul << < numBlocks, blockSize >> > ((cufftComplex*)d_rbuf + paddedSize / 2 + 1, (cufftComplex*)d_ibuf, paddedSize / 2 + 1); } else { ComplexPointwiseMul << < numBlocks, blockSize >> > ((cufftComplex*)d_ibuf, (cufftComplex*)d_rbuf, paddedSize / 2 + 1); ComplexPointwiseMul << < numBlocks, blockSize >> > ((cufftComplex*)d_ibuf + paddedSize / 2 + 1, (cufftComplex*)d_rbuf, paddedSize / 2 + 1); } #else /*Copy over the host copy of callback function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ if (flag == stereo_mono) { CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_rbuf)); } else { CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_ibuf)); } #endif if (flag == stereo_mono) { CHECK_CUFFT_ERRORS(cufftExecC2R(outplan, (cufftComplex*)d_ibuf, d_ibuf)); CHECK_CUFFT_ERRORS(cufftExecC2R(outplan, (cufftComplex*)d_ibuf + 
paddedSize / 2 + 1, d_ibuf + paddedSize)); } else { CHECK_CUFFT_ERRORS(cufftExecC2R(outplan, (cufftComplex*)d_rbuf, d_rbuf)); CHECK_CUFFT_ERRORS(cufftExecC2R(outplan, (cufftComplex*)d_rbuf + paddedSize / 2 + 1, d_rbuf + paddedSize)); } checkCudaErrors(cufftDestroy(plan)); checkCudaErrors(cufftDestroy(outplan)); } void convolve(float *d_ibuf, float *d_rbuf, long long paddedSize) { /*Create forward FFT plan*/ cufftHandle plan; CHECK_CUFFT_ERRORS(cufftCreate(&plan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&plan, paddedSize, CUFFT_R2C, 1)); /*Create inverse FFT plan*/ cufftHandle outplan; CHECK_CUFFT_ERRORS(cufftCreate(&outplan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&outplan, paddedSize, CUFFT_C2R, 1)); /*Transform Complex Signal*/ CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal*)d_ibuf, (cufftComplex*)d_ibuf)); /*Transform Filter Signal*/ CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal*)d_rbuf, (cufftComplex*)d_rbuf)); #if defined WIN64 || CB == 0 /*NO CB VERSION*/ /*CONVOLUTION*/ int blockSize = 256; int numBlocks = (paddedSize + blockSize - 1) / blockSize; ComplexPointwiseMul << < numBlocks, blockSize >> > ((cufftComplex*)d_ibuf, (cufftComplex*)d_rbuf, paddedSize / 2 + 1); getLastCudaError("Kernel execution failed [ ComplexPointwiseMul]"); #else /*Copy over the host copy of callback function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_rbuf)); #endif CHECK_CUFFT_ERRORS(cufftExecC2R(outplan, (cufftComplex*)d_ibuf, (cufftReal*)d_ibuf)); checkCudaErrors(cufftDestroy(plan)); checkCudaErrors(cufftDestroy(outplan)); } /*Assumes that d_buf contains paddedSize * 2 elements. Input is in first half, filter is in second half, and both are padded*/ void convolveBatched(float *d_buf, long long paddedSize) { float *d_rbuf = d_buf + paddedSize + 2; /*Create forward FFT plan*/ cufftHandle plan; CHECK_CUFFT_ERRORS(cufftCreate(&plan)); /*cufftResult cufftPlanMany(cufftHandle *plan, int rank, int *n, int *inembed, int istride, int idist, int *onembed, int ostride, int odist, cufftType type, int batch);*/ /*stride = skip length. 
Ex 1 = every element, 2 = every other element*/ /*use for interleaving???*/ /*idist/odist is space between batches of transforms*/ /*need to check if odist is in terms of complex numbers or floats*/ /*inembed/onembed are for 2D/3D, num elements per dimension*/ int n = paddedSize; CHECK_CUFFT_ERRORS( cufftPlanMany(&plan, 1, &n, &n, 1, n + 2, &n, 1, n / 2 + 1, CUFFT_R2C, 2) ) /*Create inverse FFT plan*/ cufftHandle outplan; CHECK_CUFFT_ERRORS(cufftCreate(&outplan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&outplan, paddedSize, CUFFT_C2R, 1)); /*Transform Complex Signal*/ CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal*)d_buf, (cufftComplex*)d_buf)); #if defined WIN64 || CB == 0 /*NO CB VERSION*/ /*CONVOLUTION*/ int blockSize = 256; int numBlocks = (paddedSize + blockSize - 1) / blockSize; ComplexPointwiseMul << < numBlocks, blockSize >> > ((cufftComplex*)d_buf, (cufftComplex*)d_rbuf, paddedSize / 2 + 1); getLastCudaError("Kernel execution failed [ ComplexPointwiseMul]"); #else /*Copy over the host copy of callback function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_rbuf)); #endif CHECK_CUFFT_ERRORS(cufftExecC2R(outplan, (cufftComplex*)d_buf, (cufftReal*)d_buf)); checkCudaErrors(cufftDestroy(plan)); checkCudaErrors(cufftDestroy(outplan)); } void overlapAdd(float *d_ibuf, cufftComplex *d_rbuf, long long iFrames, long long M, long long blockSize, int blockNum, cufftHandle plan, cufftHandle outplan) { float *d_block; long long L = blockSize - M; int numThreads = 256; int numBlocks = (M + numThreads - 1) / numThreads; checkCudaErrors(cudaMalloc(&d_block, (blockSize / 2 + 1) * sizeof(cufftComplex))); for (int blockNo = 0; blockNo < blockNum; blockNo++) { long long cpyAmount = L; if (blockNo == blockNum && iFrames != cpyAmount) { cpyAmount = iFrames % L; } /*1/5/11/17 - Copy buf(N * L, L) -> sig[0]. cpyAmount becomes R at the end. 
N = 0 initially*/ //fprintf(stderr, "Copy(block, obuf[%'i], %'i)\n", L * blockNo, cpyAmount); checkCudaErrors(cudaMemcpy(d_block, &d_ibuf[L * blockNo], cpyAmount * sizeof(float), cudaMemcpyDeviceToDevice)); if (blockNo != 0) { /*6/12/18 - Copy sig(L, M) -> buf[N * L]*/ //fprintf(stderr, "Copy(obuf[%'i], block[%'i], %'i)\n", L * blockNo, L, M); checkCudaErrors(cudaMemcpy(&d_ibuf[L * blockNo], &d_block[L], M * sizeof(float), cudaMemcpyDeviceToDevice)); } /*2/7/13/19 - Pad sig(L, M) with 0's, cpyAmount becomes R at the end*/ fillWithZeroes(&d_block, cpyAmount, blockSize); /*Transform signal*/ CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal *)d_block, (cufftComplex*)d_block)); #if defined WIN64 || CB == 0 /*CONVOLUTION*/ /*3/8/14/20*/ numBlocks = (blockSize / 2 + numThreads) / numThreads; ComplexPointwiseMul << < numBlocks, numThreads >> > ((cufftComplex*)d_block, (cufftComplex*)d_rbuf, blockSize / 2 + 1); getLastCudaError("Kernel execution failed [ ComplexPointwiseMul]"); #endif /*IFFT*/ CHECK_CUFFT_ERRORS(cufftExecC2R(outplan, (cufftComplex*)d_block, (cufftReal*)d_block)); if (blockNo != 0) { /* 9/15/21 - Point-wise add sig(0,M) + buf[N*L]*/ PointwiseAdd << <numBlocks, numThreads >> > ((float*)d_block, &d_ibuf[blockNo * L], M); } checkCudaErrors(cudaDeviceSynchronize()); /*Corner case where only one block*/ if (blockNo == 0 && blockNo == blockNum - 1) { checkCudaErrors(cudaMemcpy(d_ibuf, d_block, (cpyAmount + M) * sizeof(float), cudaMemcpyDeviceToDevice)); break; } /*Initial case*/ if (blockNo == 0) { /*4 - Copy sig(0,L) -> buf[0]*/ checkCudaErrors(cudaMemcpy(d_ibuf, d_block, L * sizeof(float), cudaMemcpyDeviceToDevice)); } /*Last case*/ if (blockNo == blockNum - 1) { //fprintf(stderr, "Copy(obuf[%'i], block[%'i], %'i)\n", blockNo * L + M, M, cpyAmount); checkCudaErrors(cudaMemcpy(&d_ibuf[blockNo * L + M], &d_block[M], cpyAmount * sizeof(float), cudaMemcpyDeviceToDevice)); } /*Every other case*/ if (blockNo != 0 && blockNo < blockNum) { /*10/16 - Copy sig(M, L-M) -> buf[N * L + M]*/ checkCudaErrors(cudaMemcpy(&d_ibuf[blockNo * L + M], &d_block[M], (L - M) * sizeof(float), cudaMemcpyDeviceToDevice)); } } checkCudaErrors(cudaFree(d_block)); } float *blockConvolution(passable *p) { float *d_ibuf = p->input->d_buf; float *rbuf = p->reverb->buf; cufftComplex *d_filter_complex; float *d_obuf = d_ibuf, *obuf; long long rFrames = p->reverb->frames; long long iFrames = p->input->frames; long long oFrames = rFrames + iFrames - 1; flags flag = p->type; int oCh = flag == mono_mono ? 
1 : 2; float minmax, minmax2; cudaEvent_t start, stop; int M = rFrames - 1; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); size_t blockSize = iFrames; int blockNum = 0; /*Find block size and store in blockSize and blockNum*/ findBlockSize(iFrames, M, &blockSize, &blockNum); /*Allocating memory for output*/ Print("Allocating memory for output\n"); checkCudaErrors(cudaMallocHost((void**)&obuf, oFrames * oCh * sizeof(float))); /*Find peak of input signal*/ Print("Finding peak of input signal\n"); minmax = DExtrema(d_ibuf, oFrames * p->input->channels); /*TRANSFORMING FILTER*/ /*Allocating Memory*/ Print("Allocating memory\n"); int ch = p->reverb->channels; checkCudaErrors(cudaMalloc(&d_filter_complex, (blockSize / 2 + 1) * ch * sizeof(cufftComplex))); /*Block/Thread sizes for kernels*/ int numThreads = 256; int numBlocks = (blockSize + 2 - rFrames + numThreads - 1) / numThreads; cudaStream_t stream[4]; for (int i = 0; i < 4; i++) { checkCudaErrors(cudaStreamCreate(&stream[i])); } /* Copy over filter */ Print("Copying over filter\n"); FillWithZeros << <numBlocks, numThreads, 0, stream[0] >> > ((float*)d_filter_complex, rFrames, blockSize + 2); if (ch == 2) { FillWithZeros << <numBlocks, numThreads, 0, stream[1] >> > ((float*)d_filter_complex + blockSize + 2, rFrames, blockSize * 2 + 4); checkCudaErrors(cudaMemcpyAsync((float*)d_filter_complex + blockSize + 2, rbuf + rFrames, rFrames * sizeof(float), cudaMemcpyHostToDevice, stream[2])); } checkCudaErrors(cudaMemcpyAsync((float*)d_filter_complex, rbuf, rFrames * sizeof(float), cudaMemcpyHostToDevice, stream[3])); /*Create cuFFT plan*/ Print("Creating FFT plans\n"); cufftHandle plan; CHECK_CUFFT_ERRORS(cufftCreate(&plan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&plan, blockSize, CUFFT_R2C, 1)); /*Plans*/ cufftHandle outplan; CHECK_CUFFT_ERRORS(cufftCreate(&outplan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&outplan, blockSize, CUFFT_C2R, 1)); #if defined WIN64 || CB == 0 #else /*Create host pointer to CB Function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_filter_complex)); #endif for (int i = 0; i < 4; i++) { checkCudaErrors(cudaStreamSynchronize(stream[i])); } checkCudaErrors(cudaFreeHost(rbuf)); /*Transform Filter*/ Print("Transforming filter\n"); CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal *)d_filter_complex, (cufftComplex*)d_filter_complex)); if (ch == 2) { CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal *)d_filter_complex + blockSize + 2, (cufftComplex*)d_filter_complex + blockSize / 2 + 1)); } /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_mono) { Print("stereo_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else { Print("mono_stereo Convolving\n"); checkCudaErrors(cudaMemcpy(d_obuf + oFrames, 
d_obuf, oFrames * sizeof(float), cudaMemcpyDeviceToDevice)); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } checkCudaErrors(cudaFree(d_filter_complex)); CHECK_CUFFT_ERRORS(cufftDestroy(plan)); CHECK_CUFFT_ERRORS(cufftDestroy(outplan)); /*Find peak of output*/ Print("Find peak of output\n"); minmax2 = DExtrema(d_obuf, oFrames * oCh); float scale = minmax / minmax2; long long end = oFrames * oCh; fprintf(stderr, "end: %lli\n", end); /*Block/Thread sizes for kernels*/ blockSize = 512; numBlocks = (end + blockSize - 1) / blockSize; // RealFloatScale << < numBlocks, blockSize>> > (d_obuf, end, scale); // checkCudaErrors(cudaMemcpy(obuf, d_obuf, end * sizeof(float), cudaMemcpyDeviceToHost)); /*Asynchronous copy & scale */ const int nStreams = 4; int streamSize = (end + nStreams - 1) / nStreams; int streamBytes = streamSize * sizeof(float); numBlocks = (streamSize + blockSize - 1) / blockSize; Print("Scaling and copying\n"); for (int i = 0; i < nStreams; ++i) { long long offset = i * streamSize; /*Run scale kernel*/ RealFloatScaleConcurrent << < numBlocks, blockSize, 0, stream[i] >> > (d_obuf, end, streamSize, scale, offset); /*Copy device memory to host asynchronously*/ if (i == nStreams - 1) { streamBytes = sizeof(float) * (end - offset); } checkCudaErrors(cudaMemcpyAsync(&obuf[offset], &d_obuf[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i])); } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); fprintf(stderr, "Time for GPU convolution: %f ms\n", milliseconds); checkCudaErrors(cudaFree(d_obuf)); return obuf; } float *convolution(passable *p) { float *d_ibuf = p->input->d_buf; float *d_rbuf = p->reverb->d_buf; float *d_obuf = d_ibuf; float *obuf; flags flag = p->type; int oCh = flag == mono_mono ? 
1 : 2; long long paddedSize = p->paddedSize; float minmax, minmax2; cudaEvent_t start, stop; //printMe(p); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); /*Allocating host memory for output*/ Print("Allocating host memory for output\n"); checkCudaErrors(cudaMallocHost((void**)&obuf, paddedSize * oCh * sizeof(float))); /*Find peak of input signal*/ Print("Finding peak of input signal\n"); minmax = DExtrema(d_ibuf, paddedSize * p->input->channels); /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); convolve(d_ibuf, d_rbuf, paddedSize); //convolveBatched(d_ibuf, paddedSize); // not doing batched because it is very slightly slower (~20 ms) } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); convolve(d_ibuf, d_rbuf, paddedSize); convolve(d_ibuf + paddedSize, d_rbuf + paddedSize, paddedSize); } else { mismatchedConvolve(p); if (flag == mono_stereo) { d_obuf = d_rbuf; } } /*Find peak of output*/ Print("Find peak of output\n"); minmax2 = DExtrema(d_obuf, paddedSize * oCh); float scale = minmax / minmax2; long long end = paddedSize * oCh; /*Block/Thread sizes for kernels*/ int blockSize = 512; int numBlocks = (end + blockSize - 1) / blockSize; /*Asynchronous copy & scale */ const int nStreams = 4; int streamSize = (end + nStreams - 1) / nStreams; int streamBytes = streamSize * sizeof(float); numBlocks = (streamSize + blockSize - 1) / blockSize; /*Create streams*/ Print("Creating streams\n"); cudaStream_t stream[nStreams]; for (int i = 0; i < nStreams; ++i) { checkCudaErrors(cudaStreamCreate(&stream[i])); } Print("Scaling and copying\n"); for (int i = 0; i < nStreams; ++i) { long long offset = i * streamSize; /*Run scale kernel*/ RealFloatScaleConcurrent << < numBlocks, blockSize, 0, stream[i] >> > (d_obuf, end, streamSize, scale, offset); /*Copy device memory to host asynchronously*/ if (i == nStreams - 1) { streamBytes = sizeof(float) * (end - offset); } checkCudaErrors(cudaMemcpyAsync(&obuf[offset], &d_obuf[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i])); } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); fprintf(stderr, "Time for GPU convolution: %f ms\n", milliseconds); checkCudaErrors(cudaFree(d_ibuf)); checkCudaErrors(cudaFree(d_rbuf)); return obuf; } void blockProcess(passable* p) { float* d_ibuf = p->input->d_buf; float* rbuf = p->reverb->buf; cufftComplex* d_filter_complex; float* d_obuf = d_ibuf; long long rFrames = p->reverb->frames; long long iFrames = p->input->frames; long long oFrames = rFrames + iFrames - 1; flags flag = p->type; int M = rFrames - 1; size_t blockSize = iFrames; int blockNum = 0; /*Find block size and store in blockSize and blockNum*/ findBlockSize(iFrames, M, &blockSize, &blockNum); /*TRANSFORMING FILTER*/ /*Allocating Memory*/ Print("Allocating memory\n"); int ch = p->reverb->channels; checkCudaErrors(cudaMalloc(&d_filter_complex, (blockSize / 2 + 1) * ch * sizeof(cufftComplex))); /*Block/Thread sizes for kernels*/ int numThreads = 256; int numBlocks = (blockSize + 2 - rFrames + numThreads - 1) / numThreads; cudaStream_t stream[4]; for (int i = 0; i < 4; i++) { checkCudaErrors(cudaStreamCreate(&stream[i])); } /* Copy over filter */ Print("Copying over filter\n"); FillWithZeros << <numBlocks, numThreads, 0, stream[0] >> > ((float*)d_filter_complex, rFrames, blockSize + 2); if (ch == 2) { FillWithZeros << <numBlocks, numThreads, 0, stream[1] >> > ((float*)d_filter_complex + blockSize + 2, rFrames, blockSize * 2 + 
4); checkCudaErrors(cudaMemcpyAsync((float*)d_filter_complex + blockSize + 2, rbuf + rFrames, rFrames * sizeof(float), cudaMemcpyHostToDevice, stream[2])); } checkCudaErrors(cudaMemcpyAsync((float*)d_filter_complex, rbuf, rFrames * sizeof(float), cudaMemcpyHostToDevice, stream[3])); /*Create cuFFT plan*/ Print("Creating FFT plans\n"); cufftHandle plan; CHECK_CUFFT_ERRORS(cufftCreate(&plan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&plan, blockSize, CUFFT_R2C, 1)); /*Plans*/ cufftHandle outplan; CHECK_CUFFT_ERRORS(cufftCreate(&outplan)); CHECK_CUFFT_ERRORS(cufftPlan1d(&outplan, blockSize, CUFFT_C2R, 1)); #if defined WIN64 || CB == 0 #else /*Create host pointer to CB Function*/ cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); /*Associate the load callback with the plan*/ CHECK_CUFFT_ERRORS(cufftXtSetCallback(outplan, (void**)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void**)&d_filter_complex)); #endif for (int i = 0; i < 4; i++) { checkCudaErrors(cudaStreamSynchronize(stream[i])); } checkCudaErrors(cudaFreeHost(rbuf)); /*Transform Filter*/ Print("Transforming filter\n"); CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal*)d_filter_complex, (cufftComplex*)d_filter_complex)); if (ch == 2) { CHECK_CUFFT_ERRORS(cufftExecR2C(plan, (cufftReal*)d_filter_complex + blockSize + 2, (cufftComplex*)d_filter_complex + blockSize / 2 + 1)); } /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } else if (flag == stereo_mono) { Print("stereo_mono Convolving\n"); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); } else { Print("mono_stereo Convolving\n"); checkCudaErrors(cudaMemcpy(d_obuf + oFrames, d_obuf, oFrames * sizeof(float), cudaMemcpyDeviceToDevice)); overlapAdd(d_obuf, d_filter_complex, iFrames, M, blockSize, blockNum, plan, outplan); overlapAdd(d_obuf + oFrames, d_filter_complex + blockSize / 2 + 1, iFrames, M, blockSize, blockNum, plan, outplan); } checkCudaErrors(cudaFree(d_filter_complex)); CHECK_CUFFT_ERRORS(cufftDestroy(plan)); CHECK_CUFFT_ERRORS(cufftDestroy(outplan)); } void convolutionPicker(passable* p) { } void process(passable* p) { float* d_ibuf = p->input->d_buf; float* d_rbuf = p->reverb->d_buf; float* d_obuf = d_ibuf; long long paddedSize = p->paddedSize; flags flag = p->type; /*Convolving*/ if (flag == mono_mono) { Print("mono_mono Convolving\n"); //convolve(d_ibuf, d_rbuf, paddedSize); convolveBatched(d_ibuf, paddedSize); } else if (flag == stereo_stereo) { Print("stereo_stereo Convolving\n"); convolve(d_ibuf, d_rbuf, paddedSize); convolve(d_ibuf + paddedSize, d_rbuf + paddedSize, paddedSize); } else { mismatchedConvolve(p); if (flag == mono_stereo) { d_obuf = d_rbuf; } } } void asyncCopyScale(passable* p, float *obuf, long long end, float scale) { float* d_obuf = p->input->d_buf; /*Block/Thread sizes for kernels*/ int blockSize = 512; int numBlocks = (end + blockSize - 1) / blockSize; /*Asynchronous copy & scale */ const int nStreams = 4; int streamSize = (end + nStreams - 1) / 
nStreams; int streamBytes = streamSize * sizeof(float); numBlocks = (streamSize + blockSize - 1) / blockSize; /*Create streams*/ Print("Creating streams\n"); cudaStream_t stream[nStreams]; for (int i = 0; i < nStreams; ++i) { checkCudaErrors(cudaStreamCreate(&stream[i])); } Print("Scaling and copying\n"); for (int i = 0; i < nStreams; ++i) { long long offset = i * streamSize; /*Run scale kernel*/ RealFloatScaleConcurrent << < numBlocks, blockSize, 0, stream[i] >> > (d_obuf, end, streamSize, scale, offset); /*Copy device memory to host asynchronously*/ if (i == nStreams - 1) { streamBytes = sizeof(float) * (end - offset); } checkCudaErrors(cudaMemcpyAsync(&obuf[offset], &d_obuf[offset], streamBytes, cudaMemcpyDeviceToHost, stream[i])); } } float* convolutionWrapper(passable* p, bool blockProcessingOn) { float* d_ibuf = p->input->d_buf; float* d_rbuf = p->reverb->d_buf; float* d_obuf = d_ibuf; float* obuf; flags flag = p->type; int oCh = flag == mono_mono ? 1 : 2; long long paddedSize = p->paddedSize; float minmax, minmax2; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); /*Allocating host memory for output*/ Print("Allocating host memory for output\n"); checkCudaErrors(cudaMallocHost((void**)&obuf, paddedSize * oCh * sizeof(float))); /*Find peak of input signal*/ Print("Finding peak of input signal\n"); minmax = DExtrema(d_ibuf, paddedSize * oCh); /*Performing Convolution*/ if (blockProcessingOn) { blockProcess(p); } else { process(p); } /*Find peak of output*/ Print("Find peak of output\n"); minmax2 = DExtrema(d_obuf, paddedSize * oCh); float scale = minmax / minmax2; long long end = paddedSize * oCh; asyncCopyScale(p, obuf, end, scale); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); fprintf(stderr, "Time for GPU convolution: %f ms\n", milliseconds); checkCudaErrors(cudaFree(d_ibuf)); checkCudaErrors(cudaFree(d_rbuf)); return obuf; }
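A note on the overlap-add bookkeeping used by findBlockSize and overlapAdd in the file above: each FFT block of blockSize samples consumes L = blockSize - M fresh input samples, carries an M = rFrames - 1 sample tail into the next block, and the full linear convolution produces iFrames + M output samples. The host-side sketch below illustrates that arithmetic only; the sizes are made up, and it does not reproduce the memory-driven search that findBlockSize performs.

#include <cstdio>

int main() {
    long long iFrames = 100000;   // hypothetical input length in samples
    long long rFrames = 4097;     // hypothetical impulse-response length
    long long M = rFrames - 1;    // overlap carried from one block to the next
    long long fftSize = 1 << 15;  // FFT length; must satisfy fftSize > M
    long long L = fftSize - M;    // fresh input samples consumed per block

    long long numBlocks = (iFrames + L - 1) / L;         // blocks needed to cover the input
    long long lastBlock = iFrames - (numBlocks - 1) * L; // samples in the final, partial block
    long long oFrames   = iFrames + M;                   // linear-convolution output length

    printf("blocks=%lld lastBlock=%lld output=%lld\n", numBlocks, lastBlock, oFrames);
    return 0;
}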
4661514f45d2aeefd3965ab019dbda5b94de44c9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ruby.h>
#include <hip/hip_runtime.h>

__global__ void my_kernel() {
    printf("Hello from thread %d block %d\n", threadIdx.x, blockIdx.x);
}

VALUE print_from_kernel(VALUE obj) {
    hipLaunchKernelGGL(( my_kernel), dim3(2),dim3(2), 0, 0, );
    hipDeviceSynchronize();
    return Qnil;
}

VALUE printGPUinfo(VALUE obj) {
    int devID, count = 0;
    hipDeviceProp_t props;
    hipGetDeviceCount(&count);
    if(count == 0){
        printf("CUDA Device Not Found.\n");
        return Qnil;
    }
    for(devID = 0; devID < count; devID++){
        /* select device devID before querying its properties */
        if(hipSetDevice(devID) == hipSuccess && hipGetDeviceProperties(&props, devID) == hipSuccess){
            printf("GPU %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
        }else{
            printf("Getting CUDA Device %d info failed.\n", devID);
        }
    }
    hipDeviceReset();
    return Qnil;
}

extern "C" void Init_culib(){
    VALUE mCulib = rb_define_module("Culib");
    rb_define_singleton_method(mCulib, "print_from_kernel", RUBY_METHOD_FUNC(print_from_kernel), 0);
    rb_define_singleton_method(mCulib, "printGPUinfo", RUBY_METHOD_FUNC(printGPUinfo), 0);
}
4661514f45d2aeefd3965ab019dbda5b94de44c9.cu
#include <stdio.h>
#include <ruby.h>
#include <cuda_runtime.h>

__global__ void my_kernel() {
    printf("Hello from thread %d block %d\n", threadIdx.x, blockIdx.x);
}

VALUE print_from_kernel(VALUE obj) {
    my_kernel<<<2,2>>>();
    cudaDeviceSynchronize();
    return Qnil;
}

VALUE printGPUinfo(VALUE obj) {
    int devID, count = 0;
    cudaDeviceProp props;
    cudaGetDeviceCount(&count);
    if(count == 0){
        printf("CUDA Device Not Found.\n");
        return Qnil;
    }
    for(devID = 0; devID < count; devID++){
        /* select device devID before querying its properties */
        if(cudaSetDevice(devID) == cudaSuccess && cudaGetDeviceProperties(&props, devID) == cudaSuccess){
            printf("GPU %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);
        }else{
            printf("Getting CUDA Device %d info failed.\n", devID);
        }
    }
    cudaDeviceReset();
    return Qnil;
}

extern "C" void Init_culib(){
    VALUE mCulib = rb_define_module("Culib");
    rb_define_singleton_method(mCulib, "print_from_kernel", RUBY_METHOD_FUNC(print_from_kernel), 0);
    rb_define_singleton_method(mCulib, "printGPUinfo", RUBY_METHOD_FUNC(printGPUinfo), 0);
}
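The pair above is a convenient minimal example of what hipify changes: the CUDA triple-chevron launch becomes hipLaunchKernelGGL with the grid, block, dynamic shared-memory bytes, and stream passed as explicit arguments before the kernel arguments. The sketch below spells out the CUDA side with all four launch parameters written explicitly; the kernel name is illustrative and not part of either file.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void demo_kernel() {
    printf("Hello from thread %d block %d\n", threadIdx.x, blockIdx.x);
}

int main() {
    // grid of 2 blocks, 2 threads per block, 0 bytes of dynamic shared memory, default stream;
    // these are the same four values hipLaunchKernelGGL takes positionally in the .hip version.
    demo_kernel<<<dim3(2), dim3(2), 0, 0>>>();
    cudaDeviceSynchronize();   // wait so the device-side printf output is flushed before exit
    return 0;
}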
e071f277cd07a9c10e29faac4f8c054852e19383.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdio.h> #include<stdlib.h> #include <math.h> #include <Windows.h> #include <time.h> #include <assert.h> #define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}} typedef float TIMER_T; #define USE_CPU_TIMER 1 #define USE_GPU_TIMER 1 #if USE_CPU_TIMER == 1 __int64 start, freq, end; #define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); } #define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); } #else #define CHECK_TIME_START #define CHECK_TIME_END(a) #endif #if USE_GPU_TIMER == 1 hipEvent_t cuda_timer_start, cuda_timer_stop; #define CUDA_STREAM_0 (0) void create_device_timer() { CUDA_CALL(hipEventCreate(&cuda_timer_start)); CUDA_CALL(hipEventCreate(&cuda_timer_stop)); } void destroy_device_timer() { CUDA_CALL(hipEventDestroy(cuda_timer_start)); CUDA_CALL(hipEventDestroy(cuda_timer_stop)); } inline void start_device_timer() { hipEventRecord(cuda_timer_start, CUDA_STREAM_0); } inline TIMER_T stop_device_timer() { TIMER_T ms; hipEventRecord(cuda_timer_stop, CUDA_STREAM_0); hipEventSynchronize(cuda_timer_stop); hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop); return ms; } #define CHECK_TIME_INIT_GPU() { create_device_timer(); } #define CHECK_TIME_START_GPU() { start_device_timer(); } #define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); } #define CHECK_TIME_DEST_GPU() { destroy_device_timer(); } #else #define CHECK_TIME_INIT_GPU() #define CHECK_TIME_START_GPU() #define CHECK_TIME_END_GPU(a) #define CHECK_TIME_DEST_GPU() #endif #define BLOCK_SIZE 64 #define DATA_SIZE (1 << 26) TIMER_T compute_time = 0; TIMER_T device_time = 0; #define N_EQUATIONS 1 << 26 int N; float* A; float* B; float* C; float* X0; float* X1; float* FX0; float* FX1; float* X0_gpu; float* X1_gpu; float* FX0_gpu; float* FX1_gpu; hipError_t Equation_GPU(float* A, float* B, float* C, float* X0, float* X1, float* fX0, float* fX1, int n); __global__ void Equation_Kernel(float* A, float* B, float* C, float* X0, float* X1, float* FX0, float* FX1) { float a, b, c, d, x0, x1, tmp; int row = blockDim.y * blockIdx.y + threadIdx.y; int col = blockDim.x * blockIdx.x + threadIdx.x; int tid = gridDim.x * blockDim.x * row + col; a = A[tid]; b = B[tid]; c = C[tid]; d = sqrtf(b * b - 4.0f * a * c); tmp = 1.0f / (2.0f * a); X0[tid] = x0 = (-b - d) * tmp; X1[tid] = x1 = (-b + d) * tmp; FX0[tid] = (a * x0 + b) * x0 + c; FX1[tid] = (a * x1 + b) * x1 + c; } void Equation_CPU(float *A, float * B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n) { int i; float a, b, c, d, x0, x1, tmp; for (i = 0; i < n; i++) { a = A[i]; b = B[i]; c = C[i]; d = sqrtf(b * b - 4.0f * a * c); tmp = 1.0f / (2.0f * a); X0[i] = x0 = (-b - d) * tmp; X1[i] = x1 = (-b + d) * tmp; FX0[i] = (a * x0 + b) * x0 + c; FX1[i] = (a * x1 + b) * x1 + c; } } void init_bin_file() { srand((unsigned)time(NULL)); N = N_EQUATIONS; A = (float*)malloc(sizeof(float) * N); B = (float*)malloc(sizeof(float) * N); C = (float*)malloc(sizeof(float) * N); X0 = (float*)malloc(sizeof(float) * N); X1 = (float*)malloc(sizeof(float) * N); FX0 = (float*)malloc(sizeof(float) * N); FX1 = (float*)malloc(sizeof(float) * N); X0_gpu = (float*)malloc(sizeof(float) 
* N); X1_gpu = (float*)malloc(sizeof(float) * N); FX0_gpu = (float*)malloc(sizeof(float) * N); FX1_gpu = (float*)malloc(sizeof(float) * N);// . printf("***Binary File init Start!!\n"); FILE *fp1 = fopen("A.bin", "wb"); FILE* fp2 = fopen("B.bin", "wb"); FILE* fp3 = fopen("C.bin", "wb"); float random_a; float random_b; float random_c; for (int i = 0; i < N; i++) { random_a = (float)(((rand() %10))); random_c = (float)(((rand() %10)*(-1))); random_b = (float)(((rand() %10))); fwrite(&random_a, sizeof(float), 1, fp1); fwrite(&random_b, sizeof(float), 1, fp2); fwrite(&random_c, sizeof(float), 1, fp3); } fclose(fp1); fclose(fp2); fclose(fp3); printf("***Binary File init End!!\n\n"); } void read_bin_file() { printf("***Binary File Read Start!!\n"); FILE *fp1 = fopen("a.bin", "rb"); FILE* fp2 = fopen("b.bin", "rb"); FILE* fp3 = fopen("c.bin", "rb"); /*Todo*/ int i; for (i = 0; i < N; i++)//N h_Fibonacci_number { fread(&A[i], sizeof(float), 1, fp1); fread(&B[i], sizeof(float), 1, fp2); fread(&C[i], sizeof(float), 1, fp3); } fclose(fp1); fclose(fp2); fclose(fp3); printf("***Binary File Read End!!\n\n"); } int main() { init_bin_file(); read_bin_file(); printf("The problem size is %d.\n", N); int i; //CPU printf("***Equation_CPU Start!!\n"); CHECK_TIME_START; Equation_CPU(A, B, C, X0, X1, FX0, FX1, N); CHECK_TIME_END(compute_time); printf("***Equation_CPU End!!\n"); printf("CPU time = %.6f\n\n", compute_time); //GPU printf("***Equation_GPU Start!!\n"); Equation_GPU(A, B, C, X0_gpu, X1_gpu, FX0_gpu, FX1_gpu, N); printf("***Equation_GPU End!!\n"); printf("GPU time = %.6f\n", device_time); for (i = 0; i < N; i++) { if (fabs(X0[i] - X0_gpu[i])>0.0001 || fabs(X1[i] - X1_gpu[i])>0.0001) { printf("x0[i] %f, x0_gpu[i] %f\n", X0[i], X0_gpu[i]); printf("x1[i] %f, x1_gpu[i] %f\n", X1[i], X1_gpu[i]); break; } if (fabs(FX0_gpu[i]) > 0.0001 || fabs(FX1_gpu[i]) > 0.0001) { printf("fx0_gpu[i] %f\n", FX0_gpu[i]); printf("fx1_gpu[i] %f\n", FX1_gpu[i]); break; } } if (i == N) printf("***Kernel execution Success!!\n\n"); // Write the output array into the output file. FILE *fp1 = fopen("X0.bin", "wb"); if (!fp1) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } FILE* fp2 = fopen("X1.bin", "wb"); if (!fp2) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } FILE* fp3 = fopen("FX0.bin", "wb"); if (!fp3) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } FILE* fp4 = fopen("FX1.bin", "wb"); if (!fp4) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } fwrite(&X0_gpu, sizeof(float), N, fp1); fwrite(&X1_gpu, sizeof(float), N, fp2); fwrite(&FX0_gpu, sizeof(float), N, fp3); fwrite(&FX1_gpu, sizeof(float), N, fp4); fclose(fp1); fclose(fp2); fclose(fp3); fclose(fp4); printf("end!!\n\n"); return 0; } hipError_t Equation_GPU(float* A, float* B, float* C, float* X0_gpu, float* X1_gpu, float* FX0_gpu, float* FX1_gpu, int n) { CHECK_TIME_INIT_GPU(); hipError_t cudaStatus; /*Todo*/ // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; }///////////// if(cu..... ==CUDA_CALL float* d_a; float* d_b; float* d_c; float* d_x0; float* d_x1; float* d_fx0; float* d_fx1; size_t size=N*sizeof(float); CUDA_CALL(hipMalloc(&d_a, size))//gpu a . CUDA_CALL(hipMemcpy(d_a, A, size, hipMemcpyHostToDevice))//gpu(d_a) a copy. CUDA_CALL(hipMalloc(&d_b, size))//gpu b . 
CUDA_CALL(hipMemcpy(d_b, B, size, hipMemcpyHostToDevice))//gpu(d_b) b copy. CUDA_CALL(hipMalloc(&d_c, size))//gpu c . CUDA_CALL(hipMemcpy(d_c, C, size, hipMemcpyHostToDevice))//gpu(d_c) c copy. CUDA_CALL(hipMalloc(&d_x0, size))// gpu memory . CUDA_CALL(hipMalloc(&d_x1, size))// gpu memory . CUDA_CALL(hipMalloc(&d_fx0, size))// gpu memory . CUDA_CALL(hipMalloc(&d_fx1, size))// gpu memory . // Assume that width and height are multiples of BLOCK SIZE. dim3 dimBlock(BLOCK_SIZE);//block dimension 1, block size dim3 dimGrid(N/BLOCK_SIZE);//n/block_size grid dimension CHECK_TIME_START_GPU() Equation_Kernel << < dimGrid, dimBlock >> > (d_a, d_b, d_c, d_x0, d_x1, d_fx0, d_fx1);//kernel CHECK_TIME_END_GPU(device_time) CUDA_CALL(hipGetLastError()) // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. CUDA_CALL(hipDeviceSynchronize()) CUDA_CALL(hipMemcpy(X0_gpu, d_x0, size, hipMemcpyDeviceToHost))//gpu CUDA_CALL(hipMemcpy(X1_gpu, d_x1, size, hipMemcpyDeviceToHost))//gpu CUDA_CALL(hipMemcpy(FX0_gpu, d_fx0, size, hipMemcpyDeviceToHost))//gpu CUDA_CALL(hipMemcpy(FX1_gpu, d_fx1, size, hipMemcpyDeviceToHost))//gpu Error: hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_x0); hipFree(d_x1); hipFree(d_fx0); hipFree(d_fx1); CHECK_TIME_DEST_GPU(); return cudaStatus; }
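One detail worth spelling out from the launch configuration above: dimGrid is computed as N/BLOCK_SIZE, which covers every element only because N (1 << 26) is an exact multiple of BLOCK_SIZE (64) and Equation_Kernel has no bounds check. A small sketch of the more general ceil-division launch with an in-kernel guard follows; the kernel, buffer, and sizes are illustrative and not taken from the file.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale_kernel(float* data, int n, float s) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;              // bounds check handles the partial last block
}

int main() {
    const int n = 1000003;                // deliberately not a multiple of the block size
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    int block = 64;
    int grid = (n + block - 1) / block;   // ceil(n / block) blocks
    scale_kernel<<<grid, block>>>(d, n, 2.0f);
    cudaDeviceSynchronize();
    cudaFree(d);
    return 0;
}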
e071f277cd07a9c10e29faac4f8c054852e19383.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include<stdio.h> #include<stdlib.h> #include <math.h> #include <Windows.h> #include <time.h> #include <assert.h> #define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}} typedef float TIMER_T; #define USE_CPU_TIMER 1 #define USE_GPU_TIMER 1 #if USE_CPU_TIMER == 1 __int64 start, freq, end; #define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); } #define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); } #else #define CHECK_TIME_START #define CHECK_TIME_END(a) #endif #if USE_GPU_TIMER == 1 cudaEvent_t cuda_timer_start, cuda_timer_stop; #define CUDA_STREAM_0 (0) void create_device_timer() { CUDA_CALL(cudaEventCreate(&cuda_timer_start)); CUDA_CALL(cudaEventCreate(&cuda_timer_stop)); } void destroy_device_timer() { CUDA_CALL(cudaEventDestroy(cuda_timer_start)); CUDA_CALL(cudaEventDestroy(cuda_timer_stop)); } inline void start_device_timer() { cudaEventRecord(cuda_timer_start, CUDA_STREAM_0); } inline TIMER_T stop_device_timer() { TIMER_T ms; cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0); cudaEventSynchronize(cuda_timer_stop); cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop); return ms; } #define CHECK_TIME_INIT_GPU() { create_device_timer(); } #define CHECK_TIME_START_GPU() { start_device_timer(); } #define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); } #define CHECK_TIME_DEST_GPU() { destroy_device_timer(); } #else #define CHECK_TIME_INIT_GPU() #define CHECK_TIME_START_GPU() #define CHECK_TIME_END_GPU(a) #define CHECK_TIME_DEST_GPU() #endif #define BLOCK_SIZE 64 #define DATA_SIZE (1 << 26) TIMER_T compute_time = 0; TIMER_T device_time = 0; #define N_EQUATIONS 1 << 26 int N; float* A; float* B; float* C; float* X0; float* X1; float* FX0; float* FX1; float* X0_gpu; float* X1_gpu; float* FX0_gpu; float* FX1_gpu; cudaError_t Equation_GPU(float* A, float* B, float* C, float* X0, float* X1, float* fX0, float* fX1, int n); __global__ void Equation_Kernel(float* A, float* B, float* C, float* X0, float* X1, float* FX0, float* FX1) { float a, b, c, d, x0, x1, tmp; int row = blockDim.y * blockIdx.y + threadIdx.y; int col = blockDim.x * blockIdx.x + threadIdx.x; int tid = gridDim.x * blockDim.x * row + col; a = A[tid]; b = B[tid]; c = C[tid]; d = sqrtf(b * b - 4.0f * a * c); tmp = 1.0f / (2.0f * a); X0[tid] = x0 = (-b - d) * tmp; X1[tid] = x1 = (-b + d) * tmp; FX0[tid] = (a * x0 + b) * x0 + c; FX1[tid] = (a * x1 + b) * x1 + c; } void Equation_CPU(float *A, float * B, float *C, float *X0, float *X1, float *FX0, float *FX1, int n) { int i; float a, b, c, d, x0, x1, tmp; for (i = 0; i < n; i++) { a = A[i]; b = B[i]; c = C[i]; d = sqrtf(b * b - 4.0f * a * c); tmp = 1.0f / (2.0f * a); X0[i] = x0 = (-b - d) * tmp; X1[i] = x1 = (-b + d) * tmp; FX0[i] = (a * x0 + b) * x0 + c; FX1[i] = (a * x1 + b) * x1 + c; } } void init_bin_file() { srand((unsigned)time(NULL)); N = N_EQUATIONS; A = (float*)malloc(sizeof(float) * N); B = (float*)malloc(sizeof(float) * N); C = (float*)malloc(sizeof(float) * N); X0 = (float*)malloc(sizeof(float) * N); X1 = (float*)malloc(sizeof(float) * N); FX0 = (float*)malloc(sizeof(float) * N); FX1 = (float*)malloc(sizeof(float) * N); X0_gpu = (float*)malloc(sizeof(float) * N); X1_gpu = (float*)malloc(sizeof(float) * 
N); FX0_gpu = (float*)malloc(sizeof(float) * N); FX1_gpu = (float*)malloc(sizeof(float) * N);//공간 할당. printf("***Binary File init Start!!\n"); FILE *fp1 = fopen("A.bin", "wb"); FILE* fp2 = fopen("B.bin", "wb"); FILE* fp3 = fopen("C.bin", "wb"); float random_a; float random_b; float random_c; for (int i = 0; i < N; i++) { random_a = (float)(((rand() %10))); random_c = (float)(((rand() %10)*(-1))); random_b = (float)(((rand() %10))); fwrite(&random_a, sizeof(float), 1, fp1); fwrite(&random_b, sizeof(float), 1, fp2); fwrite(&random_c, sizeof(float), 1, fp3); } fclose(fp1); fclose(fp2); fclose(fp3); printf("***Binary File init End!!\n\n"); } void read_bin_file() { printf("***Binary File Read Start!!\n"); FILE *fp1 = fopen("a.bin", "rb"); FILE* fp2 = fopen("b.bin", "rb"); FILE* fp3 = fopen("c.bin", "rb"); /*Todo*/ int i; for (i = 0; i < N; i++)//N개만큼 숫자 읽어와 h_Fibonacci_number로 저장 { fread(&A[i], sizeof(float), 1, fp1); fread(&B[i], sizeof(float), 1, fp2); fread(&C[i], sizeof(float), 1, fp3); } fclose(fp1); fclose(fp2); fclose(fp3); printf("***Binary File Read End!!\n\n"); } int main() { init_bin_file(); read_bin_file(); printf("The problem size is %d.\n", N); int i; //CPU printf("***Equation_CPU Start!!\n"); CHECK_TIME_START; Equation_CPU(A, B, C, X0, X1, FX0, FX1, N); CHECK_TIME_END(compute_time); printf("***Equation_CPU End!!\n"); printf("CPU time = %.6f\n\n", compute_time); //GPU printf("***Equation_GPU Start!!\n"); Equation_GPU(A, B, C, X0_gpu, X1_gpu, FX0_gpu, FX1_gpu, N); printf("***Equation_GPU End!!\n"); printf("GPU time = %.6f\n", device_time); for (i = 0; i < N; i++) { if (fabs(X0[i] - X0_gpu[i])>0.0001 || fabs(X1[i] - X1_gpu[i])>0.0001) { printf("x0[i] %f, x0_gpu[i] %f\n", X0[i], X0_gpu[i]); printf("x1[i] %f, x1_gpu[i] %f\n", X1[i], X1_gpu[i]); break; } if (fabs(FX0_gpu[i]) > 0.0001 || fabs(FX1_gpu[i]) > 0.0001) { printf("fx0_gpu[i] %f\n", FX0_gpu[i]); printf("fx1_gpu[i] %f\n", FX1_gpu[i]); break; } } if (i == N) printf("***Kernel execution Success!!\n\n"); // Write the output array into the output file. FILE *fp1 = fopen("X0.bin", "wb"); if (!fp1) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } FILE* fp2 = fopen("X1.bin", "wb"); if (!fp2) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } FILE* fp3 = fopen("FX0.bin", "wb"); if (!fp3) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } FILE* fp4 = fopen("FX1.bin", "wb"); if (!fp4) { fprintf(stderr, "Error: cannot open the output file...\n"); exit(-1); } fwrite(&X0_gpu, sizeof(float), N, fp1); fwrite(&X1_gpu, sizeof(float), N, fp2); fwrite(&FX0_gpu, sizeof(float), N, fp3); fwrite(&FX1_gpu, sizeof(float), N, fp4); fclose(fp1); fclose(fp2); fclose(fp3); fclose(fp4); printf("end!!\n\n"); return 0; } cudaError_t Equation_GPU(float* A, float* B, float* C, float* X0_gpu, float* X1_gpu, float* FX0_gpu, float* FX1_gpu, int n) { CHECK_TIME_INIT_GPU(); cudaError_t cudaStatus; /*Todo*/ // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; }///////////// if(cu..... ==CUDA_CALL float* d_a; float* d_b; float* d_c; float* d_x0; float* d_x1; float* d_fx0; float* d_fx1; size_t size=N*sizeof(float); CUDA_CALL(cudaMalloc(&d_a, size))//gpu에 a를 저장하기 위하여 메모리 할당. CUDA_CALL(cudaMemcpy(d_a, A, size, cudaMemcpyHostToDevice))//gpu(d_a)로 a copy함. CUDA_CALL(cudaMalloc(&d_b, size))//gpu에 b를 저장하기 위하여 메모리 할당. 
CUDA_CALL(cudaMemcpy(d_b, B, size, cudaMemcpyHostToDevice))//gpu(d_b)로 b copy함. CUDA_CALL(cudaMalloc(&d_c, size))//gpu에 c를 저장하기 위하여 메모리 할당. CUDA_CALL(cudaMemcpy(d_c, C, size, cudaMemcpyHostToDevice))//gpu(d_c)로 c copy함. CUDA_CALL(cudaMalloc(&d_x0, size))// gpu내에 결과값이 저장될 memory 할당. CUDA_CALL(cudaMalloc(&d_x1, size))// gpu내에 결과값이 저장될 memory 할당. CUDA_CALL(cudaMalloc(&d_fx0, size))// gpu내에 결과값이 저장될 memory 할당. CUDA_CALL(cudaMalloc(&d_fx1, size))// gpu내에 결과값이 저장될 memory 할당. // Assume that width and height are multiples of BLOCK SIZE. dim3 dimBlock(BLOCK_SIZE);//block dimension 1차원, block size dim3 dimGrid(N/BLOCK_SIZE);//n/block_size가 grid의 dimension CHECK_TIME_START_GPU() Equation_Kernel << < dimGrid, dimBlock >> > (d_a, d_b, d_c, d_x0, d_x1, d_fx0, d_fx1);//kernel 수행 CHECK_TIME_END_GPU(device_time) CUDA_CALL(cudaGetLastError()) // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. CUDA_CALL(cudaDeviceSynchronize()) CUDA_CALL(cudaMemcpy(X0_gpu, d_x0, size, cudaMemcpyDeviceToHost))//gpu메모리로부터 계산결과 카피 CUDA_CALL(cudaMemcpy(X1_gpu, d_x1, size, cudaMemcpyDeviceToHost))//gpu메모리로부터 계산결과 카피 CUDA_CALL(cudaMemcpy(FX0_gpu, d_fx0, size, cudaMemcpyDeviceToHost))//gpu메모리로부터 계산결과 카피 CUDA_CALL(cudaMemcpy(FX1_gpu, d_fx1, size, cudaMemcpyDeviceToHost))//gpu메모리로부터 계산결과 카피 Error: cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_x0); cudaFree(d_x1); cudaFree(d_fx0); cudaFree(d_fx1); CHECK_TIME_DEST_GPU(); return cudaStatus; }
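The root computation in Equation_Kernel and Equation_CPU uses the textbook formula x = (-b ± sqrt(b*b - 4ac)) / (2a), which loses precision to cancellation in whichever root has -b and the discriminant nearly cancelling. Below is a hedged sketch of the standard numerically stable alternative; it is not what the file implements, the function name is made up, and it assumes a non-negative discriminant, which the file's random coefficients (a, b >= 0 and c <= 0) do guarantee.

#include <cmath>
#include <cstdio>

// Stable quadratic roots: evaluate the non-cancelling root first, then recover
// the other one from the identity x0 * x1 = c / a.
__host__ __device__ void stable_roots(float a, float b, float c, float* x0, float* x1) {
    float d = sqrtf(b * b - 4.0f * a * c);     // assumes b*b - 4ac >= 0
    float q = -0.5f * (b + copysignf(d, b));   // b and copysign(d, b) share a sign, so no cancellation
    *x0 = q / a;
    *x1 = c / q;
}

int main() {
    float x0, x1;
    stable_roots(1.0f, -1000.0f, 1.0f, &x0, &x1);
    printf("x0=%g x1=%g\n", x0, x1);           // roughly 999.999 and 0.001
    return 0;
}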
7c6f54bf6a0ddc325959c272bb8bce59c376d42c.hip
// !!! This is a file automatically generated by hipify!!! // // Created by lidan on 23/10/2020. // #include <helper_cuda.h> #include <hip/hip_runtime.h> #include <time.h> #include <stdint.h> #include <iostream> #include <omp.h> #include <random> #include <cxxopts.hpp> #define NUM 10000 static uint32_t cpu_tmp_0[NUM] ; static uint32_t cpu_tmp_1[NUM] ; __host__ void cpusort(uint32_t* const data,const uint32_t num_elements) { for(uint32_t bit = 0 ; bit <32 ;bit++) { uint32_t base_cnt_0 = 0 ; uint32_t base_cnt_1 = 0 ; for(uint32_t i = 0 ; i< num_elements;i++) { const uint32_t d = data[i] ; const uint32_t bit_mask = (1<<bit) ; if((d & bit_mask) > 0) { cpu_tmp_1[base_cnt_1++] = d ; }else{ cpu_tmp_0[base_cnt_0++] = d ; } } for(uint32_t i =0 ; i< base_cnt_0;i++) { data[i] = cpu_tmp_0[i] ; } for(uint32_t i = 0 ; i<base_cnt_1;i++) { data[base_cnt_0+i] = cpu_tmp_1[i] ; } } } __global__ void gpu_radixsort2(unsigned int * const sort_tmp, int NUM_ELEMENT , int NUM_LISTS , unsigned int * const sort_tmp_1, const unsigned int tid) // { for(unsigned int bit_mask = 1; bit_mask > 0; bit_mask <<= 1) //32 { unsigned int base_cnt_0 = 0; unsigned int base_cnt_1 = 0; for (unsigned int i = 0; i < NUM_ELEMENT; i+=NUM_LISTS) { if(sort_tmp[i+tid] & bit_mask) //1sort_tmp_1 { sort_tmp_1[base_cnt_1+tid] = sort_tmp[i+tid]; base_cnt_1 += NUM_LISTS; } else //0sort_tmp { sort_tmp[base_cnt_0+tid] = sort_tmp[i+tid]; base_cnt_0 += NUM_LISTS; } } for (unsigned int i = 0; i < base_cnt_1; i+=NUM_LISTS) //sort_tmp_1sort_tmp { sort_tmp[base_cnt_0+i+tid] = sort_tmp_1[i+tid]; } __syncthreads(); } } __device__ unsigned int getIdx(dim3* threads, dim3* blocks) { int x; return threadIdx.x + threadIdx.y * (x = threads->x) + threadIdx.z * (x *= threads->y) + blockIdx.x * (x *= threads->z) + blockIdx.y * (x *= blocks->z) + blockIdx.z * (x *= blocks->y); } __global__ void gpu_radixsort(uint32_t* const data, const uint32_t num_elements,int numlist, uint32_t* t,uint32_t* w ) { const unsigned int idx = (blockIdx.x* blockDim.x) + threadIdx.x ; const unsigned int idy = (blockIdx.y*blockDim.y) + threadIdx.y ; const unsigned int tid = idy*gridDim.x*blockDim.x + idx ; // const unsigned int tid = threadIdx.x ; for(int i = 0 ; i< 32; i++) { int base_0 = 0; int base_1 = 0 ; const int bit_mask = (1 << i) ; for(int j=0 ; j< num_elements;j+=numlist) { const int elem = data[j+tid] ; if((elem & bit_mask) > 0) { t[base_1+tid] = elem ; base_1 += numlist ; }else{ w[base_0+tid] = elem ; base_0 += numlist ; } } for(int m = 0; m < base_0;m+=numlist) { data[m+tid] = w[m+tid] ; } for(int m = 0; m < base_1;m+=numlist) { data[base_0+m+tid] = t[m+tid] ; } __syncthreads() ; } } void radixsort_gpu(uint32_t* data, uint32_t num_element ) { uint32_t *cuda_data ; uint32_t *tdata ; uint32_t *wdata ; checkCudaErrors(hipMalloc((void**)&cuda_data,num_element*sizeof(int))) ; checkCudaErrors(hipMalloc((void**)&tdata,num_element*sizeof(int))) ; checkCudaErrors(hipMalloc((void**)&wdata,num_element*sizeof(int))) ; checkCudaErrors(hipMemcpy(cuda_data,data,sizeof(int)*num_element,hipMemcpyHostToDevice)) ; hipLaunchKernelGGL(( gpu_radixsort), dim3(1),dim3(dim3(10,10,1)), 0, 0, cuda_data, num_element,10000,tdata,wdata) ; checkCudaErrors(hipMemcpy(data,cuda_data,sizeof(int)*num_element,hipMemcpyDeviceToHost)) ; } #define MAX_NUM_LIST 10000 __device__ void mergeArray(const uint32_t * src_array , uint32_t* const dest_array, const uint32_t num_list, const uint32_t num_elements, const uint32_t tid) { const uint32_t num_elements_per_list = (num_elements/num_list) ; __shared__ unsigned int 
list_indexs[MAX_NUM_LIST] ; list_indexs[tid] = 0 ; __syncthreads() ; for(int i = 0 ;i < num_elements;i++) { __shared__ int min_val ; __shared__ int min_idx ; int data ; if(list_indexs[tid] < num_elements_per_list) { const int src_idx = tid + (list_indexs[tid]*num_list) ; data = src_array[src_idx] ; }else{ data = 0xFFFFFFF ; } if(tid==0) { min_val = 0xFFFFFF ; min_idx = 0xFFFFFF ; } __syncthreads() ; atomicMin(&min_val,data) ; __syncthreads() ; if(min_val==data) { atomicMin(&min_idx,tid) ; } __syncthreads() ; if(tid==min_idx) { list_indexs[tid]++ ; dest_array[i] = data ; } } } __device__ void merge_two(unsigned int * const data,unsigned int* const dst,const unsigned int tid , const int num_list,const int num_element) { const int num_elements_per_list = num_element/num_list ; __shared__ int list_indexs[MAX_NUM_LIST] ; __shared__ int reduce_val[MAX_NUM_LIST] ; __shared__ int reduce_idx[MAX_NUM_LIST] ; list_indexs[tid] = 0 ; reduce_idx[tid] = 0 ; reduce_val[tid] = 0 ; __syncthreads() ; for(int i = 0 ;i < num_element;i++) { int tid_max = num_list >> 1 ; int t ; if(list_indexs[tid] < num_elements_per_list) { const int src_idx = tid + (list_indexs[tid] * num_list) ; t = data[src_idx] ; }else{ t = 0xFFFFFFF ; } reduce_val[tid] = t ; reduce_idx[tid] = tid ; __syncthreads() ; while(tid_max!=0) { if(tid < tid_max) { const int val2_idx = tid + tid_max ; const int val2 = reduce_idx[val2_idx] ; if(reduce_val[tid]>val2) { reduce_val[tid] = val2 ; reduce_idx[tid] = val2_idx ; } } } tid_max >>= 1 ; __syncthreads() ; if(tid == 0 ) { list_indexs[reduce_idx[0]]++ ; dst[i] = reduce_val[0] ; } __syncthreads() ; } } #define REDUCTION_SIZE 8 #define REDUCTION_SHIFT 3 __device__ void merge_final(unsigned int * const srcData, unsigned int * const dstData, const unsigned int NUM_LIST , const unsigned int NUM_ELEMENTS , const unsigned int tid) { __shared__ unsigned int list_reduction[MAX_NUM_LIST] ; unsigned int num_reduction = NUM_LIST >> REDUCTION_SHIFT ; unsigned int s_tid = tid >> REDUCTION_SHIFT ; unsigned int self_index = tid ; unsigned int min_val ; for(int i = 0 ; i< NUM_ELEMENTS;i++) { int t = 0xFFFFFF ; if(self_index<NUM_ELEMENTS) { t = srcData[self_index]; } if(tid < NUM_LIST/REDUCTION_SIZE) { list_reduction[tid] = 0xFFFFFF ; } __syncthreads() ; atomicMin(&(list_reduction[s_tid]),t) ; __syncthreads() ; if(tid == 0 ) { min_val = 0xFFFFFF ; } __syncthreads() ; if(tid<NUM_LIST/REDUCTION_SIZE) { atomicMin(&min_val,list_reduction[tid]) ; } __syncthreads() ; if(min_val == t) { dstData[i] = min_val ; self_index += NUM_LIST ; min_val = 0xFFFFFF ; } } } int main() { uint32_t t[10] = {10,7,7,3,8,8,2,3,9,10} ; // mergesort(t,10) ; radixsort_gpu(t,10) ; for(int i = 0;i < 10 ;i++) { std::cout<<t[i] ; } }
7c6f54bf6a0ddc325959c272bb8bce59c376d42c.cu
// // Created by lidan on 23/10/2020. // #include <helper_cuda.h> #include <cuda_runtime.h> #include <time.h> #include <stdint.h> #include <iostream> #include <omp.h> #include <random> #include <cxxopts.hpp> #define NUM 10000 static uint32_t cpu_tmp_0[NUM] ; static uint32_t cpu_tmp_1[NUM] ; __host__ void cpusort(uint32_t* const data,const uint32_t num_elements) { for(uint32_t bit = 0 ; bit <32 ;bit++) { uint32_t base_cnt_0 = 0 ; uint32_t base_cnt_1 = 0 ; for(uint32_t i = 0 ; i< num_elements;i++) { const uint32_t d = data[i] ; const uint32_t bit_mask = (1<<bit) ; if((d & bit_mask) > 0) { cpu_tmp_1[base_cnt_1++] = d ; }else{ cpu_tmp_0[base_cnt_0++] = d ; } } for(uint32_t i =0 ; i< base_cnt_0;i++) { data[i] = cpu_tmp_0[i] ; } for(uint32_t i = 0 ; i<base_cnt_1;i++) { data[base_cnt_0+i] = cpu_tmp_1[i] ; } } } __global__ void gpu_radixsort2(unsigned int * const sort_tmp, int NUM_ELEMENT , int NUM_LISTS , unsigned int * const sort_tmp_1, const unsigned int tid) //桶排序 { for(unsigned int bit_mask = 1; bit_mask > 0; bit_mask <<= 1) //32位 { unsigned int base_cnt_0 = 0; unsigned int base_cnt_1 = 0; for (unsigned int i = 0; i < NUM_ELEMENT; i+=NUM_LISTS) { if(sort_tmp[i+tid] & bit_mask) //该位是1,放到sort_tmp_1中 { sort_tmp_1[base_cnt_1+tid] = sort_tmp[i+tid]; base_cnt_1 += NUM_LISTS; } else //该位是0,放到sort_tmp的前面的 { sort_tmp[base_cnt_0+tid] = sort_tmp[i+tid]; base_cnt_0 += NUM_LISTS; } } for (unsigned int i = 0; i < base_cnt_1; i+=NUM_LISTS) //将sort_tmp_1的数据放到sort_tmp后面 { sort_tmp[base_cnt_0+i+tid] = sort_tmp_1[i+tid]; } __syncthreads(); } } __device__ unsigned int getIdx(dim3* threads, dim3* blocks) { int x; return threadIdx.x + threadIdx.y * (x = threads->x) + threadIdx.z * (x *= threads->y) + blockIdx.x * (x *= threads->z) + blockIdx.y * (x *= blocks->z) + blockIdx.z * (x *= blocks->y); } __global__ void gpu_radixsort(uint32_t* const data, const uint32_t num_elements,int numlist, uint32_t* t,uint32_t* w ) { const unsigned int idx = (blockIdx.x* blockDim.x) + threadIdx.x ; const unsigned int idy = (blockIdx.y*blockDim.y) + threadIdx.y ; const unsigned int tid = idy*gridDim.x*blockDim.x + idx ; // const unsigned int tid = threadIdx.x ; for(int i = 0 ; i< 32; i++) { int base_0 = 0; int base_1 = 0 ; const int bit_mask = (1 << i) ; for(int j=0 ; j< num_elements;j+=numlist) { const int elem = data[j+tid] ; if((elem & bit_mask) > 0) { t[base_1+tid] = elem ; base_1 += numlist ; }else{ w[base_0+tid] = elem ; base_0 += numlist ; } } for(int m = 0; m < base_0;m+=numlist) { data[m+tid] = w[m+tid] ; } for(int m = 0; m < base_1;m+=numlist) { data[base_0+m+tid] = t[m+tid] ; } __syncthreads() ; } } void radixsort_gpu(uint32_t* data, uint32_t num_element ) { uint32_t *cuda_data ; uint32_t *tdata ; uint32_t *wdata ; checkCudaErrors(cudaMalloc((void**)&cuda_data,num_element*sizeof(int))) ; checkCudaErrors(cudaMalloc((void**)&tdata,num_element*sizeof(int))) ; checkCudaErrors(cudaMalloc((void**)&wdata,num_element*sizeof(int))) ; checkCudaErrors(cudaMemcpy(cuda_data,data,sizeof(int)*num_element,cudaMemcpyHostToDevice)) ; gpu_radixsort<<<1,dim3(10,10,1)>>>(cuda_data, num_element,10000,tdata,wdata) ; checkCudaErrors(cudaMemcpy(data,cuda_data,sizeof(int)*num_element,cudaMemcpyDeviceToHost)) ; } #define MAX_NUM_LIST 10000 __device__ void mergeArray(const uint32_t * src_array , uint32_t* const dest_array, const uint32_t num_list, const uint32_t num_elements, const uint32_t tid) { const uint32_t num_elements_per_list = (num_elements/num_list) ; __shared__ unsigned int list_indexs[MAX_NUM_LIST] ; list_indexs[tid] = 0 ; __syncthreads() 
; for(int i = 0 ;i < num_elements;i++) { __shared__ int min_val ; __shared__ int min_idx ; int data ; if(list_indexs[tid] < num_elements_per_list) { const int src_idx = tid + (list_indexs[tid]*num_list) ; data = src_array[src_idx] ; }else{ data = 0xFFFFFFF ; } if(tid==0) { min_val = 0xFFFFFF ; min_idx = 0xFFFFFF ; } __syncthreads() ; atomicMin(&min_val,data) ; __syncthreads() ; if(min_val==data) { atomicMin(&min_idx,tid) ; } __syncthreads() ; if(tid==min_idx) { list_indexs[tid]++ ; dest_array[i] = data ; } } } __device__ void merge_two(unsigned int * const data,unsigned int* const dst,const unsigned int tid , const int num_list,const int num_element) { const int num_elements_per_list = num_element/num_list ; __shared__ int list_indexs[MAX_NUM_LIST] ; __shared__ int reduce_val[MAX_NUM_LIST] ; __shared__ int reduce_idx[MAX_NUM_LIST] ; list_indexs[tid] = 0 ; reduce_idx[tid] = 0 ; reduce_val[tid] = 0 ; __syncthreads() ; for(int i = 0 ;i < num_element;i++) { int tid_max = num_list >> 1 ; int t ; if(list_indexs[tid] < num_elements_per_list) { const int src_idx = tid + (list_indexs[tid] * num_list) ; t = data[src_idx] ; }else{ t = 0xFFFFFFF ; } reduce_val[tid] = t ; reduce_idx[tid] = tid ; __syncthreads() ; while(tid_max!=0) { if(tid < tid_max) { const int val2_idx = tid + tid_max ; const int val2 = reduce_idx[val2_idx] ; if(reduce_val[tid]>val2) { reduce_val[tid] = val2 ; reduce_idx[tid] = val2_idx ; } } } tid_max >>= 1 ; __syncthreads() ; if(tid == 0 ) { list_indexs[reduce_idx[0]]++ ; dst[i] = reduce_val[0] ; } __syncthreads() ; } } #define REDUCTION_SIZE 8 #define REDUCTION_SHIFT 3 __device__ void merge_final(unsigned int * const srcData, unsigned int * const dstData, const unsigned int NUM_LIST , const unsigned int NUM_ELEMENTS , const unsigned int tid) { __shared__ unsigned int list_reduction[MAX_NUM_LIST] ; unsigned int num_reduction = NUM_LIST >> REDUCTION_SHIFT ; unsigned int s_tid = tid >> REDUCTION_SHIFT ; unsigned int self_index = tid ; unsigned int min_val ; for(int i = 0 ; i< NUM_ELEMENTS;i++) { int t = 0xFFFFFF ; if(self_index<NUM_ELEMENTS) { t = srcData[self_index]; } if(tid < NUM_LIST/REDUCTION_SIZE) { list_reduction[tid] = 0xFFFFFF ; } __syncthreads() ; atomicMin(&(list_reduction[s_tid]),t) ; __syncthreads() ; if(tid == 0 ) { min_val = 0xFFFFFF ; } __syncthreads() ; if(tid<NUM_LIST/REDUCTION_SIZE) { atomicMin(&min_val,list_reduction[tid]) ; } __syncthreads() ; if(min_val == t) { dstData[i] = min_val ; self_index += NUM_LIST ; min_val = 0xFFFFFF ; } } } int main() { uint32_t t[10] = {10,7,7,3,8,8,2,3,9,10} ; // mergesort(t,10) ; radixsort_gpu(t,10) ; for(int i = 0;i < 10 ;i++) { std::cout<<t[i] ; } }
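gpu_radixsort and cpusort above implement least-significant-digit radix sort with a radix of 2: one stable partition per bit, zeros before ones, repeated for all 32 bits. The host-side sketch below mirrors one such bit pass and then applies all 32 passes to the same ten values the file's main() sorts; the helper name and the use of std::vector are illustrative, not from the file.

#include <cstdint>
#include <cstdio>
#include <vector>

// One least-significant-digit pass, mirroring the per-thread loop in gpu_radixsort:
// stable-partition the values by the selected bit, zeros first, then ones.
void radix_bit_pass(std::vector<uint32_t>& data, int bit) {
    std::vector<uint32_t> zeros, ones;
    uint32_t mask = 1u << bit;
    for (uint32_t v : data) (v & mask ? ones : zeros).push_back(v);
    size_t i = 0;
    for (uint32_t v : zeros) data[i++] = v;   // zero-bit values keep their relative order
    for (uint32_t v : ones)  data[i++] = v;   // one-bit values appended after them
}

int main() {
    std::vector<uint32_t> v = {10, 7, 7, 3, 8, 8, 2, 3, 9, 10};
    for (int bit = 0; bit < 32; ++bit) radix_bit_pass(v, bit);   // 32 stable passes give a sorted array
    for (uint32_t x : v) printf("%u ", x);
    printf("\n");
    return 0;
}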
a3a622c5038cbccf67f219174a83fe149e2bd375.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 25-Oct-2011 15:07:39 // // user function __device__ #include "update.h" // CUDA kernel function __global__ void op_cuda_update( float *arg0, float *arg1, float *arg2, float *arg3, float *arg4, int offset_s, int set_size ) { float arg3_l[1]; for (int d=0; d<1; d++) arg3_l[d]=ZERO_float; float arg4_l[1]; for (int d=0; d<1; d++) arg4_l[d]=arg4[d+blockIdx.x*1]; // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { // user-supplied kernel call update( arg0+n, arg1+n, arg2+n, arg3_l, arg4_l ); } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]); for(int d=0; d<1; d++) op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]); } // host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4 ){ float *arg3h = (float *)arg3.data; float *arg4h = (float *)arg4.data; if (OP_diags>2) { printf(" kernel routine w/o indirection: update \n"); } // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg3.data = OP_reduct_h + reduct_bytes; arg3.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((float *)arg3.data)[d+b*1] = ZERO_float; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); arg4.data = OP_reduct_h + reduct_bytes; arg4.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((float *)arg4.data)[d+b*1] = arg4h[d]; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0, (float *) arg0.data_d, (float *) arg1.data_d, (float *) arg2.data_d, (float *) arg3.data_d, (float *) arg4.data_d, offset_s, set->size ); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_update execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg3h[d] = arg3h[d] + ((float *)arg3.data)[d+b*1]; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg4h[d] = MAX(arg4h[d],((float *)arg4.data)[d+b*1]); // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; OP_kernels[1].time += wall_t2 - wall_t1; OP_kernels[1].transfer += (float)set->size * arg0.size; OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f; }
a3a622c5038cbccf67f219174a83fe149e2bd375.cu
// // auto-generated by op2.m on 25-Oct-2011 15:07:39 // // user function __device__ #include "update.h" // CUDA kernel function __global__ void op_cuda_update( float *arg0, float *arg1, float *arg2, float *arg3, float *arg4, int offset_s, int set_size ) { float arg3_l[1]; for (int d=0; d<1; d++) arg3_l[d]=ZERO_float; float arg4_l[1]; for (int d=0; d<1; d++) arg4_l[d]=arg4[d+blockIdx.x*1]; // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { // user-supplied kernel call update( arg0+n, arg1+n, arg2+n, arg3_l, arg4_l ); } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg3[d+blockIdx.x*1],arg3_l[d]); for(int d=0; d<1; d++) op_reduction<OP_MAX>(&arg4[d+blockIdx.x*1],arg4_l[d]); } // host stub function void op_par_loop_update(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4 ){ float *arg3h = (float *)arg3.data; float *arg4h = (float *)arg4.data; if (OP_diags>2) { printf(" kernel routine w/o indirection: update \n"); } // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); reduct_size = MAX(reduct_size,sizeof(float)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg3.data = OP_reduct_h + reduct_bytes; arg3.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((float *)arg3.data)[d+b*1] = ZERO_float; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); arg4.data = OP_reduct_h + reduct_bytes; arg4.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((float *)arg4.data)[d+b*1] = arg4h[d]; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); op_cuda_update<<<nblocks,nthread,nshared>>>( (float *) arg0.data_d, (float *) arg1.data_d, (float *) arg2.data_d, (float *) arg3.data_d, (float *) arg4.data_d, offset_s, set->size ); cutilSafeCall(cudaThreadSynchronize()); cutilCheckMsg("op_cuda_update execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg3h[d] = arg3h[d] + ((float *)arg3.data)[d+b*1]; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg4h[d] = MAX(arg4h[d],((float *)arg4.data)[d+b*1]); // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; OP_kernels[1].time += wall_t2 - wall_t1; OP_kernels[1].transfer += (float)set->size * arg0.size; OP_kernels[1].transfer += (float)set->size * arg1.size * 2.0f; OP_kernels[1].transfer += (float)set->size * arg2.size * 2.0f; }
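The user kernel update() comes from update.h, which is not reproduced in this generated file (the generator prepends __device__ to whatever that header defines). Given the argument pattern (three per-element floats plus one OP_INC and one OP_MAX global, all of dimension 1), it plausibly has the shape of the update kernel in OP2's Jacobi demo; the body and the alpha constant below are assumptions for illustration only:

// Assumed contents of update.h (illustrative sketch; the real header ships with
// the application, and alpha is assumed to be an application-level constant).
inline void update(float *r, float *du, float *u, float *u_sum, float *u_max)
{
    *u     += *du + alpha * (*r);           // apply the accumulated increment
    *du     = 0.0f;                         // clear it for the next iteration
    *u_sum += (*u) * (*u);                  // feeds the OP_INC global reduction (arg3)
    *u_max  = (*u_max > *u) ? *u_max : *u;  // feeds the OP_MAX global reduction (arg4)
}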
448ce0cf00dbe79ab19f65eaa5b64bffad30328c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <hip/hip_fp16.h> #include "ReflectPad1D.h" __forceinline__ __device__ void map1d(int insize, int outsize, int index, int lpad, int& inindex, int& outindex) { int inoffset = (blockIdx.y + blockIdx.z * gridDim.y) * insize; int outoffset = (blockIdx.y + blockIdx.z * gridDim.y) * outsize; int instart = max(0, -lpad); int outstart = max(0, lpad); int x = abs(index - lpad) - abs(index - (insize + lpad - 1)) - index + 2 * lpad + insize - 1 - outstart + instart; inindex = inoffset + x, outindex = outoffset + index; } template <typename Dtype> __global__ void reflectpad1d(Dtype* outdata, const Dtype* indata, int insize, int lpad, int rpad) { int index = threadIdx.x + blockIdx.x * blockDim.x; int outsize = insize + lpad + rpad; if (index < outsize) { int inindex = 0, outindex = 0; map1d(insize, outsize, index, lpad, inindex, outindex); outdata[outindex] = indata[inindex]; } } ReflectPad1D::ReflectPad1D(int lpad, int rpad) : m_lpad(lpad), m_rpad(rpad) { } ReflectPad1D::ReflectPad1D(const void *serialData, size_t serialLength) : Plugin(serialData, serialLength) { const char *buffer = static_cast<const char *>(serialData) + Plugin::getSerializationSize(); read(buffer, m_lpad); read(buffer, m_rpad); } size_t ReflectPad1D::getSerializationSize() { return Plugin::getSerializationSize() + sizeof(m_lpad) + sizeof(m_rpad); } void ReflectPad1D::serialize(void *serialData) { Plugin::serialize(serialData); char *buffer = static_cast<char *>(serialData) + Plugin::getSerializationSize(); write(buffer, m_lpad); write(buffer, m_rpad); } int ReflectPad1D::enqueue(int batchSize, const void * const *inputs, void **outputs, void */*workspace*/, hipStream_t stream) { int maps = m_inshape.d[0], insize = m_inshape.d[1]; int outsize = insize + m_lpad + m_rpad; dim3 block(32); dim3 grid( (outsize + block.x - 1) / block.x, maps, batchSize ); if (m_datatype == nv::DataType::kFLOAT) { hipLaunchKernelGGL(( reflectpad1d), dim3(grid), dim3(block), 0, stream, static_cast<float *>(outputs[0]), static_cast<const float *>(inputs[0]), insize, m_lpad, m_rpad ); } else { hipLaunchKernelGGL(( reflectpad1d), dim3(grid), dim3(block), 0, stream, static_cast<half *>(outputs[0]), static_cast<const half *>(inputs[0]), insize, m_lpad, m_rpad ); } return 0; } nv::Dims ReflectPad1D::getOutputDimensions(int index, const nv::Dims *inputDims, int nbInputs) { assert(nbInputs == 1 && index == 0); nv::Dims inshape = inputDims[0]; nv::Dims outshape; outshape.nbDims = inshape.nbDims; outshape.type[0] = inshape.type[0]; outshape.d[0] = inshape.d[0]; outshape.type[1] = inshape.type[1]; outshape.d[1] = inshape.d[1] + m_lpad + m_rpad; return outshape; } void ReflectPad1D::configureWithFormat(const nv::Dims *inputDims, int /*nbInputs*/, const nv::Dims *outputDims, int /*nbOutputs*/, nv::DataType type, nv::PluginFormat format, int /*maxBatchSize*/) { m_inshape = inputDims[0]; m_outshape = outputDims[0]; assert((type == nv::DataType::kFLOAT || type == nv::DataType::kHALF) && format == nv::PluginFormat::kNCHW); m_datatype = type; } bool ReflectPad1D::supportsFormat(nv::DataType type, nv::PluginFormat format) const { return (type == nv::DataType::kFLOAT || type == nv::DataType::kHALF) && format == nv::PluginFormat::kNCHW; } int ReflectPad1D::initialize() { return 0; } void ReflectPad1D::terminate() { } size_t ReflectPad1D::getWorkspaceSize(int /*maxBatchSize*/) const { return 0; } int ReflectPad1D::getNbOutputs() const { return 1; }
448ce0cf00dbe79ab19f65eaa5b64bffad30328c.cu
#include <cassert> #include <cuda_fp16.h> #include "ReflectPad1D.h" __forceinline__ __device__ void map1d(int insize, int outsize, int index, int lpad, int& inindex, int& outindex) { int inoffset = (blockIdx.y + blockIdx.z * gridDim.y) * insize; int outoffset = (blockIdx.y + blockIdx.z * gridDim.y) * outsize; int instart = max(0, -lpad); int outstart = max(0, lpad); int x = abs(index - lpad) - abs(index - (insize + lpad - 1)) - index + 2 * lpad + insize - 1 - outstart + instart; inindex = inoffset + x, outindex = outoffset + index; } template <typename Dtype> __global__ void reflectpad1d(Dtype* outdata, const Dtype* indata, int insize, int lpad, int rpad) { int index = threadIdx.x + blockIdx.x * blockDim.x; int outsize = insize + lpad + rpad; if (index < outsize) { int inindex = 0, outindex = 0; map1d(insize, outsize, index, lpad, inindex, outindex); outdata[outindex] = indata[inindex]; } } ReflectPad1D::ReflectPad1D(int lpad, int rpad) : m_lpad(lpad), m_rpad(rpad) { } ReflectPad1D::ReflectPad1D(const void *serialData, size_t serialLength) : Plugin(serialData, serialLength) { const char *buffer = static_cast<const char *>(serialData) + Plugin::getSerializationSize(); read(buffer, m_lpad); read(buffer, m_rpad); } size_t ReflectPad1D::getSerializationSize() { return Plugin::getSerializationSize() + sizeof(m_lpad) + sizeof(m_rpad); } void ReflectPad1D::serialize(void *serialData) { Plugin::serialize(serialData); char *buffer = static_cast<char *>(serialData) + Plugin::getSerializationSize(); write(buffer, m_lpad); write(buffer, m_rpad); } int ReflectPad1D::enqueue(int batchSize, const void * const *inputs, void **outputs, void */*workspace*/, cudaStream_t stream) { int maps = m_inshape.d[0], insize = m_inshape.d[1]; int outsize = insize + m_lpad + m_rpad; dim3 block(32); dim3 grid( (outsize + block.x - 1) / block.x, maps, batchSize ); if (m_datatype == nv::DataType::kFLOAT) { reflectpad1d<<<grid, block, 0, stream>>>( static_cast<float *>(outputs[0]), static_cast<const float *>(inputs[0]), insize, m_lpad, m_rpad ); } else { reflectpad1d<<<grid, block, 0, stream>>>( static_cast<half *>(outputs[0]), static_cast<const half *>(inputs[0]), insize, m_lpad, m_rpad ); } return 0; } nv::Dims ReflectPad1D::getOutputDimensions(int index, const nv::Dims *inputDims, int nbInputs) { assert(nbInputs == 1 && index == 0); nv::Dims inshape = inputDims[0]; nv::Dims outshape; outshape.nbDims = inshape.nbDims; outshape.type[0] = inshape.type[0]; outshape.d[0] = inshape.d[0]; outshape.type[1] = inshape.type[1]; outshape.d[1] = inshape.d[1] + m_lpad + m_rpad; return outshape; } void ReflectPad1D::configureWithFormat(const nv::Dims *inputDims, int /*nbInputs*/, const nv::Dims *outputDims, int /*nbOutputs*/, nv::DataType type, nv::PluginFormat format, int /*maxBatchSize*/) { m_inshape = inputDims[0]; m_outshape = outputDims[0]; assert((type == nv::DataType::kFLOAT || type == nv::DataType::kHALF) && format == nv::PluginFormat::kNCHW); m_datatype = type; } bool ReflectPad1D::supportsFormat(nv::DataType type, nv::PluginFormat format) const { return (type == nv::DataType::kFLOAT || type == nv::DataType::kHALF) && format == nv::PluginFormat::kNCHW; } int ReflectPad1D::initialize() { return 0; } void ReflectPad1D::terminate() { } size_t ReflectPad1D::getWorkspaceSize(int /*maxBatchSize*/) const { return 0; } int ReflectPad1D::getNbOutputs() const { return 1; }
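To make the closed-form index arithmetic in map1d easier to verify, here is a small host-side check (a sketch, not part of the plugin; reflect_index_naive and reflect_index_closed_form are made-up names). It assumes non-negative padding no larger than insize - 1, where the closed form should agree with the usual fold-at-the-borders reflection rule:

#include <cassert>
#include <cstdlib>

// Naive reflection index: mirror positions that fall outside [0, insize).
static int reflect_index_naive(int insize, int lpad, int i)
{
    int j = i - lpad;
    if (j < 0)       j = -j;
    if (j >= insize) j = 2 * (insize - 1) - j;
    return j;
}

// Same closed form as map1d above, specialised to lpad >= 0
// (there instart = 0 and outstart = lpad).
static int reflect_index_closed_form(int insize, int lpad, int i)
{
    return std::abs(i - lpad) - std::abs(i - (insize + lpad - 1)) - i + lpad + insize - 1;
}

int main()
{
    const int insize = 5, lpad = 2, rpad = 2;
    for (int i = 0; i < insize + lpad + rpad; i++)
        assert(reflect_index_naive(insize, lpad, i) ==
               reflect_index_closed_form(insize, lpad, i));
    return 0;
}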
7555714d06fea42e12ccd79e7b8546ca1bbfd37e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudassl/des.h" #if defined(CUDASSL_DES_C) /* * Expanded DES S-boxes */ __constant__ static const uint32_t SB1[64] = { 0x01010400, 0x00000000, 0x00010000, 0x01010404, 0x01010004, 0x00010404, 0x00000004, 0x00010000, 0x00000400, 0x01010400, 0x01010404, 0x00000400, 0x01000404, 0x01010004, 0x01000000, 0x00000004, 0x00000404, 0x01000400, 0x01000400, 0x00010400, 0x00010400, 0x01010000, 0x01010000, 0x01000404, 0x00010004, 0x01000004, 0x01000004, 0x00010004, 0x00000000, 0x00000404, 0x00010404, 0x01000000, 0x00010000, 0x01010404, 0x00000004, 0x01010000, 0x01010400, 0x01000000, 0x01000000, 0x00000400, 0x01010004, 0x00010000, 0x00010400, 0x01000004, 0x00000400, 0x00000004, 0x01000404, 0x00010404, 0x01010404, 0x00010004, 0x01010000, 0x01000404, 0x01000004, 0x00000404, 0x00010404, 0x01010400, 0x00000404, 0x01000400, 0x01000400, 0x00000000, 0x00010004, 0x00010400, 0x00000000, 0x01010004 }; __constant__ static const uint32_t SB2[64] = { 0x80108020, 0x80008000, 0x00008000, 0x00108020, 0x00100000, 0x00000020, 0x80100020, 0x80008020, 0x80000020, 0x80108020, 0x80108000, 0x80000000, 0x80008000, 0x00100000, 0x00000020, 0x80100020, 0x00108000, 0x00100020, 0x80008020, 0x00000000, 0x80000000, 0x00008000, 0x00108020, 0x80100000, 0x00100020, 0x80000020, 0x00000000, 0x00108000, 0x00008020, 0x80108000, 0x80100000, 0x00008020, 0x00000000, 0x00108020, 0x80100020, 0x00100000, 0x80008020, 0x80100000, 0x80108000, 0x00008000, 0x80100000, 0x80008000, 0x00000020, 0x80108020, 0x00108020, 0x00000020, 0x00008000, 0x80000000, 0x00008020, 0x80108000, 0x00100000, 0x80000020, 0x00100020, 0x80008020, 0x80000020, 0x00100020, 0x00108000, 0x00000000, 0x80008000, 0x00008020, 0x80000000, 0x80100020, 0x80108020, 0x00108000 }; __constant__ static const uint32_t SB3[64] = { 0x00000208, 0x08020200, 0x00000000, 0x08020008, 0x08000200, 0x00000000, 0x00020208, 0x08000200, 0x00020008, 0x08000008, 0x08000008, 0x00020000, 0x08020208, 0x00020008, 0x08020000, 0x00000208, 0x08000000, 0x00000008, 0x08020200, 0x00000200, 0x00020200, 0x08020000, 0x08020008, 0x00020208, 0x08000208, 0x00020200, 0x00020000, 0x08000208, 0x00000008, 0x08020208, 0x00000200, 0x08000000, 0x08020200, 0x08000000, 0x00020008, 0x00000208, 0x00020000, 0x08020200, 0x08000200, 0x00000000, 0x00000200, 0x00020008, 0x08020208, 0x08000200, 0x08000008, 0x00000200, 0x00000000, 0x08020008, 0x08000208, 0x00020000, 0x08000000, 0x08020208, 0x00000008, 0x00020208, 0x00020200, 0x08000008, 0x08020000, 0x08000208, 0x00000208, 0x08020000, 0x00020208, 0x00000008, 0x08020008, 0x00020200 }; __constant__ static const uint32_t SB4[64] = { 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802080, 0x00800081, 0x00800001, 0x00002001, 0x00000000, 0x00802000, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00800080, 0x00800001, 0x00000001, 0x00002000, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002001, 0x00002080, 0x00800081, 0x00000001, 0x00002080, 0x00800080, 0x00002000, 0x00802080, 0x00802081, 0x00000081, 0x00800080, 0x00800001, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00000000, 0x00802000, 0x00002080, 0x00800080, 0x00800081, 0x00000001, 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802081, 0x00000081, 0x00000001, 0x00002000, 0x00800001, 0x00002001, 0x00802080, 0x00800081, 0x00002001, 0x00002080, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002000, 0x00802080 }; __constant__ static const uint32_t SB5[64] = { 0x00000100, 0x02080100, 0x02080000, 0x42000100, 0x00080000, 
0x00000100, 0x40000000, 0x02080000, 0x40080100, 0x00080000, 0x02000100, 0x40080100, 0x42000100, 0x42080000, 0x00080100, 0x40000000, 0x02000000, 0x40080000, 0x40080000, 0x00000000, 0x40000100, 0x42080100, 0x42080100, 0x02000100, 0x42080000, 0x40000100, 0x00000000, 0x42000000, 0x02080100, 0x02000000, 0x42000000, 0x00080100, 0x00080000, 0x42000100, 0x00000100, 0x02000000, 0x40000000, 0x02080000, 0x42000100, 0x40080100, 0x02000100, 0x40000000, 0x42080000, 0x02080100, 0x40080100, 0x00000100, 0x02000000, 0x42080000, 0x42080100, 0x00080100, 0x42000000, 0x42080100, 0x02080000, 0x00000000, 0x40080000, 0x42000000, 0x00080100, 0x02000100, 0x40000100, 0x00080000, 0x00000000, 0x40080000, 0x02080100, 0x40000100 }; __constant__ static const uint32_t SB6[64] = { 0x20000010, 0x20400000, 0x00004000, 0x20404010, 0x20400000, 0x00000010, 0x20404010, 0x00400000, 0x20004000, 0x00404010, 0x00400000, 0x20000010, 0x00400010, 0x20004000, 0x20000000, 0x00004010, 0x00000000, 0x00400010, 0x20004010, 0x00004000, 0x00404000, 0x20004010, 0x00000010, 0x20400010, 0x20400010, 0x00000000, 0x00404010, 0x20404000, 0x00004010, 0x00404000, 0x20404000, 0x20000000, 0x20004000, 0x00000010, 0x20400010, 0x00404000, 0x20404010, 0x00400000, 0x00004010, 0x20000010, 0x00400000, 0x20004000, 0x20000000, 0x00004010, 0x20000010, 0x20404010, 0x00404000, 0x20400000, 0x00404010, 0x20404000, 0x00000000, 0x20400010, 0x00000010, 0x00004000, 0x20400000, 0x00404010, 0x00004000, 0x00400010, 0x20004010, 0x00000000, 0x20404000, 0x20000000, 0x00400010, 0x20004010 }; __constant__ static const uint32_t SB7[64] = { 0x00200000, 0x04200002, 0x04000802, 0x00000000, 0x00000800, 0x04000802, 0x00200802, 0x04200800, 0x04200802, 0x00200000, 0x00000000, 0x04000002, 0x00000002, 0x04000000, 0x04200002, 0x00000802, 0x04000800, 0x00200802, 0x00200002, 0x04000800, 0x04000002, 0x04200000, 0x04200800, 0x00200002, 0x04200000, 0x00000800, 0x00000802, 0x04200802, 0x00200800, 0x00000002, 0x04000000, 0x00200800, 0x04000000, 0x00200800, 0x00200000, 0x04000802, 0x04000802, 0x04200002, 0x04200002, 0x00000002, 0x00200002, 0x04000000, 0x04000800, 0x00200000, 0x04200800, 0x00000802, 0x00200802, 0x04200800, 0x00000802, 0x04000002, 0x04200802, 0x04200000, 0x00200800, 0x00000000, 0x00000002, 0x04200802, 0x00000000, 0x00200802, 0x04200000, 0x00000800, 0x04000002, 0x04000800, 0x00000800, 0x00200002 }; __constant__ static const uint32_t SB8[64] = { 0x10001040, 0x00001000, 0x00040000, 0x10041040, 0x10000000, 0x10001040, 0x00000040, 0x10000000, 0x00040040, 0x10040000, 0x10041040, 0x00041000, 0x10041000, 0x00041040, 0x00001000, 0x00000040, 0x10040000, 0x10000040, 0x10001000, 0x00001040, 0x00041000, 0x00040040, 0x10040040, 0x10041000, 0x00001040, 0x00000000, 0x00000000, 0x10040040, 0x10000040, 0x10001000, 0x00041040, 0x00040000, 0x00041040, 0x00040000, 0x10041000, 0x00001000, 0x00000040, 0x10040040, 0x00001000, 0x00041040, 0x10001000, 0x00000040, 0x10000040, 0x10040000, 0x10040040, 0x10000000, 0x00040000, 0x10001040, 0x00000000, 0x10041040, 0x00040040, 0x10000040, 0x10040000, 0x10001000, 0x10001040, 0x00000000, 0x10041040, 0x00041000, 0x00041000, 0x00001040, 0x00001040, 0x00040040, 0x10000000, 0x10041000 }; /* * PC1: left and right halves bit-swap */ static const uint32_t LHs[16] = { 0x00000000, 0x00000001, 0x00000100, 0x00000101, 0x00010000, 0x00010001, 0x00010100, 0x00010101, 0x01000000, 0x01000001, 0x01000100, 0x01000101, 0x01010000, 0x01010001, 0x01010100, 0x01010101 }; static const uint32_t RHs[16] = { 0x00000000, 0x01000000, 0x00010000, 0x01010000, 0x00000100, 0x01000100, 0x00010100, 
0x01010100, 0x00000001, 0x01000001, 0x00010001, 0x01010001, 0x00000101, 0x01000101, 0x00010101, 0x01010101, }; /* * Initial Permutation macro */ #define DES_IP(X,Y) { \ T = ((X >> 4) ^ Y) & 0x0F0F0F0F; Y ^= T; X ^= (T << 4); \ T = ((X >> 16) ^ Y) & 0x0000FFFF; Y ^= T; X ^= (T << 16); \ T = ((Y >> 2) ^ X) & 0x33333333; X ^= T; Y ^= (T << 2); \ T = ((Y >> 8) ^ X) & 0x00FF00FF; X ^= T; Y ^= (T << 8); \ Y = ((Y << 1) | (Y >> 31)) & 0xFFFFFFFF; \ T = (X ^ Y) & 0xAAAAAAAA; Y ^= T; X ^= T; \ X = ((X << 1) | (X >> 31)) & 0xFFFFFFFF; \ } /* * Final Permutation macro */ #define DES_FP(X,Y) { \ X = ((X << 31) | (X >> 1)) & 0xFFFFFFFF; \ T = (X ^ Y) & 0xAAAAAAAA; X ^= T; Y ^= T; \ Y = ((Y << 31) | (Y >> 1)) & 0xFFFFFFFF; \ T = ((Y >> 8) ^ X) & 0x00FF00FF; X ^= T; Y ^= (T << 8); \ T = ((Y >> 2) ^ X) & 0x33333333; X ^= T; Y ^= (T << 2); \ T = ((X >> 16) ^ Y) & 0x0000FFFF; Y ^= T; X ^= (T << 16); \ T = ((X >> 4) ^ Y) & 0x0F0F0F0F; Y ^= T; X ^= (T << 4); \ } /* * DES round macro */ #define DES_ROUND(X,Y) { \ T = *SK++ ^ X; \ Y ^= SB8[ (T ) & 0x3F ] ^ \ SB6[ (T >> 8) & 0x3F ] ^ \ SB4[ (T >> 16) & 0x3F ] ^ \ SB2[ (T >> 24) & 0x3F ]; \ \ T = *SK++ ^ ((X << 28) | (X >> 4)); \ Y ^= SB7[ (T ) & 0x3F ] ^ \ SB5[ (T >> 8) & 0x3F ] ^ \ SB3[ (T >> 16) & 0x3F ] ^ \ SB1[ (T >> 24) & 0x3F ]; \ } #define SWAP(a,b) { uint32_t t = a; a = b; b = t; t = 0; } void des_init(des_context *ctx) { memset(ctx, 0, sizeof(des_context)); } void des_free(des_context *ctx) { if (ctx == NULL) return; zeroize(ctx, sizeof(des_context)); } void des3_init(des3_context *ctx) { memset(ctx, 0, sizeof(des3_context)); } void des3_free(des3_context *ctx) { if (ctx == NULL) return; zeroize(ctx, sizeof(des3_context)); } static const unsigned char odd_parity_table[128] = { 1, 2, 4, 7, 8, 11, 13, 14, 16, 19, 21, 22, 25, 26, 28, 31, 32, 35, 37, 38, 41, 42, 44, 47, 49, 50, 52, 55, 56, 59, 61, 62, 64, 67, 69, 70, 73, 74, 76, 79, 81, 82, 84, 87, 88, 91, 93, 94, 97, 98, 100, 103, 104, 107, 109, 110, 112, 115, 117, 118, 121, 122, 124, 127, 128, 131, 133, 134, 137, 138, 140, 143, 145, 146, 148, 151, 152, 155, 157, 158, 161, 162, 164, 167, 168, 171, 173, 174, 176, 179, 181, 182, 185, 186, 188, 191, 193, 194, 196, 199, 200, 203, 205, 206, 208, 211, 213, 214, 217, 218, 220, 223, 224, 227, 229, 230, 233, 234, 236, 239, 241, 242, 244, 247, 248, 251, 253, 254 }; void des_key_set_parity(unsigned char key[DES_KEY_SIZE]) { int i; for (i = 0; i < DES_KEY_SIZE; i++) key[i] = odd_parity_table[key[i] / 2]; } /* * Check the given key's parity, returns 1 on failure, 0 on SUCCESS */ int des_key_check_key_parity(const unsigned char key[DES_KEY_SIZE]) { int i; for (i = 0; i < DES_KEY_SIZE; i++) if (key[i] != odd_parity_table[key[i] / 2]) return(1); return(0); } /* * Table of weak and semi-weak keys * * Source: http://en.wikipedia.org/wiki/Weak_key * * Weak: * Alternating ones + zeros (0x0101010101010101) * Alternating 'F' + 'E' (0xFEFEFEFEFEFEFEFE) * '0xE0E0E0E0F1F1F1F1' * '0x1F1F1F1F0E0E0E0E' * * Semi-weak: * 0x011F011F010E010E and 0x1F011F010E010E01 * 0x01E001E001F101F1 and 0xE001E001F101F101 * 0x01FE01FE01FE01FE and 0xFE01FE01FE01FE01 * 0x1FE01FE00EF10EF1 and 0xE01FE01FF10EF10E * 0x1FFE1FFE0EFE0EFE and 0xFE1FFE1FFE0EFE0E * 0xE0FEE0FEF1FEF1FE and 0xFEE0FEE0FEF1FEF1 * */ #define WEAK_KEY_COUNT 16 static const unsigned char weak_key_table[WEAK_KEY_COUNT][DES_KEY_SIZE] = { { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }, { 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, { 0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E }, { 0xE0, 0xE0, 0xE0, 0xE0, 0xF1, 0xF1, 0xF1, 0xF1 
}, { 0x01, 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E }, { 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E, 0x01 }, { 0x01, 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1 }, { 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1, 0x01 }, { 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE }, { 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01 }, { 0x1F, 0xE0, 0x1F, 0xE0, 0x0E, 0xF1, 0x0E, 0xF1 }, { 0xE0, 0x1F, 0xE0, 0x1F, 0xF1, 0x0E, 0xF1, 0x0E }, { 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E, 0xFE }, { 0xFE, 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E }, { 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE }, { 0xFE, 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1 } }; int des_key_check_weak(const unsigned char key[DES_KEY_SIZE]) { int i; for (i = 0; i < WEAK_KEY_COUNT; i++) if (memcmp(weak_key_table[i], key, DES_KEY_SIZE) == 0) return(1); return(0); } static void des_setkey(uint32_t SK[32], const unsigned char key[DES_KEY_SIZE]) { int i; uint32_t X, Y, T; GET_UINT32_BE(X, key, 0); GET_UINT32_BE(Y, key, 4); /* * Permuted Choice 1 */ T = ((Y >> 4) ^ X) & 0x0F0F0F0F; X ^= T; Y ^= (T << 4); T = ((Y ) ^ X) & 0x10101010; X ^= T; Y ^= (T ); X = (LHs[ (X ) & 0xF] << 3) | (LHs[ (X >> 8) & 0xF ] << 2) | (LHs[ (X >> 16) & 0xF] << 1) | (LHs[ (X >> 24) & 0xF ] ) | (LHs[ (X >> 5) & 0xF] << 7) | (LHs[ (X >> 13) & 0xF ] << 6) | (LHs[ (X >> 21) & 0xF] << 5) | (LHs[ (X >> 29) & 0xF ] << 4); Y = (RHs[ (Y >> 1) & 0xF] << 3) | (RHs[ (Y >> 9) & 0xF ] << 2) | (RHs[ (Y >> 17) & 0xF] << 1) | (RHs[ (Y >> 25) & 0xF ] ) | (RHs[ (Y >> 4) & 0xF] << 7) | (RHs[ (Y >> 12) & 0xF ] << 6) | (RHs[ (Y >> 20) & 0xF] << 5) | (RHs[ (Y >> 28) & 0xF ] << 4); X &= 0x0FFFFFFF; Y &= 0x0FFFFFFF; /* * calculate subkeys */ for (i = 0; i < 16; i++) { if (i < 2 || i == 8 || i == 15) { X = ((X << 1) | (X >> 27)) & 0x0FFFFFFF; Y = ((Y << 1) | (Y >> 27)) & 0x0FFFFFFF; } else { X = ((X << 2) | (X >> 26)) & 0x0FFFFFFF; Y = ((Y << 2) | (Y >> 26)) & 0x0FFFFFFF; } *SK++ = ((X << 4) & 0x24000000) | ((X << 28) & 0x10000000) | ((X << 14) & 0x08000000) | ((X << 18) & 0x02080000) | ((X << 6) & 0x01000000) | ((X << 9) & 0x00200000) | ((X >> 1) & 0x00100000) | ((X << 10) & 0x00040000) | ((X << 2) & 0x00020000) | ((X >> 10) & 0x00010000) | ((Y >> 13) & 0x00002000) | ((Y >> 4) & 0x00001000) | ((Y << 6) & 0x00000800) | ((Y >> 1) & 0x00000400) | ((Y >> 14) & 0x00000200) | ((Y ) & 0x00000100) | ((Y >> 5) & 0x00000020) | ((Y >> 10) & 0x00000010) | ((Y >> 3) & 0x00000008) | ((Y >> 18) & 0x00000004) | ((Y >> 26) & 0x00000002) | ((Y >> 24) & 0x00000001); *SK++ = ((X << 15) & 0x20000000) | ((X << 17) & 0x10000000) | ((X << 10) & 0x08000000) | ((X << 22) & 0x04000000) | ((X >> 2) & 0x02000000) | ((X << 1) & 0x01000000) | ((X << 16) & 0x00200000) | ((X << 11) & 0x00100000) | ((X << 3) & 0x00080000) | ((X >> 6) & 0x00040000) | ((X << 15) & 0x00020000) | ((X >> 4) & 0x00010000) | ((Y >> 2) & 0x00002000) | ((Y << 8) & 0x00001000) | ((Y >> 14) & 0x00000808) | ((Y >> 9) & 0x00000400) | ((Y ) & 0x00000200) | ((Y << 7) & 0x00000100) | ((Y >> 7) & 0x00000020) | ((Y >> 3) & 0x00000011) | ((Y << 2) & 0x00000004) | ((Y >> 21) & 0x00000002); } } /* * DES key schedule (56-bit, encryption) */ int des_setkey_enc(des_context *ctx, const unsigned char key[DES_KEY_SIZE]) { des_setkey(ctx->sk, key); return(0); } /* * DES key schedule (56-bit, decryption) */ int des_setkey_dec(des_context *ctx, const unsigned char key[DES_KEY_SIZE]) { int i; des_setkey(ctx->sk, key); for (i = 0; i < 16; i += 2) { SWAP(ctx->sk[i ], ctx->sk[30 - i]); SWAP(ctx->sk[i + 1], ctx->sk[31 - i]); } return(0); } static void des3_set2key(uint32_t esk[96], 
uint32_t dsk[96], const unsigned char key[DES_KEY_SIZE*2]) { int i; des_setkey(esk, key); des_setkey(dsk + 32, key + 8); for (i = 0; i < 32; i += 2) { dsk[i ] = esk[30 - i]; dsk[i + 1] = esk[31 - i]; esk[i + 32] = dsk[62 - i]; esk[i + 33] = dsk[63 - i]; esk[i + 64] = esk[i ]; esk[i + 65] = esk[i + 1]; dsk[i + 64] = dsk[i ]; dsk[i + 65] = dsk[i + 1]; } } /* * Triple-DES key schedule (112-bit, encryption) */ int des3_set2key_enc(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 2]) { uint32_t sk[96]; des3_set2key(ctx->sk, sk, key); zeroize(sk, sizeof(sk)); return(0); } /* * Triple-DES key schedule (112-bit, decryption) */ int des3_set2key_dec(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 2]) { uint32_t sk[96]; des3_set2key(sk, ctx->sk, key); zeroize(sk, sizeof(sk)); return(0); } static void des3_set3key(uint32_t esk[96], uint32_t dsk[96], const unsigned char key[24]) { int i; des_setkey(esk, key); des_setkey(dsk + 32, key + 8); des_setkey(esk + 64, key + 16); for (i = 0; i < 32; i += 2) { dsk[i ] = esk[94 - i]; dsk[i + 1] = esk[95 - i]; esk[i + 32] = dsk[62 - i]; esk[i + 33] = dsk[63 - i]; dsk[i + 64] = esk[30 - i]; dsk[i + 65] = esk[31 - i]; } } /* * Triple-DES key schedule (168-bit, encryption) */ int des3_set3key_enc(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 3]) { uint32_t sk[96]; des3_set3key(ctx->sk, sk, key); zeroize(sk, sizeof(sk)); return(0); } /* * Triple-DES key schedule (168-bit, decryption) */ int des3_set3key_dec(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 3]) { uint32_t sk[96]; des3_set3key(sk, ctx->sk, key); zeroize(sk, sizeof(sk)); return(0); } __constant__ des_context des_ctx; __constant__ des3_context des3_ctx; /* * DES-ECB block encryption/decryption */ __global__ void des_crypt_ecb_kernel( const unsigned char *inputs, unsigned char *outputs) { const unsigned char *input = inputs + TX * DES_BLOCK_SIZE; unsigned char *output = outputs + TX * DES_BLOCK_SIZE; int i; uint32_t X, Y, T, *SK; SK = des_ctx.sk; GET_UINT32_BE(X, input, 0); GET_UINT32_BE(Y, input, 4); DES_IP(X, Y); for (i = 0; i < 8; i++) { DES_ROUND(Y, X); DES_ROUND(X, Y); } DES_FP(Y, X); PUT_UINT32_BE(Y, output, 0); PUT_UINT32_BE(X, output, 4); } int des_transfer_context(des_context *ctx) { cuda_upload_symbol(ctx, des_ctx, sizeof(des_context)); return 0; } int des_crypt_ecb(const unsigned char *input, size_t length, unsigned char *output, cuda_device *d) { cuda_upload_data(input, d->device_data_in, length); int grid_size = length / (MAX_THREAD * DES_BLOCK_SIZE); if (length % (MAX_THREAD * DES_BLOCK_SIZE) != 0) grid_size += 1; int thread_size = (length / DES_BLOCK_SIZE) < MAX_THREAD ? 
length / DES_BLOCK_SIZE : MAX_THREAD; // printf("DES_KERNEL<<<%d,%d>>>\n", grid_size, thread_size); hipLaunchKernelGGL(( des_crypt_ecb_kernel), dim3(grid_size), dim3(thread_size), 0, 0, d->device_data_in, d->device_data_out); cuda_download_data(output, d->device_data_out, length); return 0; } /* * 3DES-ECB block encryption/decryption */ __global__ void des3_crypt_ecb_kernel( const unsigned char *inputs, unsigned char *outputs) { const unsigned char *input = inputs + TX * DES_BLOCK_SIZE; unsigned char *output = outputs + TX * DES_BLOCK_SIZE; int i; uint32_t X, Y, T, *SK; SK = des3_ctx.sk; GET_UINT32_BE(X, input, 0); GET_UINT32_BE(Y, input, 4); DES_IP(X, Y); for (i = 0; i < 8; i++) { DES_ROUND(Y, X); DES_ROUND(X, Y); } for (i = 0; i < 8; i++) { DES_ROUND(X, Y); DES_ROUND(Y, X); } for (i = 0; i < 8; i++) { DES_ROUND(Y, X); DES_ROUND(X, Y); } DES_FP(Y, X); PUT_UINT32_BE(Y, output, 0); PUT_UINT32_BE(X, output, 4); } int des3_transfer_context(des3_context *ctx) { cuda_upload_symbol(ctx, des3_ctx, sizeof(des3_context)); return 0; } int des3_crypt_ecb( const unsigned char *input, size_t length, unsigned char *output, cuda_device *d) { cuda_upload_data(input, d->device_data_in, length); int grid_size = length / (MAX_THREAD * DES_BLOCK_SIZE); if (length % (MAX_THREAD * DES_BLOCK_SIZE) != 0) grid_size += 1; int thread_size = (length / DES_BLOCK_SIZE) < MAX_THREAD ? length / DES_BLOCK_SIZE : MAX_THREAD; // printf("DES_KERNEL<<<%d,%d>>>\n", grid_size, thread_size); hipLaunchKernelGGL(( des3_crypt_ecb_kernel), dim3(grid_size), dim3(thread_size), 0, 0, d->device_data_in, d->device_data_out); cuda_download_data(output, d->device_data_out, length); return 0; } #if defined(CUDASSL_SELF_TEST) #include <stdio.h> /* * DES and 3DES test vectors from: * * http://csrc.nist.gov/groups/STM/cavp/documents/des/tripledes-vectors.zip */ static const unsigned char des3_test_keys[24] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23 }; static const unsigned char des3_test_buf[8] = { 0x4E, 0x6F, 0x77, 0x20, 0x69, 0x73, 0x20, 0x74 }; static const unsigned char des3_test_ecb_dec[3][8] = { { 0xCD, 0xD6, 0x4F, 0x2F, 0x94, 0x27, 0xC1, 0x5D }, { 0x69, 0x96, 0xC8, 0xFA, 0x47, 0xA2, 0xAB, 0xEB }, { 0x83, 0x25, 0x39, 0x76, 0x44, 0x09, 0x1A, 0x0A } }; static const unsigned char des3_test_ecb_enc[3][8] = { { 0x6A, 0x2A, 0x19, 0xF4, 0x1E, 0xCA, 0x85, 0x4B }, { 0x03, 0xE6, 0x9F, 0x5B, 0xFA, 0x58, 0xEB, 0x42 }, { 0xDD, 0x17, 0xE8, 0xB8, 0xB4, 0x37, 0xD2, 0x32 } }; /* * Checkup routine */ extern "C" int des_self_test(int verbose, cuda_device *d) { int i, j, u, v, ret = 0; des_context ctx; des3_context ctx3; unsigned char buf[MAX_THREAD][DES_BLOCK_SIZE]; des_init(&ctx); des3_init(&ctx3); /* * ECB mode */ for (i = 0; i < 6; i++) { u = i >> 1; v = i & 1; if (verbose != 0) printf(" DES%c-ECB-%3d (%s): ", (u == 0) ? ' ' : '3', 56 + u * 56, (v == DES_DECRYPT) ? 
"dec" : "enc"); memcpy(buf[0], des3_test_buf, DES_BLOCK_SIZE); memcpy(buf[1], des3_test_buf, DES_BLOCK_SIZE); switch (i) { case 0: des_setkey_dec(&ctx, des3_test_keys); des_transfer_context(&ctx); break; case 1: des_setkey_enc(&ctx, des3_test_keys); des_transfer_context(&ctx); break; case 2: des3_set2key_dec(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; case 3: des3_set2key_enc(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; case 4: des3_set3key_dec(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; case 5: des3_set3key_enc(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; default: return(1); } for (j = 0; j < 10000; j++) { if (u == 0) des_crypt_ecb(*buf, DES_BLOCK_SIZE * 2, *buf, d); else des3_crypt_ecb(*buf, DES_BLOCK_SIZE * 2, *buf, d); } if (v == DES_DECRYPT) { if (memcmp(buf[0], des3_test_ecb_dec[u], DES_BLOCK_SIZE) != 0 && memcmp(buf[1], des3_test_ecb_dec[u], DES_BLOCK_SIZE) != 0) { if (verbose != 0) printf("failed\n"); ret = 1; goto exit; } } else { if (memcmp(buf[0], des3_test_ecb_enc[u], DES_BLOCK_SIZE) != 0 && memcmp(buf[1], des3_test_ecb_enc[u], DES_BLOCK_SIZE) != 0) { if (verbose != 0) printf("failed\n"); ret = 1; goto exit; } } if (verbose != 0) printf("passed\n"); } if (verbose != 0) printf("\n"); exit: des_free(&ctx); des3_free(&ctx3); return(ret); } #define DATASIZE 1000L #define LOOPS 1000L extern "C" int des_performance_test_with_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; unsigned char buf[MAX_THREAD * DATASIZE][DES_BLOCK_SIZE]; des_context ctx; int i; float h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); memset(buf, 0, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); des_init(&ctx); des_setkey_enc(&ctx, key); des_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) des_crypt_ecb(*buf, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE, *buf, d); CUDA_STOP_TIME(" DES -ECB- 56 (enc)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des3_performance_test_with_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; unsigned char buf[MAX_THREAD * DATASIZE][DES_BLOCK_SIZE]; des3_context ctx; int i; float h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); memset(buf, 0, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); des3_init(&ctx); des3_set3key_enc(&ctx, key); des3_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) des3_crypt_ecb(*buf, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE, *buf, d); CUDA_STOP_TIME(" DES3-ECB-168 (enc)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des_performance_test_without_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; 
des_context ctx; int i; double h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); des_init(&ctx); des_setkey_enc(&ctx, key); des_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) hipLaunchKernelGGL(( des_crypt_ecb_kernel), dim3(DATASIZE), dim3(MAX_THREAD), 0, 0, d->device_data_in, d->device_data_out); CUDA_STOP_TIME(" DES -ECB- 56 (enc only)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des3_performance_test_without_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; des3_context ctx; int i; double h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); des3_init(&ctx); des3_set3key_enc(&ctx, key); des3_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) hipLaunchKernelGGL(( des3_crypt_ecb_kernel), dim3(DATASIZE), dim3(MAX_THREAD), 0, 0, d->device_data_in, d->device_data_out); CUDA_STOP_TIME(" DES3-ECB-128 (enc only)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des_performance_test(int verbose, cuda_device *d) { des_performance_test_with_data_transform(verbose, d); des_performance_test_without_data_transform(verbose, d); return 0; } extern "C" int des3_performance_test(int verbose, cuda_device *d) { des3_performance_test_with_data_transform(verbose, d); des3_performance_test_without_data_transform(verbose, d); return 0; } #endif /* CUDASSL_SELF_TEST */ #endif /* CUDASSL_DES_C */
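The grid/block sizing inside des_crypt_ecb and des3_crypt_ecb maps one thread to one 8-byte DES block. A host-side sketch of the same arithmetic (des_launch_geometry is a made-up helper name; DES_BLOCK_SIZE and MAX_THREAD are assumed to come from the library's headers, and length is assumed to be a multiple of DES_BLOCK_SIZE, as ECB requires):

// One thread per 8-byte block: grid = ceil(num_blocks / MAX_THREAD),
// threads = min(num_blocks, MAX_THREAD). Mirrors the logic in des_crypt_ecb.
static void des_launch_geometry(size_t length, int *grid_size, int *thread_size)
{
    size_t num_blocks = length / DES_BLOCK_SIZE;
    *grid_size   = (int)((num_blocks + MAX_THREAD - 1) / MAX_THREAD);
    *thread_size = (int)(num_blocks < (size_t)MAX_THREAD ? num_blocks : MAX_THREAD);
}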
7555714d06fea42e12ccd79e7b8546ca1bbfd37e.cu
#include "cudassl/des.h" #if defined(CUDASSL_DES_C) /* * Expanded DES S-boxes */ __constant__ static const uint32_t SB1[64] = { 0x01010400, 0x00000000, 0x00010000, 0x01010404, 0x01010004, 0x00010404, 0x00000004, 0x00010000, 0x00000400, 0x01010400, 0x01010404, 0x00000400, 0x01000404, 0x01010004, 0x01000000, 0x00000004, 0x00000404, 0x01000400, 0x01000400, 0x00010400, 0x00010400, 0x01010000, 0x01010000, 0x01000404, 0x00010004, 0x01000004, 0x01000004, 0x00010004, 0x00000000, 0x00000404, 0x00010404, 0x01000000, 0x00010000, 0x01010404, 0x00000004, 0x01010000, 0x01010400, 0x01000000, 0x01000000, 0x00000400, 0x01010004, 0x00010000, 0x00010400, 0x01000004, 0x00000400, 0x00000004, 0x01000404, 0x00010404, 0x01010404, 0x00010004, 0x01010000, 0x01000404, 0x01000004, 0x00000404, 0x00010404, 0x01010400, 0x00000404, 0x01000400, 0x01000400, 0x00000000, 0x00010004, 0x00010400, 0x00000000, 0x01010004 }; __constant__ static const uint32_t SB2[64] = { 0x80108020, 0x80008000, 0x00008000, 0x00108020, 0x00100000, 0x00000020, 0x80100020, 0x80008020, 0x80000020, 0x80108020, 0x80108000, 0x80000000, 0x80008000, 0x00100000, 0x00000020, 0x80100020, 0x00108000, 0x00100020, 0x80008020, 0x00000000, 0x80000000, 0x00008000, 0x00108020, 0x80100000, 0x00100020, 0x80000020, 0x00000000, 0x00108000, 0x00008020, 0x80108000, 0x80100000, 0x00008020, 0x00000000, 0x00108020, 0x80100020, 0x00100000, 0x80008020, 0x80100000, 0x80108000, 0x00008000, 0x80100000, 0x80008000, 0x00000020, 0x80108020, 0x00108020, 0x00000020, 0x00008000, 0x80000000, 0x00008020, 0x80108000, 0x00100000, 0x80000020, 0x00100020, 0x80008020, 0x80000020, 0x00100020, 0x00108000, 0x00000000, 0x80008000, 0x00008020, 0x80000000, 0x80100020, 0x80108020, 0x00108000 }; __constant__ static const uint32_t SB3[64] = { 0x00000208, 0x08020200, 0x00000000, 0x08020008, 0x08000200, 0x00000000, 0x00020208, 0x08000200, 0x00020008, 0x08000008, 0x08000008, 0x00020000, 0x08020208, 0x00020008, 0x08020000, 0x00000208, 0x08000000, 0x00000008, 0x08020200, 0x00000200, 0x00020200, 0x08020000, 0x08020008, 0x00020208, 0x08000208, 0x00020200, 0x00020000, 0x08000208, 0x00000008, 0x08020208, 0x00000200, 0x08000000, 0x08020200, 0x08000000, 0x00020008, 0x00000208, 0x00020000, 0x08020200, 0x08000200, 0x00000000, 0x00000200, 0x00020008, 0x08020208, 0x08000200, 0x08000008, 0x00000200, 0x00000000, 0x08020008, 0x08000208, 0x00020000, 0x08000000, 0x08020208, 0x00000008, 0x00020208, 0x00020200, 0x08000008, 0x08020000, 0x08000208, 0x00000208, 0x08020000, 0x00020208, 0x00000008, 0x08020008, 0x00020200 }; __constant__ static const uint32_t SB4[64] = { 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802080, 0x00800081, 0x00800001, 0x00002001, 0x00000000, 0x00802000, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00800080, 0x00800001, 0x00000001, 0x00002000, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002001, 0x00002080, 0x00800081, 0x00000001, 0x00002080, 0x00800080, 0x00002000, 0x00802080, 0x00802081, 0x00000081, 0x00800080, 0x00800001, 0x00802000, 0x00802081, 0x00000081, 0x00000000, 0x00000000, 0x00802000, 0x00002080, 0x00800080, 0x00800081, 0x00000001, 0x00802001, 0x00002081, 0x00002081, 0x00000080, 0x00802081, 0x00000081, 0x00000001, 0x00002000, 0x00800001, 0x00002001, 0x00802080, 0x00800081, 0x00002001, 0x00002080, 0x00800000, 0x00802001, 0x00000080, 0x00800000, 0x00002000, 0x00802080 }; __constant__ static const uint32_t SB5[64] = { 0x00000100, 0x02080100, 0x02080000, 0x42000100, 0x00080000, 0x00000100, 0x40000000, 0x02080000, 0x40080100, 0x00080000, 0x02000100, 0x40080100, 0x42000100, 
0x42080000, 0x00080100, 0x40000000, 0x02000000, 0x40080000, 0x40080000, 0x00000000, 0x40000100, 0x42080100, 0x42080100, 0x02000100, 0x42080000, 0x40000100, 0x00000000, 0x42000000, 0x02080100, 0x02000000, 0x42000000, 0x00080100, 0x00080000, 0x42000100, 0x00000100, 0x02000000, 0x40000000, 0x02080000, 0x42000100, 0x40080100, 0x02000100, 0x40000000, 0x42080000, 0x02080100, 0x40080100, 0x00000100, 0x02000000, 0x42080000, 0x42080100, 0x00080100, 0x42000000, 0x42080100, 0x02080000, 0x00000000, 0x40080000, 0x42000000, 0x00080100, 0x02000100, 0x40000100, 0x00080000, 0x00000000, 0x40080000, 0x02080100, 0x40000100 }; __constant__ static const uint32_t SB6[64] = { 0x20000010, 0x20400000, 0x00004000, 0x20404010, 0x20400000, 0x00000010, 0x20404010, 0x00400000, 0x20004000, 0x00404010, 0x00400000, 0x20000010, 0x00400010, 0x20004000, 0x20000000, 0x00004010, 0x00000000, 0x00400010, 0x20004010, 0x00004000, 0x00404000, 0x20004010, 0x00000010, 0x20400010, 0x20400010, 0x00000000, 0x00404010, 0x20404000, 0x00004010, 0x00404000, 0x20404000, 0x20000000, 0x20004000, 0x00000010, 0x20400010, 0x00404000, 0x20404010, 0x00400000, 0x00004010, 0x20000010, 0x00400000, 0x20004000, 0x20000000, 0x00004010, 0x20000010, 0x20404010, 0x00404000, 0x20400000, 0x00404010, 0x20404000, 0x00000000, 0x20400010, 0x00000010, 0x00004000, 0x20400000, 0x00404010, 0x00004000, 0x00400010, 0x20004010, 0x00000000, 0x20404000, 0x20000000, 0x00400010, 0x20004010 }; __constant__ static const uint32_t SB7[64] = { 0x00200000, 0x04200002, 0x04000802, 0x00000000, 0x00000800, 0x04000802, 0x00200802, 0x04200800, 0x04200802, 0x00200000, 0x00000000, 0x04000002, 0x00000002, 0x04000000, 0x04200002, 0x00000802, 0x04000800, 0x00200802, 0x00200002, 0x04000800, 0x04000002, 0x04200000, 0x04200800, 0x00200002, 0x04200000, 0x00000800, 0x00000802, 0x04200802, 0x00200800, 0x00000002, 0x04000000, 0x00200800, 0x04000000, 0x00200800, 0x00200000, 0x04000802, 0x04000802, 0x04200002, 0x04200002, 0x00000002, 0x00200002, 0x04000000, 0x04000800, 0x00200000, 0x04200800, 0x00000802, 0x00200802, 0x04200800, 0x00000802, 0x04000002, 0x04200802, 0x04200000, 0x00200800, 0x00000000, 0x00000002, 0x04200802, 0x00000000, 0x00200802, 0x04200000, 0x00000800, 0x04000002, 0x04000800, 0x00000800, 0x00200002 }; __constant__ static const uint32_t SB8[64] = { 0x10001040, 0x00001000, 0x00040000, 0x10041040, 0x10000000, 0x10001040, 0x00000040, 0x10000000, 0x00040040, 0x10040000, 0x10041040, 0x00041000, 0x10041000, 0x00041040, 0x00001000, 0x00000040, 0x10040000, 0x10000040, 0x10001000, 0x00001040, 0x00041000, 0x00040040, 0x10040040, 0x10041000, 0x00001040, 0x00000000, 0x00000000, 0x10040040, 0x10000040, 0x10001000, 0x00041040, 0x00040000, 0x00041040, 0x00040000, 0x10041000, 0x00001000, 0x00000040, 0x10040040, 0x00001000, 0x00041040, 0x10001000, 0x00000040, 0x10000040, 0x10040000, 0x10040040, 0x10000000, 0x00040000, 0x10001040, 0x00000000, 0x10041040, 0x00040040, 0x10000040, 0x10040000, 0x10001000, 0x10001040, 0x00000000, 0x10041040, 0x00041000, 0x00041000, 0x00001040, 0x00001040, 0x00040040, 0x10000000, 0x10041000 }; /* * PC1: left and right halves bit-swap */ static const uint32_t LHs[16] = { 0x00000000, 0x00000001, 0x00000100, 0x00000101, 0x00010000, 0x00010001, 0x00010100, 0x00010101, 0x01000000, 0x01000001, 0x01000100, 0x01000101, 0x01010000, 0x01010001, 0x01010100, 0x01010101 }; static const uint32_t RHs[16] = { 0x00000000, 0x01000000, 0x00010000, 0x01010000, 0x00000100, 0x01000100, 0x00010100, 0x01010100, 0x00000001, 0x01000001, 0x00010001, 0x01010001, 0x00000101, 0x01000101, 0x00010101, 
0x01010101, }; /* * Initial Permutation macro */ #define DES_IP(X,Y) { \ T = ((X >> 4) ^ Y) & 0x0F0F0F0F; Y ^= T; X ^= (T << 4); \ T = ((X >> 16) ^ Y) & 0x0000FFFF; Y ^= T; X ^= (T << 16); \ T = ((Y >> 2) ^ X) & 0x33333333; X ^= T; Y ^= (T << 2); \ T = ((Y >> 8) ^ X) & 0x00FF00FF; X ^= T; Y ^= (T << 8); \ Y = ((Y << 1) | (Y >> 31)) & 0xFFFFFFFF; \ T = (X ^ Y) & 0xAAAAAAAA; Y ^= T; X ^= T; \ X = ((X << 1) | (X >> 31)) & 0xFFFFFFFF; \ } /* * Final Permutation macro */ #define DES_FP(X,Y) { \ X = ((X << 31) | (X >> 1)) & 0xFFFFFFFF; \ T = (X ^ Y) & 0xAAAAAAAA; X ^= T; Y ^= T; \ Y = ((Y << 31) | (Y >> 1)) & 0xFFFFFFFF; \ T = ((Y >> 8) ^ X) & 0x00FF00FF; X ^= T; Y ^= (T << 8); \ T = ((Y >> 2) ^ X) & 0x33333333; X ^= T; Y ^= (T << 2); \ T = ((X >> 16) ^ Y) & 0x0000FFFF; Y ^= T; X ^= (T << 16); \ T = ((X >> 4) ^ Y) & 0x0F0F0F0F; Y ^= T; X ^= (T << 4); \ } /* * DES round macro */ #define DES_ROUND(X,Y) { \ T = *SK++ ^ X; \ Y ^= SB8[ (T ) & 0x3F ] ^ \ SB6[ (T >> 8) & 0x3F ] ^ \ SB4[ (T >> 16) & 0x3F ] ^ \ SB2[ (T >> 24) & 0x3F ]; \ \ T = *SK++ ^ ((X << 28) | (X >> 4)); \ Y ^= SB7[ (T ) & 0x3F ] ^ \ SB5[ (T >> 8) & 0x3F ] ^ \ SB3[ (T >> 16) & 0x3F ] ^ \ SB1[ (T >> 24) & 0x3F ]; \ } #define SWAP(a,b) { uint32_t t = a; a = b; b = t; t = 0; } void des_init(des_context *ctx) { memset(ctx, 0, sizeof(des_context)); } void des_free(des_context *ctx) { if (ctx == NULL) return; zeroize(ctx, sizeof(des_context)); } void des3_init(des3_context *ctx) { memset(ctx, 0, sizeof(des3_context)); } void des3_free(des3_context *ctx) { if (ctx == NULL) return; zeroize(ctx, sizeof(des3_context)); } static const unsigned char odd_parity_table[128] = { 1, 2, 4, 7, 8, 11, 13, 14, 16, 19, 21, 22, 25, 26, 28, 31, 32, 35, 37, 38, 41, 42, 44, 47, 49, 50, 52, 55, 56, 59, 61, 62, 64, 67, 69, 70, 73, 74, 76, 79, 81, 82, 84, 87, 88, 91, 93, 94, 97, 98, 100, 103, 104, 107, 109, 110, 112, 115, 117, 118, 121, 122, 124, 127, 128, 131, 133, 134, 137, 138, 140, 143, 145, 146, 148, 151, 152, 155, 157, 158, 161, 162, 164, 167, 168, 171, 173, 174, 176, 179, 181, 182, 185, 186, 188, 191, 193, 194, 196, 199, 200, 203, 205, 206, 208, 211, 213, 214, 217, 218, 220, 223, 224, 227, 229, 230, 233, 234, 236, 239, 241, 242, 244, 247, 248, 251, 253, 254 }; void des_key_set_parity(unsigned char key[DES_KEY_SIZE]) { int i; for (i = 0; i < DES_KEY_SIZE; i++) key[i] = odd_parity_table[key[i] / 2]; } /* * Check the given key's parity, returns 1 on failure, 0 on SUCCESS */ int des_key_check_key_parity(const unsigned char key[DES_KEY_SIZE]) { int i; for (i = 0; i < DES_KEY_SIZE; i++) if (key[i] != odd_parity_table[key[i] / 2]) return(1); return(0); } /* * Table of weak and semi-weak keys * * Source: http://en.wikipedia.org/wiki/Weak_key * * Weak: * Alternating ones + zeros (0x0101010101010101) * Alternating 'F' + 'E' (0xFEFEFEFEFEFEFEFE) * '0xE0E0E0E0F1F1F1F1' * '0x1F1F1F1F0E0E0E0E' * * Semi-weak: * 0x011F011F010E010E and 0x1F011F010E010E01 * 0x01E001E001F101F1 and 0xE001E001F101F101 * 0x01FE01FE01FE01FE and 0xFE01FE01FE01FE01 * 0x1FE01FE00EF10EF1 and 0xE01FE01FF10EF10E * 0x1FFE1FFE0EFE0EFE and 0xFE1FFE1FFE0EFE0E * 0xE0FEE0FEF1FEF1FE and 0xFEE0FEE0FEF1FEF1 * */ #define WEAK_KEY_COUNT 16 static const unsigned char weak_key_table[WEAK_KEY_COUNT][DES_KEY_SIZE] = { { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }, { 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, { 0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E }, { 0xE0, 0xE0, 0xE0, 0xE0, 0xF1, 0xF1, 0xF1, 0xF1 }, { 0x01, 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 0x0E }, { 0x1F, 0x01, 0x1F, 0x01, 0x0E, 0x01, 
0x0E, 0x01 }, { 0x01, 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1 }, { 0xE0, 0x01, 0xE0, 0x01, 0xF1, 0x01, 0xF1, 0x01 }, { 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE }, { 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01, 0xFE, 0x01 }, { 0x1F, 0xE0, 0x1F, 0xE0, 0x0E, 0xF1, 0x0E, 0xF1 }, { 0xE0, 0x1F, 0xE0, 0x1F, 0xF1, 0x0E, 0xF1, 0x0E }, { 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E, 0xFE }, { 0xFE, 0x1F, 0xFE, 0x1F, 0xFE, 0x0E, 0xFE, 0x0E }, { 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE }, { 0xFE, 0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1 } }; int des_key_check_weak(const unsigned char key[DES_KEY_SIZE]) { int i; for (i = 0; i < WEAK_KEY_COUNT; i++) if (memcmp(weak_key_table[i], key, DES_KEY_SIZE) == 0) return(1); return(0); } static void des_setkey(uint32_t SK[32], const unsigned char key[DES_KEY_SIZE]) { int i; uint32_t X, Y, T; GET_UINT32_BE(X, key, 0); GET_UINT32_BE(Y, key, 4); /* * Permuted Choice 1 */ T = ((Y >> 4) ^ X) & 0x0F0F0F0F; X ^= T; Y ^= (T << 4); T = ((Y ) ^ X) & 0x10101010; X ^= T; Y ^= (T ); X = (LHs[ (X ) & 0xF] << 3) | (LHs[ (X >> 8) & 0xF ] << 2) | (LHs[ (X >> 16) & 0xF] << 1) | (LHs[ (X >> 24) & 0xF ] ) | (LHs[ (X >> 5) & 0xF] << 7) | (LHs[ (X >> 13) & 0xF ] << 6) | (LHs[ (X >> 21) & 0xF] << 5) | (LHs[ (X >> 29) & 0xF ] << 4); Y = (RHs[ (Y >> 1) & 0xF] << 3) | (RHs[ (Y >> 9) & 0xF ] << 2) | (RHs[ (Y >> 17) & 0xF] << 1) | (RHs[ (Y >> 25) & 0xF ] ) | (RHs[ (Y >> 4) & 0xF] << 7) | (RHs[ (Y >> 12) & 0xF ] << 6) | (RHs[ (Y >> 20) & 0xF] << 5) | (RHs[ (Y >> 28) & 0xF ] << 4); X &= 0x0FFFFFFF; Y &= 0x0FFFFFFF; /* * calculate subkeys */ for (i = 0; i < 16; i++) { if (i < 2 || i == 8 || i == 15) { X = ((X << 1) | (X >> 27)) & 0x0FFFFFFF; Y = ((Y << 1) | (Y >> 27)) & 0x0FFFFFFF; } else { X = ((X << 2) | (X >> 26)) & 0x0FFFFFFF; Y = ((Y << 2) | (Y >> 26)) & 0x0FFFFFFF; } *SK++ = ((X << 4) & 0x24000000) | ((X << 28) & 0x10000000) | ((X << 14) & 0x08000000) | ((X << 18) & 0x02080000) | ((X << 6) & 0x01000000) | ((X << 9) & 0x00200000) | ((X >> 1) & 0x00100000) | ((X << 10) & 0x00040000) | ((X << 2) & 0x00020000) | ((X >> 10) & 0x00010000) | ((Y >> 13) & 0x00002000) | ((Y >> 4) & 0x00001000) | ((Y << 6) & 0x00000800) | ((Y >> 1) & 0x00000400) | ((Y >> 14) & 0x00000200) | ((Y ) & 0x00000100) | ((Y >> 5) & 0x00000020) | ((Y >> 10) & 0x00000010) | ((Y >> 3) & 0x00000008) | ((Y >> 18) & 0x00000004) | ((Y >> 26) & 0x00000002) | ((Y >> 24) & 0x00000001); *SK++ = ((X << 15) & 0x20000000) | ((X << 17) & 0x10000000) | ((X << 10) & 0x08000000) | ((X << 22) & 0x04000000) | ((X >> 2) & 0x02000000) | ((X << 1) & 0x01000000) | ((X << 16) & 0x00200000) | ((X << 11) & 0x00100000) | ((X << 3) & 0x00080000) | ((X >> 6) & 0x00040000) | ((X << 15) & 0x00020000) | ((X >> 4) & 0x00010000) | ((Y >> 2) & 0x00002000) | ((Y << 8) & 0x00001000) | ((Y >> 14) & 0x00000808) | ((Y >> 9) & 0x00000400) | ((Y ) & 0x00000200) | ((Y << 7) & 0x00000100) | ((Y >> 7) & 0x00000020) | ((Y >> 3) & 0x00000011) | ((Y << 2) & 0x00000004) | ((Y >> 21) & 0x00000002); } } /* * DES key schedule (56-bit, encryption) */ int des_setkey_enc(des_context *ctx, const unsigned char key[DES_KEY_SIZE]) { des_setkey(ctx->sk, key); return(0); } /* * DES key schedule (56-bit, decryption) */ int des_setkey_dec(des_context *ctx, const unsigned char key[DES_KEY_SIZE]) { int i; des_setkey(ctx->sk, key); for (i = 0; i < 16; i += 2) { SWAP(ctx->sk[i ], ctx->sk[30 - i]); SWAP(ctx->sk[i + 1], ctx->sk[31 - i]); } return(0); } static void des3_set2key(uint32_t esk[96], uint32_t dsk[96], const unsigned char key[DES_KEY_SIZE*2]) { int i; des_setkey(esk, key); 
des_setkey(dsk + 32, key + 8); for (i = 0; i < 32; i += 2) { dsk[i ] = esk[30 - i]; dsk[i + 1] = esk[31 - i]; esk[i + 32] = dsk[62 - i]; esk[i + 33] = dsk[63 - i]; esk[i + 64] = esk[i ]; esk[i + 65] = esk[i + 1]; dsk[i + 64] = dsk[i ]; dsk[i + 65] = dsk[i + 1]; } } /* * Triple-DES key schedule (112-bit, encryption) */ int des3_set2key_enc(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 2]) { uint32_t sk[96]; des3_set2key(ctx->sk, sk, key); zeroize(sk, sizeof(sk)); return(0); } /* * Triple-DES key schedule (112-bit, decryption) */ int des3_set2key_dec(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 2]) { uint32_t sk[96]; des3_set2key(sk, ctx->sk, key); zeroize(sk, sizeof(sk)); return(0); } static void des3_set3key(uint32_t esk[96], uint32_t dsk[96], const unsigned char key[24]) { int i; des_setkey(esk, key); des_setkey(dsk + 32, key + 8); des_setkey(esk + 64, key + 16); for (i = 0; i < 32; i += 2) { dsk[i ] = esk[94 - i]; dsk[i + 1] = esk[95 - i]; esk[i + 32] = dsk[62 - i]; esk[i + 33] = dsk[63 - i]; dsk[i + 64] = esk[30 - i]; dsk[i + 65] = esk[31 - i]; } } /* * Triple-DES key schedule (168-bit, encryption) */ int des3_set3key_enc(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 3]) { uint32_t sk[96]; des3_set3key(ctx->sk, sk, key); zeroize(sk, sizeof(sk)); return(0); } /* * Triple-DES key schedule (168-bit, decryption) */ int des3_set3key_dec(des3_context *ctx, const unsigned char key[DES_KEY_SIZE * 3]) { uint32_t sk[96]; des3_set3key(sk, ctx->sk, key); zeroize(sk, sizeof(sk)); return(0); } __constant__ des_context des_ctx; __constant__ des3_context des3_ctx; /* * DES-ECB block encryption/decryption */ __global__ void des_crypt_ecb_kernel( const unsigned char *inputs, unsigned char *outputs) { const unsigned char *input = inputs + TX * DES_BLOCK_SIZE; unsigned char *output = outputs + TX * DES_BLOCK_SIZE; int i; uint32_t X, Y, T, *SK; SK = des_ctx.sk; GET_UINT32_BE(X, input, 0); GET_UINT32_BE(Y, input, 4); DES_IP(X, Y); for (i = 0; i < 8; i++) { DES_ROUND(Y, X); DES_ROUND(X, Y); } DES_FP(Y, X); PUT_UINT32_BE(Y, output, 0); PUT_UINT32_BE(X, output, 4); } int des_transfer_context(des_context *ctx) { cuda_upload_symbol(ctx, des_ctx, sizeof(des_context)); return 0; } int des_crypt_ecb(const unsigned char *input, size_t length, unsigned char *output, cuda_device *d) { cuda_upload_data(input, d->device_data_in, length); int grid_size = length / (MAX_THREAD * DES_BLOCK_SIZE); if (length % (MAX_THREAD * DES_BLOCK_SIZE) != 0) grid_size += 1; int thread_size = (length / DES_BLOCK_SIZE) < MAX_THREAD ? 
length / DES_BLOCK_SIZE : MAX_THREAD; // printf("DES_KERNEL<<<%d,%d>>>\n", grid_size, thread_size); des_crypt_ecb_kernel<<<grid_size, thread_size>>>(d->device_data_in, d->device_data_out); cuda_download_data(output, d->device_data_out, length); return 0; } /* * 3DES-ECB block encryption/decryption */ __global__ void des3_crypt_ecb_kernel( const unsigned char *inputs, unsigned char *outputs) { const unsigned char *input = inputs + TX * DES_BLOCK_SIZE; unsigned char *output = outputs + TX * DES_BLOCK_SIZE; int i; uint32_t X, Y, T, *SK; SK = des3_ctx.sk; GET_UINT32_BE(X, input, 0); GET_UINT32_BE(Y, input, 4); DES_IP(X, Y); for (i = 0; i < 8; i++) { DES_ROUND(Y, X); DES_ROUND(X, Y); } for (i = 0; i < 8; i++) { DES_ROUND(X, Y); DES_ROUND(Y, X); } for (i = 0; i < 8; i++) { DES_ROUND(Y, X); DES_ROUND(X, Y); } DES_FP(Y, X); PUT_UINT32_BE(Y, output, 0); PUT_UINT32_BE(X, output, 4); } int des3_transfer_context(des3_context *ctx) { cuda_upload_symbol(ctx, des3_ctx, sizeof(des3_context)); return 0; } int des3_crypt_ecb( const unsigned char *input, size_t length, unsigned char *output, cuda_device *d) { cuda_upload_data(input, d->device_data_in, length); int grid_size = length / (MAX_THREAD * DES_BLOCK_SIZE); if (length % (MAX_THREAD * DES_BLOCK_SIZE) != 0) grid_size += 1; int thread_size = (length / DES_BLOCK_SIZE) < MAX_THREAD ? length / DES_BLOCK_SIZE : MAX_THREAD; // printf("DES_KERNEL<<<%d,%d>>>\n", grid_size, thread_size); des3_crypt_ecb_kernel<<<grid_size, thread_size>>>(d->device_data_in, d->device_data_out); cuda_download_data(output, d->device_data_out, length); return 0; } #if defined(CUDASSL_SELF_TEST) #include <stdio.h> /* * DES and 3DES test vectors from: * * http://csrc.nist.gov/groups/STM/cavp/documents/des/tripledes-vectors.zip */ static const unsigned char des3_test_keys[24] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23 }; static const unsigned char des3_test_buf[8] = { 0x4E, 0x6F, 0x77, 0x20, 0x69, 0x73, 0x20, 0x74 }; static const unsigned char des3_test_ecb_dec[3][8] = { { 0xCD, 0xD6, 0x4F, 0x2F, 0x94, 0x27, 0xC1, 0x5D }, { 0x69, 0x96, 0xC8, 0xFA, 0x47, 0xA2, 0xAB, 0xEB }, { 0x83, 0x25, 0x39, 0x76, 0x44, 0x09, 0x1A, 0x0A } }; static const unsigned char des3_test_ecb_enc[3][8] = { { 0x6A, 0x2A, 0x19, 0xF4, 0x1E, 0xCA, 0x85, 0x4B }, { 0x03, 0xE6, 0x9F, 0x5B, 0xFA, 0x58, 0xEB, 0x42 }, { 0xDD, 0x17, 0xE8, 0xB8, 0xB4, 0x37, 0xD2, 0x32 } }; /* * Checkup routine */ extern "C" int des_self_test(int verbose, cuda_device *d) { int i, j, u, v, ret = 0; des_context ctx; des3_context ctx3; unsigned char buf[MAX_THREAD][DES_BLOCK_SIZE]; des_init(&ctx); des3_init(&ctx3); /* * ECB mode */ for (i = 0; i < 6; i++) { u = i >> 1; v = i & 1; if (verbose != 0) printf(" DES%c-ECB-%3d (%s): ", (u == 0) ? ' ' : '3', 56 + u * 56, (v == DES_DECRYPT) ? 
"dec" : "enc"); memcpy(buf[0], des3_test_buf, DES_BLOCK_SIZE); memcpy(buf[1], des3_test_buf, DES_BLOCK_SIZE); switch (i) { case 0: des_setkey_dec(&ctx, des3_test_keys); des_transfer_context(&ctx); break; case 1: des_setkey_enc(&ctx, des3_test_keys); des_transfer_context(&ctx); break; case 2: des3_set2key_dec(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; case 3: des3_set2key_enc(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; case 4: des3_set3key_dec(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; case 5: des3_set3key_enc(&ctx3, des3_test_keys); des3_transfer_context(&ctx3); break; default: return(1); } for (j = 0; j < 10000; j++) { if (u == 0) des_crypt_ecb(*buf, DES_BLOCK_SIZE * 2, *buf, d); else des3_crypt_ecb(*buf, DES_BLOCK_SIZE * 2, *buf, d); } if (v == DES_DECRYPT) { if (memcmp(buf[0], des3_test_ecb_dec[u], DES_BLOCK_SIZE) != 0 && memcmp(buf[1], des3_test_ecb_dec[u], DES_BLOCK_SIZE) != 0) { if (verbose != 0) printf("failed\n"); ret = 1; goto exit; } } else { if (memcmp(buf[0], des3_test_ecb_enc[u], DES_BLOCK_SIZE) != 0 && memcmp(buf[1], des3_test_ecb_enc[u], DES_BLOCK_SIZE) != 0) { if (verbose != 0) printf("failed\n"); ret = 1; goto exit; } } if (verbose != 0) printf("passed\n"); } if (verbose != 0) printf("\n"); exit: des_free(&ctx); des3_free(&ctx3); return(ret); } #define DATASIZE 1000L #define LOOPS 1000L extern "C" int des_performance_test_with_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; unsigned char buf[MAX_THREAD * DATASIZE][DES_BLOCK_SIZE]; des_context ctx; int i; float h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); memset(buf, 0, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); des_init(&ctx); des_setkey_enc(&ctx, key); des_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) des_crypt_ecb(*buf, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE, *buf, d); CUDA_STOP_TIME(" DES -ECB- 56 (enc)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des3_performance_test_with_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; unsigned char buf[MAX_THREAD * DATASIZE][DES_BLOCK_SIZE]; des3_context ctx; int i; float h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); memset(buf, 0, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); des3_init(&ctx); des3_set3key_enc(&ctx, key); des3_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) des3_crypt_ecb(*buf, MAX_THREAD * DES_BLOCK_SIZE * DATASIZE, *buf, d); CUDA_STOP_TIME(" DES3-ECB-168 (enc)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des_performance_test_without_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; 
des_context ctx; int i; double h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); des_init(&ctx); des_setkey_enc(&ctx, key); des_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) des_crypt_ecb_kernel<<<DATASIZE, MAX_THREAD>>>(d->device_data_in, d->device_data_out); CUDA_STOP_TIME(" DES -ECB- 56 (enc only)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des3_performance_test_without_data_transform(int verbose, cuda_device *d) { unsigned char key[DES_KEY_SIZE]; des3_context ctx; int i; double h; CUDA_START_TIME memset(key, 0, DES_KEY_SIZE); des3_init(&ctx); des3_set3key_enc(&ctx, key); des3_transfer_context(&ctx); for (int i = 0; i < LOOPS; ++i) des3_crypt_ecb_kernel<<<DATASIZE, MAX_THREAD>>>(d->device_data_in, d->device_data_out); CUDA_STOP_TIME(" DES3-ECB-128 (enc only)") printf(" Block Data size: %ld\n", MAX_THREAD * DES_BLOCK_SIZE * DATASIZE); printf(" Block Loops: %ld\n", LOOPS); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS, " ", " in total\n"); TALK_LIKE_A_HUMAN_BEING(MAX_THREAD * DES_BLOCK_SIZE * DATASIZE * LOOPS / gpu_time * 1000, " ", "/sec\n"); printf(" %ld loops in total\n", LOOPS * MAX_THREAD * DATASIZE); printf(" %f loops/sec\n", LOOPS * MAX_THREAD * DATASIZE / gpu_time * 1000); if (verbose != 0) printf("\n"); return 0; } extern "C" int des_performance_test(int verbose, cuda_device *d) { des_performance_test_with_data_transform(verbose, d); des_performance_test_without_data_transform(verbose, d); return 0; } extern "C" int des3_performance_test(int verbose, cuda_device *d) { des3_performance_test_with_data_transform(verbose, d); des3_performance_test_without_data_transform(verbose, d); return 0; } #endif /* CUDASSL_SELF_TEST */ #endif /* CUDASSL_DES_C */
b75e384da2be7d391331c60a32f16fbcc2b7476a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hyperbolic_tangent_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "../hyperbolic_tangent_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #include "util_cuda.h" static __forceinline__ __device__ float hyperbolic_tangent( float x, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier) { float y = __expf(x * hyperbolic_tangent_steepness2); return __fdividef(y - 1.0F, y + 1.0F) * hyperbolic_tangent_major_multiplier; } __global__ void hyperbolic_tangent_upd_kernel( const float4 * __restrict input, float4 * __restrict output, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = hyperbolic_tangent(val.x, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.y = hyperbolic_tangent(val.y, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.z = hyperbolic_tangent(val.z, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.w = hyperbolic_tangent(val.w, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); output[elem_id] = val; } } static __forceinline__ __device__ float hyperbolic_tangent_deriviative( float x, float hyperbolic_tangent_major_multiplier_reverted, float hyperbolic_tangent_steepness3) { float normalized_value = x * hyperbolic_tangent_major_multiplier_reverted; return hyperbolic_tangent_steepness3 * (1.0F - (normalized_value * normalized_value)); } __global__ void hyperbolic_tangent_deriviative_upd_kernel( float4 * __restrict errors, const float4 * __restrict output_neurons, float hyperbolic_tangent_major_multiplier_reverted, float hyperbolic_tangent_steepness3, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = output_neurons[elem_id]; val.x = hyperbolic_tangent_deriviative(val.x, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.y = hyperbolic_tangent_deriviative(val.y, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.z = hyperbolic_tangent_deriviative(val.z, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.w = hyperbolic_tangent_deriviative(val.w, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); float4 current_error = errors[elem_id]; current_error.x *= val.x; current_error.y *= val.y; current_error.z *= val.z; current_error.w *= val.w; errors[elem_id] = current_error; } } namespace nnforge { namespace cuda { hyperbolic_tangent_layer_updater_cuda::hyperbolic_tangent_layer_updater_cuda() { } hyperbolic_tangent_layer_updater_cuda::~hyperbolic_tangent_layer_updater_cuda() { } void 
hyperbolic_tangent_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (offset_input_entry_id > 0) throw neural_network_exception("hyperbolic_tangent_layer_updater_cuda is not able to run using offset"); int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( hyperbolic_tangent_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_neurons_buffer, *output_neurons_buffer, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier, elem_count); } void hyperbolic_tangent_layer_updater_cuda::enqueue_backprop( hipStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( hyperbolic_tangent_deriviative_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *output_errors_buffer, *output_neurons_buffer, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3, elem_count); } bool hyperbolic_tangent_layer_updater_cuda::is_in_place_backprop() const { return true; } void hyperbolic_tangent_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const hyperbolic_tangent_layer> layer_derived = nnforge_dynamic_pointer_cast<const hyperbolic_tangent_layer>(layer_schema); hyperbolic_tangent_steepness2 = layer_derived->steepness * 2.0F; hyperbolic_tangent_major_multiplier = layer_derived->scale; hyperbolic_tangent_steepness3 = layer_derived->steepness * layer_derived->scale; hyperbolic_tangent_major_multiplier_reverted = 1.0F / layer_derived->scale; } } }
b75e384da2be7d391331c60a32f16fbcc2b7476a.cu
/* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "hyperbolic_tangent_layer_updater_cuda.h" #include <cuda_runtime.h> #include "../hyperbolic_tangent_layer.h" #include "../neural_network_exception.h" #include "../nn_types.h" #include "util_cuda.h" static __forceinline__ __device__ float hyperbolic_tangent( float x, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier) { float y = __expf(x * hyperbolic_tangent_steepness2); return __fdividef(y - 1.0F, y + 1.0F) * hyperbolic_tangent_major_multiplier; } __global__ void hyperbolic_tangent_upd_kernel( const float4 * __restrict input, float4 * __restrict output, float hyperbolic_tangent_steepness2, float hyperbolic_tangent_major_multiplier, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = input[elem_id]; val.x = hyperbolic_tangent(val.x, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.y = hyperbolic_tangent(val.y, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.z = hyperbolic_tangent(val.z, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); val.w = hyperbolic_tangent(val.w, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier); output[elem_id] = val; } } static __forceinline__ __device__ float hyperbolic_tangent_deriviative( float x, float hyperbolic_tangent_major_multiplier_reverted, float hyperbolic_tangent_steepness3) { float normalized_value = x * hyperbolic_tangent_major_multiplier_reverted; return hyperbolic_tangent_steepness3 * (1.0F - (normalized_value * normalized_value)); } __global__ void hyperbolic_tangent_deriviative_upd_kernel( float4 * __restrict errors, const float4 * __restrict output_neurons, float hyperbolic_tangent_major_multiplier_reverted, float hyperbolic_tangent_steepness3, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { float4 val = output_neurons[elem_id]; val.x = hyperbolic_tangent_deriviative(val.x, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.y = hyperbolic_tangent_deriviative(val.y, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.z = hyperbolic_tangent_deriviative(val.z, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); val.w = hyperbolic_tangent_deriviative(val.w, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3); float4 current_error = errors[elem_id]; current_error.x *= val.x; current_error.y *= val.y; current_error.z *= val.z; current_error.w *= val.w; errors[elem_id] = current_error; } } namespace nnforge { namespace cuda { hyperbolic_tangent_layer_updater_cuda::hyperbolic_tangent_layer_updater_cuda() { } hyperbolic_tangent_layer_updater_cuda::~hyperbolic_tangent_layer_updater_cuda() { } void hyperbolic_tangent_layer_updater_cuda::enqueue_test( unsigned int offset_input_entry_id, 
cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { if (offset_input_entry_id > 0) throw neural_network_exception("hyperbolic_tangent_layer_updater_cuda is not able to run using offset"); int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hyperbolic_tangent_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_neurons_buffer, *output_neurons_buffer, hyperbolic_tangent_steepness2, hyperbolic_tangent_major_multiplier, elem_count); } void hyperbolic_tangent_layer_updater_cuda::enqueue_backprop( cudaStream_t stream_id, const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data, const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom, const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer, const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer, cuda_linear_buffer_device_smart_ptr output_errors_buffer, cuda_linear_buffer_device_smart_ptr input_errors_buffer, const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers, std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects, unsigned int entry_count, bool force_deterministic) { int elem_count = (input_elem_count_per_entry * entry_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hyperbolic_tangent_deriviative_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *output_errors_buffer, *output_neurons_buffer, hyperbolic_tangent_major_multiplier_reverted, hyperbolic_tangent_steepness3, elem_count); } bool hyperbolic_tangent_layer_updater_cuda::is_in_place_backprop() const { return true; } void hyperbolic_tangent_layer_updater_cuda::updater_configured() { nnforge_shared_ptr<const hyperbolic_tangent_layer> layer_derived = nnforge_dynamic_pointer_cast<const hyperbolic_tangent_layer>(layer_schema); hyperbolic_tangent_steepness2 = layer_derived->steepness * 2.0F; hyperbolic_tangent_major_multiplier = layer_derived->scale; hyperbolic_tangent_steepness3 = layer_derived->steepness * layer_derived->scale; hyperbolic_tangent_major_multiplier_reverted = 1.0F / layer_derived->scale; } } }
bd59eb843067de6a16665471ed8a2f40c071e439.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void neg_float(int n,int idx,float *dy,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = -dy[i]; } }
bd59eb843067de6a16665471ed8a2f40c071e439.cu
extern "C" __global__ void neg_float(int n,int idx,float *dy,int incy,float *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) result[i] = -dy[i]; } }
2798b2cfeec845c1ecfcb5682385b67049ae8e0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //nvcc fractalAnim.cu -o temp -lglut -lGL -lm -run #include <GL/glut.h> #include <stdlib.h> #include <iostream> #include <stdio.h> #include <math.h> #include <time.h> #include <signal.h> using namespace std; float *A_CPU, *B_CPU, *C_CPU, *pixels_CPU; float *A_GPU, *B_GPU, *C_GPU, *pixels_GPU; dim3 dimBlock; /*float A = -0.624; float B = 0.4351; // */ float A = 0; float B = 0.75; // */ float t = 0; float tmod = 100.0; float titer = 1.0; float moveiter = 1.0; int N = 100; unsigned int window_width = 1024; unsigned int window_height = 1024; float xMin = -2.0; float xMax = 2.0; float yMin = -2.0; float yMax = 2.0; float stepSizeX = (xMax - xMin)/((float)window_width); float stepSizeY = (yMax - yMin)/((float)window_height); void AllocateMemory(){ hipMalloc((void**)&pixels_GPU, window_width*window_height*3*sizeof(float)); hipMalloc((void**)&B_GPU, N*sizeof(float)); hipMalloc((void**)&C_GPU, N*sizeof(float)); pixels_CPU = (float *)malloc(window_width*window_height*3*sizeof(float)); A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); } // */ //Saves the appropriate memory chunks for later use. //References the globally defined variables. void Initialize(){ for(int i = 0; i < N; i++){ A_CPU[i] = (float)i; B_CPU[i] = (float)i; } //Sets these arrays to the values 1..N. } // */ float color (float x, float y) //hopefully centered on (0,0)? { float mag,maxMag,t1; float maxCount = 200; float count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + A; y = (2.0 * t1 * y) + B; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { return(1.0); } else { return(0.0); }// */ } __global__ void cudaColor(float *pixels_GPU, float X, float iY){ float x = (((float)threadIdx.x)/(blockDim.x))*4-2; float y = (((float)blockIdx.x)/(gridDim.x))*4-2; float mag,maxMag, t1; int maxCount = 200; int count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + X; y = (2.0 * t1 * y) + iY; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.5*log((double)count)/log((double)maxCount); pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 1.0*log((double)count)/log((double)maxCount); pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.4; } else { pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.0; pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; }// */ } void update(int value){ t = t + titer; /*A = -pow(sin(t/500.0),2); B = sin(2*t/500.0)/2;// */ /*A = -pow(sin((2*t/5)/500.0),2); B = sin((2*t/3)/500.0)/2;// */ /*A = -pow(sin((2*t/5)/100.0),2); B = sin(sqrt(2)*(t)/100.0)/2;// */ A = -pow(sin((2*t/5)/100.0),2); B = sin(((sqrt(5)+1)/2)*(t)/100.0)/2;// */ glutPostRedisplay(); glutTimerFunc(16,update, 0); } /*static void signalHandler(int signum) { int command; bool exitMenu = 0; //cout << "I handled it :)" << endl; while (exitMenu == 0) { cout << "Enter 0 to exit the program." << endl; cout << "Enter 1 to continue." << endl; cin >> command; if(command == 0) { exitMenu = 1; } else if (command == 1){ exitMenu = 1; cout << "resuming..." << endl; } else { cout << "Invalid Command!" 
<< endl; } cout << endl; } }// */ void display(void) { hipLaunchKernelGGL(( cudaColor), dim3(1024), dim3(1024), 0, 0, pixels_GPU, A, B); hipMemcpy(pixels_CPU, pixels_GPU, window_width*window_height*3*sizeof(float), hipMemcpyDeviceToHost); glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels_CPU); glFlush(); } __global__ void Addition(float *A, float *B, float *C, int n){ int tid = threadIdx.x; int bid = blockIdx.x; if(tid < n){ C[bid*1024 + tid] = A[bid*1024 + tid] + B[bid*1024 +tid]; } } void CleanUp(float *A_CPU, float *B_CPU, float *C_CPU, float *A_GPU, float *B_GPU, float *C_GPU){ free(A_CPU); free(B_CPU); free(C_CPU); hipFree(A_GPU); hipFree(B_GPU); hipFree(C_GPU); } // */ //Frees the memory for the three relevant global variables. void processSpecialKeys(int key, int x, int y) { /* no-op stub: this handler is registered in main() but was not defined in the original file */ } int main(int argc, char *argv[]) { if(argc == 2){ char *ptr; N = strtol(argv[1], &ptr, 10); } else if(argc > 2){ printf("One or zero arguments expected."); return(1); } AllocateMemory(); //Initialize(); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Fractals man, fractals."); glutDisplayFunc(display); glutSpecialFunc(processSpecialKeys); glutTimerFunc(16, update, 0); glutMainLoop(); }
2798b2cfeec845c1ecfcb5682385b67049ae8e0f.cu
//nvcc fractalAnim.cu -o temp -lglut -lGL -lm -run #include <GL/glut.h> #include <stdlib.h> #include <iostream> #include <stdio.h> #include <math.h> #include <time.h> #include <signal.h> using namespace std; float *A_CPU, *B_CPU, *C_CPU, *pixels_CPU; float *A_GPU, *B_GPU, *C_GPU, *pixels_GPU; dim3 dimBlock; /*float A = -0.624; float B = 0.4351; // */ float A = 0; float B = 0.75; // */ float t = 0; float tmod = 100.0; float titer = 1.0; float moveiter = 1.0; int N = 100; unsigned int window_width = 1024; unsigned int window_height = 1024; float xMin = -2.0; float xMax = 2.0; float yMin = -2.0; float yMax = 2.0; float stepSizeX = (xMax - xMin)/((float)window_width); float stepSizeY = (yMax - yMin)/((float)window_height); void AllocateMemory(){ cudaMalloc((void**)&pixels_GPU, window_width*window_height*3*sizeof(float)); cudaMalloc((void**)&B_GPU, N*sizeof(float)); cudaMalloc((void**)&C_GPU, N*sizeof(float)); pixels_CPU = (float *)malloc(window_width*window_height*3*sizeof(float)); A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); } // */ //Saves the appropriate memory chunks for later use. //References the globally defined variables. void Initialize(){ for(int i = 0; i < N; i++){ A_CPU[i] = (float)i; B_CPU[i] = (float)i; } //Sets these arrays to the values 1..N. } // */ float color (float x, float y) //hopefully centered on (0,0)? { float mag,maxMag,t1; float maxCount = 200; float count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + A; y = (2.0 * t1 * y) + B; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { return(1.0); } else { return(0.0); }// */ } __global__ void cudaColor(float *pixels_GPU, float X, float iY){ float x = (((float)threadIdx.x)/(blockDim.x))*4-2; float y = (((float)blockIdx.x)/(gridDim.x))*4-2; float mag,maxMag, t1; int maxCount = 200; int count = 0; maxMag = 10; mag = 0.0; while (mag < maxMag && count < maxCount) { t1 = x; x = x*x - y*y + X; y = (2.0 * t1 * y) + iY; mag = sqrt(x*x + y*y); count++; } if(count < maxCount) { pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.5*log((double)count)/log((double)maxCount); pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 1.0*log((double)count)/log((double)maxCount); pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.4; } else { pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3] = 0.0; pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 1] = 0.0; pixels_GPU[(blockIdx.x*blockDim.x + threadIdx.x)*3 + 2] = 0.0; }// */ } void update(int value){ t = t + titer; /*A = -pow(sin(t/500.0),2); B = sin(2*t/500.0)/2;// */ /*A = -pow(sin((2*t/5)/500.0),2); B = sin((2*t/3)/500.0)/2;// */ /*A = -pow(sin((2*t/5)/100.0),2); B = sin(sqrt(2)*(t)/100.0)/2;// */ A = -pow(sin((2*t/5)/100.0),2); B = sin(((sqrt(5)+1)/2)*(t)/100.0)/2;// */ glutPostRedisplay(); glutTimerFunc(16,update, 0); } /*static void signalHandler(int signum) { int command; bool exitMenu = 0; //cout << "I handled it :)" << endl; while (exitMenu == 0) { cout << "Enter 0 to exit the program." << endl; cout << "Enter 1 to continue." << endl; cin >> command; if(command == 0) { exitMenu = 1; } else if (command == 1){ exitMenu = 1; cout << "resuming..." << endl; } else { cout << "Invalid Command!" 
<< endl; } cout << endl; } }// */ void display(void) { cudaColor<<<1024, 1024>>>(pixels_GPU, A, B); cudaMemcpy(pixels_CPU, pixels_GPU, window_width*window_height*3*sizeof(float), cudaMemcpyDeviceToHost); glDrawPixels(window_width, window_height, GL_RGB, GL_FLOAT, pixels_CPU); glFlush(); } __global__ void Addition(float *A, float *B, float *C, int n){ int tid = threadIdx.x; int bid = blockIdx.x; if(tid < n){ C[bid*1024 + tid] = A[bid*1024 + tid] + B[bid*1024 +tid]; } } void CleanUp(float *A_CPU, float *B_CPU, float *C_CPU, float *A_GPU, float *B_GPU, float *C_GPU){ free(A_CPU); free(B_CPU); free(C_CPU); cudaFree(A_GPU); cudaFree(B_GPU); cudaFree(C_GPU); } // */ //Frees the memory for the three relevant global variables. void processSpecialKeys(int key, int x, int y) { /* no-op stub: this handler is registered in main() but was not defined in the original file */ } int main(int argc, char *argv[]) { if(argc == 2){ char *ptr; N = strtol(argv[1], &ptr, 10); } else if(argc > 2){ printf("One or zero arguments expected."); return(1); } AllocateMemory(); //Initialize(); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_SINGLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Fractals man, fractals."); glutDisplayFunc(display); glutSpecialFunc(processSpecialKeys); glutTimerFunc(16, update, 0); glutMainLoop(); }
df8c976469c4c4752a1c848d70ab06deee6f7d07.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <hipcub/hipcub.hpp> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic shape plugin requires TRT version greater than 6.0. #if IS_TRT_VERSION_GE(6000) template <typename T> EmbEltwiseLayernormPluginDynamicImpl< T>::~EmbEltwiseLayernormPluginDynamicImpl() { this->terminate(); } inline half fp32tofp16(float x) { return static_cast<half>(x); } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() { embs_gpu_.resize(embs_.size()); for (int i = 0; i < embs_.size(); i++) { if (embs_[i]) { T *host_ptr; auto size = emb_sizes_[i]; if (std::is_same<T, half>::value) { host_ptr = new T[size]; std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16); } else { host_ptr = reinterpret_cast<T *>(embs_[i]); } hipMalloc(&embs_gpu_[i], sizeof(T) * size); hipMemcpy(embs_gpu_[i], host_ptr, size * sizeof(T), hipMemcpyHostToDevice); if (std::is_same<T, half>::value) { delete[] host_ptr; } } } if (bias_) { hipMalloc(&bias_gpu_, sizeof(float) * bias_size_); hipMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float), hipMemcpyHostToDevice); } if (scale_) { hipMalloc(&scale_gpu_, sizeof(float) * scale_size_); hipMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float), hipMemcpyHostToDevice); } int input_num = embs_.size(); in_ptr_tensor_.Resize({input_num}); emb_ptr_tensor_.Resize({input_num}); hipGetDevice(&device_id_); auto emb_ptr_gpu_d = emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); hipMemcpy(emb_ptr_gpu_d, embs_gpu_.data(), sizeof(uintptr_t) * input_num, hipMemcpyHostToDevice); return 0; } template <typename T> void EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() { for (int i = 0; i < embs_gpu_.size(); ++i) { if (embs_gpu_[i]) { hipFree(embs_gpu_[i]); embs_gpu_[i] = nullptr; } } if (bias_gpu_) { hipFree(bias_gpu_); bias_gpu_ = nullptr; } if (scale_gpu_) { hipFree(scale_gpu_); scale_gpu_ = nullptr; } } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) { auto id_dims = input_desc[0].dims; int batch = id_dims.d[0]; int seq_len = id_dims.d[1]; int input_num = embs_.size(); auto in_ptr_gpu_d = in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto emb_ptr_gpu_d = 
emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto new_input_ptr = reinterpret_cast<uintptr_t>(inputs[0]); if (old_input_ptr_ != new_input_ptr) { old_input_ptr_ = new_input_ptr; hipMemcpyAsync(in_ptr_gpu_d, reinterpret_cast<const void *>(inputs), sizeof(uintptr_t) * input_num, hipMemcpyHostToDevice, stream); } auto out_type = output_desc[0].type; if (std::is_same<T, float>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kFLOAT, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp32 input.")); } else if (std::is_same<T, half>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kHALF, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp16 input.")); } else { PADDLE_THROW(platform::errors::Fatal( "Unsupport data type, the out type of EmbEltwiseLayernorm should be " "float or half.")); } auto *output_d = reinterpret_cast<T *>(outputs[0]); operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d, scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d, eps_, input_num, stream); return hipGetLastError() != hipSuccess; } template class EmbEltwiseLayernormPluginDynamicImpl<float>; #ifdef SUPPORTS_CUDA_FP16 template class EmbEltwiseLayernormPluginDynamicImpl<half>; #endif // SUPPORTS_CUDA_FP16 int EmbEltwiseLayernormPluginDynamic::initialize() { impl_->initialize(); return 0; } void EmbEltwiseLayernormPluginDynamic::terminate() { impl_->terminate(); } nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { // NOLINT PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); nvinfer1::DimsExprs ret; ret.nbDims = 5; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(hidden_size_); ret.d[3] = expr_builder.constant(1); ret.d[4] = expr_builder.constant(1); return ret; } bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); int all_nums = nb_inputs + nb_outputs; const nvinfer1::PluginTensorDesc &desc = in_out[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { return desc.type == nvinfer1::DataType::kINT32; } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos < all_nums - 1) { return desc.type == nvinfer1::DataType::kINT32 && desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1]; } if (pos == all_nums - 1) { if (with_fp16_ == false) { return desc.type == nvinfer1::DataType::kFLOAT; } else { return desc.type == nvinfer1::DataType::kHALF; } } 
return false; } nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); if (with_fp16_) return nvinfer1::DataType::kHALF; else return nvinfer1::DataType::kFLOAT; } int EmbEltwiseLayernormPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) { impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream); return hipGetLastError() != hipSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
df8c976469c4c4752a1c848d70ab06deee6f7d07.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdio.h> #include <cassert> #include <cub/cub.cuh> // NOLINT #include <vector> #include "glog/logging.h" #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h" #include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { // Dynamic shape plugin requires TRT version greater than 6.0. #if IS_TRT_VERSION_GE(6000) template <typename T> EmbEltwiseLayernormPluginDynamicImpl< T>::~EmbEltwiseLayernormPluginDynamicImpl() { this->terminate(); } inline half fp32tofp16(float x) { return static_cast<half>(x); } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::initialize() { embs_gpu_.resize(embs_.size()); for (int i = 0; i < embs_.size(); i++) { if (embs_[i]) { T *host_ptr; auto size = emb_sizes_[i]; if (std::is_same<T, half>::value) { host_ptr = new T[size]; std::transform(embs_[i], (embs_[i] + size), host_ptr, fp32tofp16); } else { host_ptr = reinterpret_cast<T *>(embs_[i]); } cudaMalloc(&embs_gpu_[i], sizeof(T) * size); cudaMemcpy(embs_gpu_[i], host_ptr, size * sizeof(T), cudaMemcpyHostToDevice); if (std::is_same<T, half>::value) { delete[] host_ptr; } } } if (bias_) { cudaMalloc(&bias_gpu_, sizeof(float) * bias_size_); cudaMemcpy(bias_gpu_, bias_, bias_size_ * sizeof(float), cudaMemcpyHostToDevice); } if (scale_) { cudaMalloc(&scale_gpu_, sizeof(float) * scale_size_); cudaMemcpy(scale_gpu_, scale_, scale_size_ * sizeof(float), cudaMemcpyHostToDevice); } int input_num = embs_.size(); in_ptr_tensor_.Resize({input_num}); emb_ptr_tensor_.Resize({input_num}); cudaGetDevice(&device_id_); auto emb_ptr_gpu_d = emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); cudaMemcpy(emb_ptr_gpu_d, embs_gpu_.data(), sizeof(uintptr_t) * input_num, cudaMemcpyHostToDevice); return 0; } template <typename T> void EmbEltwiseLayernormPluginDynamicImpl<T>::terminate() { for (int i = 0; i < embs_gpu_.size(); ++i) { if (embs_gpu_[i]) { cudaFree(embs_gpu_[i]); embs_gpu_[i] = nullptr; } } if (bias_gpu_) { cudaFree(bias_gpu_); bias_gpu_ = nullptr; } if (scale_gpu_) { cudaFree(scale_gpu_); scale_gpu_ = nullptr; } } template <typename T> int EmbEltwiseLayernormPluginDynamicImpl<T>::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) { auto id_dims = input_desc[0].dims; int batch = id_dims.d[0]; int seq_len = id_dims.d[1]; int input_num = embs_.size(); auto in_ptr_gpu_d = in_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto emb_ptr_gpu_d = emb_ptr_tensor_.mutable_data<int64_t>(platform::CUDAPlace(device_id_)); auto new_input_ptr = 
reinterpret_cast<uintptr_t>(inputs[0]); if (old_input_ptr_ != new_input_ptr) { old_input_ptr_ = new_input_ptr; cudaMemcpyAsync(in_ptr_gpu_d, reinterpret_cast<const void *>(inputs), sizeof(uintptr_t) * input_num, cudaMemcpyHostToDevice, stream); } auto out_type = output_desc[0].type; if (std::is_same<T, float>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kFLOAT, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp32 input.")); } else if (std::is_same<T, half>::value) { PADDLE_ENFORCE_EQ( out_type == nvinfer1::DataType::kHALF, true, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only support fp16 input.")); } else { PADDLE_THROW(platform::errors::Fatal( "Unsupport data type, the out type of EmbEltwiseLayernorm should be " "float or half.")); } auto *output_d = reinterpret_cast<T *>(outputs[0]); operators::math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden_size_, in_ptr_gpu_d, scale_gpu_, bias_gpu_, emb_ptr_gpu_d, output_d, eps_, input_num, stream); return cudaGetLastError() != cudaSuccess; } template class EmbEltwiseLayernormPluginDynamicImpl<float>; #ifdef SUPPORTS_CUDA_FP16 template class EmbEltwiseLayernormPluginDynamicImpl<half>; #endif // SUPPORTS_CUDA_FP16 int EmbEltwiseLayernormPluginDynamic::initialize() { impl_->initialize(); return 0; } void EmbEltwiseLayernormPluginDynamic::terminate() { impl_->terminate(); } nvinfer1::DimsExprs EmbEltwiseLayernormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) { // NOLINT PADDLE_ENFORCE_EQ(output_index, 0, platform::errors::InvalidArgument( "There is only one output of the EmbEltwiseLayernorm, " "so the index should be zero," "but it's (%d)", output_index)); nvinfer1::DimsExprs ret; ret.nbDims = 5; ret.d[0] = inputs[0].d[0]; ret.d[1] = inputs[0].d[1]; ret.d[2] = expr_builder.constant(hidden_size_); ret.d[3] = expr_builder.constant(1); ret.d[4] = expr_builder.constant(1); return ret; } bool EmbEltwiseLayernormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of swish plugin shoule not be nullptr.")); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_EQ(nb_outputs, 1, platform::errors::InvalidArgument( "The EmbEltwiseLayerNorm's output should be one" "but it's (%d) outputs.", nb_outputs)); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); int all_nums = nb_inputs + nb_outputs; const nvinfer1::PluginTensorDesc &desc = in_out[pos]; if (desc.format != nvinfer1::TensorFormat::kLINEAR) { return false; } if (pos == 0) { return desc.type == nvinfer1::DataType::kINT32; } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; if (pos < all_nums - 1) { return desc.type == nvinfer1::DataType::kINT32 && desc.dims.d[0] == prev.dims.d[0] && desc.dims.d[1] == prev.dims.d[1]; } if (pos == all_nums - 1) { if (with_fp16_ == false) { return desc.type == nvinfer1::DataType::kFLOAT; } else { return desc.type == nvinfer1::DataType::kHALF; } } return false; } nvinfer1::DataType EmbEltwiseLayernormPluginDynamic::getOutputDataType( 
int index, const nvinfer1::DataType *input_types, int nb_inputs) const { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The EmbEltwiseLayernorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); if (with_fp16_) return nvinfer1::DataType::kHALF; else return nvinfer1::DataType::kFLOAT; } int EmbEltwiseLayernormPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) { impl_->enqueue(input_desc, output_desc, inputs, outputs, workspace, stream); return cudaGetLastError() != cudaSuccess; } #endif } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
2bd36e93fef4c46c4da391aeb24a755d95a8de19.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> using namespace std; __global__ void matrixAddKernel(float* A, float* B, float* C, int n){ int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n*n) C[i] = A[i] + B[i]; } __global__ void matrixAddKernel2(float* A, float* B, float* C, int n){ int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n){ for (int j = i * n; j < i * n + n; j++) C[j] = A[j] + B[j]; } } __global__ void matrixAddKernel3(float* A, float* B, float* C, int n){ int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) for (int j = i; j < n*n; j += n) C[j] = A[j] + B[j]; } void matrixAdd(float* A, float* B, float* C, int n) { int size = n * n * sizeof(float); float *d_A, *d_B, *d_C; hipMalloc((void **)&d_A, size); hipMemcpy(d_A, A, size, hipMemcpyHostToDevice); hipMalloc((void **)&d_B, size); hipMemcpy(d_B, B, size, hipMemcpyHostToDevice); hipMalloc((void **)&d_C, size); hipLaunchKernelGGL(( matrixAddKernel) , dim3(ceil((n*n) / 256.0)), dim3(256) , 0, 0, d_A, d_B, d_C, n); hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); } int main() { int n; float *B; float *A;float *C; n=100; A = (float*)malloc(n*n*sizeof(float)); B = (float*)malloc(n*n*sizeof(float)); /* B and C are n x n matrices, so they need n*n elements */ C = (float*)malloc(n*n*sizeof(float)); for (int i = 0; i < n*n; i++) { A[i] = 1; B[i] = 2; } matrixAdd(A, B, C, n); }
2bd36e93fef4c46c4da391aeb24a755d95a8de19.cu
#include <iostream> #include <stdio.h> #include <cuda_runtime.h> #include <cuda.h> #include <device_launch_parameters.h> using namespace std; __global__ void matrixAddKernel(float* A, float* B, float* C, int n){ int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n*n) C[i] = A[i] + B[i]; } __global__ void matrixAddKernel2(float* A, float* B, float* C, int n){ int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n){ for (int j = i * n; j < i * n + n; j++) C[j] = A[j] + B[j]; } } __global__ void matrixAddKernel3(float* A, float* B, float* C, int n){ int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) for (int j = i; j < n*n; j += n) C[j] = A[j] + B[j]; } void matrixAdd(float* A, float* B, float* C, int n) { int size = n * n * sizeof(float); float *d_A, *d_B, *d_C; cudaMalloc((void **)&d_A, size); cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_B, size); cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice); cudaMalloc((void **)&d_C, size); matrixAddKernel <<< ceil((n*n) / 256.0), 256 >>> (d_A, d_B, d_C, n); cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } int main() { int n; float *B; float *A;float *C; n=100; A = (float*)malloc(n*n*sizeof(float)); B = (float*)malloc(n*n*sizeof(float)); /* B and C are n x n matrices, so they need n*n elements */ C = (float*)malloc(n*n*sizeof(float)); for (int i = 0; i < n*n; i++) { A[i] = 1; B[i] = 2; } matrixAdd(A, B, C, n); }
f9591ea81109f11535709ad4c4616dc27768682d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<time.h> #define SIZE 100 __global__ void sum(const int* __restrict__ input, const int size, int* sumOut) { int i = threadIdx.x + blockDim.x * blockIdx.x; atomicAdd(sumOut, input[i]); __syncthreads(); } int main() { int i; int a[SIZE]; int c = 0; int *dev_a, *dev_c; hipMalloc((void **) &dev_a, SIZE*sizeof(int)); hipMalloc((void **) &dev_c, sizeof(int)); srand(time(0)); for( i = 0 ; i < SIZE ; i++) { a[i] = (rand() % (1000 - 100 + 1)) + 100; } for( i = 0 ; i < SIZE ; i++) { printf("%d ",a[i]); if (i%10==0 && i!=0){ printf("\n"); } } hipMemcpy(dev_c , &c, sizeof(int),hipMemcpyHostToDevice); hipMemcpy(dev_a , a, SIZE*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( sum), dim3(2),dim3(SIZE/2), 0, 0, dev_a,SIZE,dev_c); hipDeviceSynchronize(); hipMemcpy(&c, dev_c, sizeof(int),hipMemcpyDeviceToHost); printf("\n"); printf("sum = %d ",c); hipFree(dev_a); hipFree(dev_c); return 0; }
f9591ea81109f11535709ad4c4616dc27768682d.cu
#include<stdio.h> #include<time.h> #define SIZE 100 __global__ void sum(const int* __restrict__ input, const int size, int* sumOut) { int i = threadIdx.x + blockDim.x * blockIdx.x; atomicAdd(sumOut, input[i]); __syncthreads(); } int main() { int i; int a[SIZE]; int c = 0; int *dev_a, *dev_c; cudaMalloc((void **) &dev_a, SIZE*sizeof(int)); cudaMalloc((void **) &dev_c, sizeof(int)); srand(time(0)); for( i = 0 ; i < SIZE ; i++) { a[i] = (rand() % (1000 - 100 + 1)) + 100; } for( i = 0 ; i < SIZE ; i++) { printf("%d ",a[i]); if (i%10==0 && i!=0){ printf("\n"); } } cudaMemcpy(dev_c , &c, sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_a , a, SIZE*sizeof(int),cudaMemcpyHostToDevice); sum<<<2,SIZE/2>>>(dev_a,SIZE,dev_c); cudaDeviceSynchronize(); cudaMemcpy(&c, dev_c, sizeof(int),cudaMemcpyDeviceToHost); printf("\n"); printf("sum = %d ",c); cudaFree(dev_a); cudaFree(dev_c); return 0; }
7a624388b88e40fb0299ee698ff4650922864417.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" typedef unsigned char uint8_t; struct __device_builtin__ __align__(_NCS_) uint8n { uint8_t _VARNAMES_; }; extern "C" __global__ void compute_product( const uint8_t* __restrict__ A, const float* __restrict__ B, const char* __restrict__ isEmpty, const int* __restrict__ divStart, const int* __restrict__ divSize, float* __restrict__ V, int* __restrict__ I, int N, int L, int O, int nProbe ) { const int tid = threadIdx.x; // thread ID const int qid = blockIdx.x; // query ID // const uint8n* A2 = reinterpret_cast<const uint8n*>( const_cast<uint8_t*>(A) ) const uint8n* A2 = reinterpret_cast<const uint8n*>(A); // ? // Load precomputed distances extern __shared__ volatile float Bsh[]; #pragma unroll if (tid < 256){ for (int i = 0; i < _M_; i++){ int bz = i; int by = qid; int bx = tid; Bsh[i * _K_ + tid] = B[(bz * L * _K_) + (by * _K_) + (bx)]; } } __syncthreads(); // Load A and compute distance int iN = tid; int counter = tid; int start = 0; int size = 0; int cDiv = -1; bool break_loop = false; while (iN < N){ while ( (iN - start) >= size){ cDiv ++; if (cDiv >= nProbe){ break_loop = true; break; } int residual = iN - start - size; start = divStart[(qid) * nProbe + (cDiv)]; iN = start + residual; size = divSize[(qid) * nProbe + (cDiv)]; if (iN >= N){ break_loop = true; break; } } if (break_loop) break; float sum = 0.f; #pragma unroll for (int i = 0; i < _M_ / _NCS_; i++){ uint8n Avals = A2[(i * N) + (iN)]; _CODEBLOCK_ } // write to V and I int isCurrentEmpty; isCurrentEmpty = isEmpty[iN]; /* if (isCurrentEmpty == 0){ V[(qid) * O + counter] = sum; I[(qid) * O + counter] = iN; } else { V[(qid) * O + counter] = -999999.f; I[(qid) * O + counter] = -1; } */ if (counter < O){ V[(qid) * O + counter] = isCurrentEmpty == 0 ? sum : -999999.f; I[(qid) * O + counter] = isCurrentEmpty == 0 ? iN : -1; // atomicAdd(V + (qid) * O + counter, isCurrentEmpty == 0 ? sum : -99999.f); } iN += _TPB_; counter += _TPB_; } }
7a624388b88e40fb0299ee698ff4650922864417.cu
typedef unsigned char uint8_t; struct __device_builtin__ __align__(_NCS_) uint8n { uint8_t _VARNAMES_; }; extern "C" __global__ void compute_product( const uint8_t* __restrict__ A, const float* __restrict__ B, const char* __restrict__ isEmpty, const int* __restrict__ divStart, const int* __restrict__ divSize, float* __restrict__ V, int* __restrict__ I, int N, int L, int O, int nProbe ) { const int tid = threadIdx.x; // thread ID const int qid = blockIdx.x; // query ID // const uint8n* A2 = reinterpret_cast<const uint8n*>( const_cast<uint8_t*>(A) ) const uint8n* A2 = reinterpret_cast<const uint8n*>(A); // ? // Load precomputed distances extern __shared__ volatile float Bsh[]; #pragma unroll if (tid < 256){ for (int i = 0; i < _M_; i++){ int bz = i; int by = qid; int bx = tid; Bsh[i * _K_ + tid] = B[(bz * L * _K_) + (by * _K_) + (bx)]; } } __syncthreads(); // Load A and compute distance int iN = tid; int counter = tid; int start = 0; int size = 0; int cDiv = -1; bool break_loop = false; while (iN < N){ while ( (iN - start) >= size){ cDiv ++; if (cDiv >= nProbe){ break_loop = true; break; } int residual = iN - start - size; start = divStart[(qid) * nProbe + (cDiv)]; iN = start + residual; size = divSize[(qid) * nProbe + (cDiv)]; if (iN >= N){ break_loop = true; break; } } if (break_loop) break; float sum = 0.f; #pragma unroll for (int i = 0; i < _M_ / _NCS_; i++){ uint8n Avals = A2[(i * N) + (iN)]; _CODEBLOCK_ } // write to V and I int isCurrentEmpty; isCurrentEmpty = isEmpty[iN]; /* if (isCurrentEmpty == 0){ V[(qid) * O + counter] = sum; I[(qid) * O + counter] = iN; } else { V[(qid) * O + counter] = -999999.f; I[(qid) * O + counter] = -1; } */ if (counter < O){ V[(qid) * O + counter] = isCurrentEmpty == 0 ? sum : -999999.f; I[(qid) * O + counter] = isCurrentEmpty == 0 ? iN : -1; // atomicAdd(V + (qid) * O + counter, isCurrentEmpty == 0 ? sum : -99999.f); } iN += _TPB_; counter += _TPB_; } }
imp.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hiprand/hiprand_kernel.h> #include <assert.h> #include <conf.h> #include "inc/conf.h" #include "utils/msg.h" #include "utils/cc.h" #include "utils/kl.h" #include "d/api.h" #include "inc/dev.h" #include "inc/type.h" #include "coords/type.h" #include "coords/imp/type.h" #include "coords/imp.h" #include "imp.h" #include "imp/type.h" #include "imp/main.h"
imp.cu
#include <stdio.h> #include <curand_kernel.h> #include <assert.h> #include <conf.h> #include "inc/conf.h" #include "utils/msg.h" #include "utils/cc.h" #include "utils/kl.h" #include "d/api.h" #include "inc/dev.h" #include "inc/type.h" #include "coords/type.h" #include "coords/imp/type.h" #include "coords/imp.h" #include "imp.h" #include "imp/type.h" #include "imp/main.h"
b641221f93a9cf2df06ae2c96cce3f182e41f710.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) { float compute_local[8]; __shared__ float A_shared[256]; __shared__ float B_shared[256]; float A_shared_local[2]; float B_shared_local[4]; for (int i_c_init = 0; i_c_init < 2; ++i_c_init) { for (int j_c_init = 0; j_c_init < 4; ++j_c_init) { compute_local[(((i_c_init * 4) + j_c_init))] = 0.000000e+00f; } } for (int k_outer = 0; k_outer < 32; ++k_outer) { __syncthreads(); A_shared[(((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)))] = ((float*)A)[((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 1))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 2))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 3))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 16))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 512))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 17))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 513))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 18))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 514))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 19))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 515))]; B_shared[(((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)))] = ((float*)B)[(((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 1))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 2))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 3))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 16))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 512))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 17))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 513))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 18))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 514))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 19))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + 
(k_outer * 16)) + (((int)threadIdx.x) * 4)) + 515))]; __syncthreads(); for (int k_inner = 0; k_inner < 16; ++k_inner) { A_shared_local[(0)] = A_shared[(((((int)threadIdx.y) * 32) + k_inner))]; A_shared_local[(1)] = A_shared[((((((int)threadIdx.y) * 32) + k_inner) + 16))]; B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 64) + k_inner))]; B_shared_local[(1)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 16))]; B_shared_local[(2)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 32))]; B_shared_local[(3)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 48))]; compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)])); compute_local[(1)] = (compute_local[(1)] + (A_shared_local[(0)] * B_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (A_shared_local[(0)] * B_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (A_shared_local[(0)] * B_shared_local[(3)])); compute_local[(4)] = (compute_local[(4)] + (A_shared_local[(1)] * B_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (A_shared_local[(1)] * B_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (A_shared_local[(1)] * B_shared_local[(2)])); compute_local[(7)] = (compute_local[(7)] + (A_shared_local[(1)] * B_shared_local[(3)])); } } ((float*)compute)[((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)))] = compute_local[(0)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = compute_local[(1)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = compute_local[(2)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = compute_local[(3)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2048))] = compute_local[(4)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2049))] = compute_local[(5)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2050))] = compute_local[(6)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2051))] = compute_local[(7)]; }
b641221f93a9cf2df06ae2c96cce3f182e41f710.cu
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) { float compute_local[8]; __shared__ float A_shared[256]; __shared__ float B_shared[256]; float A_shared_local[2]; float B_shared_local[4]; for (int i_c_init = 0; i_c_init < 2; ++i_c_init) { for (int j_c_init = 0; j_c_init < 4; ++j_c_init) { compute_local[(((i_c_init * 4) + j_c_init))] = 0.000000e+00f; } } for (int k_outer = 0; k_outer < 32; ++k_outer) { __syncthreads(); A_shared[(((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)))] = ((float*)A)[((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 1))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 2))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 3))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 16))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 512))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 17))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 513))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 18))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 514))]; A_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 19))] = ((float*)A)[(((((((int)threadIdx.y) * 1024) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 515))]; B_shared[(((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)))] = ((float*)B)[(((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 1))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 2))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 3))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 16))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 512))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 17))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 513))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 18))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 514))]; B_shared[((((((int)threadIdx.y) * 32) + (((int)threadIdx.x) * 4)) + 19))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (((int)threadIdx.y) * 1024)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 515))]; __syncthreads(); for (int k_inner = 0; 
k_inner < 16; ++k_inner) { A_shared_local[(0)] = A_shared[(((((int)threadIdx.y) * 32) + k_inner))]; A_shared_local[(1)] = A_shared[((((((int)threadIdx.y) * 32) + k_inner) + 16))]; B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 64) + k_inner))]; B_shared_local[(1)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 16))]; B_shared_local[(2)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 32))]; B_shared_local[(3)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 48))]; compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)])); compute_local[(1)] = (compute_local[(1)] + (A_shared_local[(0)] * B_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (A_shared_local[(0)] * B_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (A_shared_local[(0)] * B_shared_local[(3)])); compute_local[(4)] = (compute_local[(4)] + (A_shared_local[(1)] * B_shared_local[(0)])); compute_local[(5)] = (compute_local[(5)] + (A_shared_local[(1)] * B_shared_local[(1)])); compute_local[(6)] = (compute_local[(6)] + (A_shared_local[(1)] * B_shared_local[(2)])); compute_local[(7)] = (compute_local[(7)] + (A_shared_local[(1)] * B_shared_local[(3)])); } } ((float*)compute)[((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)))] = compute_local[(0)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = compute_local[(1)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = compute_local[(2)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = compute_local[(3)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2048))] = compute_local[(4)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2049))] = compute_local[(5)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2050))] = compute_local[(6)]; ((float*)compute)[(((((((int)threadIdx.y) * 4096) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2051))] = compute_local[(7)]; }
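The TVM-generated kernel above hard-codes its problem size in the index arithmetic, and no host code accompanies it in this file. The launcher below is a sketch reconstructed from those indices: the shapes (A as 16x512 and B as 2048x512, both row-major, so the kernel writes compute = A * B^T as a 16x2048 buffer) and the launch configuration (block (4,8), grid (128,1)) are inferences, not facts stated anywhere in the source.

// Hypothetical host launcher for default_function_kernel0 (a sketch; every
// size below is inferred from the kernel's index arithmetic, not from any
// accompanying host code).
#include <cuda_runtime.h>
#include <vector>

extern "C" __global__ void default_function_kernel0(void *A, void *B, void *compute);

int launch_default_function_kernel0() {
  const int M = 16, K = 512, N = 2048;   // assumed shapes: A is MxK, B is NxK, compute is MxN
  float *dA, *dB, *dC;
  cudaMalloc((void **)&dA, M * K * sizeof(float));
  cudaMalloc((void **)&dB, N * K * sizeof(float));
  cudaMalloc((void **)&dC, M * N * sizeof(float));

  std::vector<float> hA(M * K, 1.0f), hB(N * K, 1.0f);   // dummy inputs
  cudaMemcpy(dA, hA.data(), hA.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB.data(), hB.size() * sizeof(float), cudaMemcpyHostToDevice);

  // blockDim (4,8) fills the 256-element shared tiles (each thread loads 8
  // floats of A and 8 of B); each block produces a 16x16 tile of the output,
  // so gridDim.x = N / 16 covers the 2048 output columns.
  dim3 block(4, 8);
  dim3 grid(N / 16, 1);
  default_function_kernel0<<<grid, block>>>(dA, dB, dC);
  cudaDeviceSynchronize();

  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}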
911c5247cb14ec4a8311619c688ba56ee244b55d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<cuda.h> #include<cuda_runtime.h> #include "wb.cuh" // Compute C = A * B // Sgemm stands for single precision general matrix-matrix multiply __global__ void sgemm(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns) { //@@ Insert code to implement matrix multiplication here int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < numARows && col < numBColumns) { float sum = 0; for (int ii = 0; ii < numAColumns; ii++) { sum += A[row * numAColumns + ii] * B[ii * numBColumns + col]; } C[row * numBColumns + col] = sum; } } #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; int numCColumns; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Allocate the hostC matrix hostC = (float *)malloc(numARows * numBColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); numCRows = numARows; numCColumns = numBColumns; wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(float))); wbCheck(hipMalloc((void **)&deviceB, numBRows * numBColumns * sizeof(float))); wbCheck(hipMalloc((void **)&deviceC, numARows * numBColumns * sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(hipMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice)); wbCheck(hipMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 blockDim(16, 16); // changed to BColumns and ARows from Acolumns and BRows dim3 gridDim(ceil(((float)numBColumns) / blockDim.x), ceil(((float)numARows) / blockDim.y)); wbLog(TRACE, "The block dimensions are ", blockDim.x, " x ", blockDim.y); wbLog(TRACE, "The grid dimensions are ", gridDim.x, " x ", gridDim.y); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here wbCheck(hipMemset(deviceC, 0, numARows * numBColumns * sizeof(float))); sgemm << <gridDim, blockDim >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here 
wbCheck(hipMemcpy(hostC, deviceC, numARows * numBColumns * sizeof(float), hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numARows, numBColumns); free(hostA); free(hostB); free(hostC); system("pause"); return 0; }
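This .hip file and the .cu file that follows differ only in the runtime API spellings that hipify rewrote (hipMalloc/cudaMalloc, hipMemcpy/cudaMemcpy, and so on). If one wanted to keep a single source for both backends, a small alias header covering just the calls used in this pair would do; the sketch below uses a hypothetical USE_HIP build flag that neither file actually defines.

// Hypothetical portability shim limited to the runtime calls appearing in the
// sgemm pair above/below; USE_HIP is an assumed build flag for this sketch.
#ifdef USE_HIP
  #include <hip/hip_runtime.h>
  #define gpuError_t            hipError_t
  #define gpuSuccess            hipSuccess
  #define gpuMalloc             hipMalloc
  #define gpuMemcpy             hipMemcpy
  #define gpuMemset             hipMemset
  #define gpuMemcpyHostToDevice hipMemcpyHostToDevice
  #define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
  #define gpuDeviceSynchronize  hipDeviceSynchronize
  #define gpuFree               hipFree
#else
  #include <cuda_runtime.h>
  #define gpuError_t            cudaError_t
  #define gpuSuccess            cudaSuccess
  #define gpuMalloc             cudaMalloc
  #define gpuMemcpy             cudaMemcpy
  #define gpuMemset             cudaMemset
  #define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
  #define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
  #define gpuDeviceSynchronize  cudaDeviceSynchronize
  #define gpuFree               cudaFree
#endif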
911c5247cb14ec4a8311619c688ba56ee244b55d.cu
#include<stdio.h> #include<stdlib.h> #include<cuda.h> #include<cuda_runtime.h> #include "wb.cuh" // Compute C = A * B // Sgemm stands for single precision general matrix-matrix multiply __global__ void sgemm(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns) { //@@ Insert code to implement matrix multiplication here int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < numARows && col < numBColumns) { float sum = 0; for (int ii = 0; ii < numAColumns; ii++) { sum += A[row * numAColumns + ii] * B[ii * numBColumns + col]; } C[row * numBColumns + col] = sum; } } #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while (0) int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; int numCColumns; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Allocate the hostC matrix hostC = (float *)malloc(numARows * numBColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); numCRows = numARows; numCColumns = numBColumns; wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbLog(TRACE, "The dimensions of C are ", numCRows, " x ", numCColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(float))); wbCheck(cudaMalloc((void **)&deviceB, numBRows * numBColumns * sizeof(float))); wbCheck(cudaMalloc((void **)&deviceC, numARows * numBColumns * sizeof(float))); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 blockDim(16, 16); // changed to BColumns and ARows from Acolumns and BRows dim3 gridDim(ceil(((float)numBColumns) / blockDim.x), ceil(((float)numARows) / blockDim.y)); wbLog(TRACE, "The block dimensions are ", blockDim.x, " x ", blockDim.y); wbLog(TRACE, "The grid dimensions are ", gridDim.x, " x ", gridDim.y); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here wbCheck(cudaMemset(deviceC, 0, numARows * numBColumns * sizeof(float))); sgemm << <gridDim, blockDim >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck(cudaMemcpy(hostC, deviceC, numARows * numBColumns * sizeof(float), 
cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numARows, numBColumns); free(hostA); free(hostB); free(hostC); system("pause"); return 0; }
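The main() above is tied to the course's wb.cuh harness. For a quick standalone sanity check of the same sgemm kernel, a driver along the following lines can be dropped into the same translation unit (a sketch: matrix sizes, seed and tolerance are arbitrary choices, and the entry point is named run_selftest so it does not collide with the existing main).

// Standalone sanity-check driver for the sgemm kernel above (a sketch;
// sizes and tolerance are arbitrary, not taken from the original file).
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime.h>

int run_selftest() {
  const int M = 37, K = 53, N = 29;   // deliberately not multiples of the 16x16 block
  float *hA = (float *)malloc(M * K * sizeof(float));
  float *hB = (float *)malloc(K * N * sizeof(float));
  float *hC = (float *)malloc(M * N * sizeof(float));
  for (int i = 0; i < M * K; ++i) hA[i] = rand() / (float)RAND_MAX;
  for (int i = 0; i < K * N; ++i) hB[i] = rand() / (float)RAND_MAX;

  float *dA, *dB, *dC;
  cudaMalloc((void **)&dA, M * K * sizeof(float));
  cudaMalloc((void **)&dB, K * N * sizeof(float));
  cudaMalloc((void **)&dC, M * N * sizeof(float));
  cudaMemcpy(dA, hA, M * K * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, K * N * sizeof(float), cudaMemcpyHostToDevice);

  dim3 blockDim(16, 16);
  dim3 gridDim((N + blockDim.x - 1) / blockDim.x, (M + blockDim.y - 1) / blockDim.y);
  sgemm<<<gridDim, blockDim>>>(dA, dB, dC, M, K, K, N);
  cudaMemcpy(hC, dC, M * N * sizeof(float), cudaMemcpyDeviceToHost);

  // CPU reference and element-wise comparison.
  int bad = 0;
  for (int r = 0; r < M; ++r)
    for (int c = 0; c < N; ++c) {
      float ref = 0.0f;
      for (int k = 0; k < K; ++k) ref += hA[r * K + k] * hB[k * N + c];
      if (fabsf(ref - hC[r * N + c]) > 1e-3f) ++bad;
    }
  printf("sgemm self-test: %d mismatches\n", bad);

  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  free(hA); free(hB); free(hC);
  return bad == 0 ? 0 : 1;
}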
d383937da5059ce65bee790a934db64946d89a33.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***
Copyright (c) 2017 Patryk Orzechowski

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***/

#ifndef _EVALUATE_TRENDS_CU_
#define _EVALUATE_TRENDS_CU_

template<typename T>
__device__ void evaluate_trends(int *bicl_indices, int *compressed_biclusters,
                                int num_rows, int num_cols, T *data,
                                int *trendcheck, float *trendvalue,
                                const float EPSILON, float MISSING_VALUE,
                                int increasing=1) {
  long long int index_x = blockIdx.x * blockDim.x + threadIdx.x; //block of bicluster
  long long int index_y = blockIdx.y * blockDim.y + threadIdx.y; //block of row
  trendcheck[threadIdx.x]=0;
  trendvalue[threadIdx.x]=0;
  if (index_x<num_rows ) {
    trendcheck[threadIdx.x] = 1;
    trendvalue[threadIdx.x] = data[compressed_biclusters[bicl_indices[index_y]]+num_cols*index_x];
  }
  __syncthreads();
  if (index_x<num_rows ) {
    for(int compressedId=bicl_indices[index_y]+1; compressedId<bicl_indices[index_y+1]; ++compressedId) {
      int pos=compressed_biclusters[compressedId];
      trendcheck[threadIdx.x] += (increasing*(data[pos+num_cols*index_x]+EPSILON-trendvalue[threadIdx.x])>= 0 && data[pos+num_cols*index_x]!=MISSING_VALUE);
      trendvalue[threadIdx.x] = data[pos+num_cols*index_x];
      __syncthreads();
    }
  }
}

#endif
d383937da5059ce65bee790a934db64946d89a33.cu
/***
Copyright (c) 2017 Patryk Orzechowski

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***/

#ifndef _EVALUATE_TRENDS_CU_
#define _EVALUATE_TRENDS_CU_

template<typename T>
__device__ void evaluate_trends(int *bicl_indices, int *compressed_biclusters,
                                int num_rows, int num_cols, T *data,
                                int *trendcheck, float *trendvalue,
                                const float EPSILON, float MISSING_VALUE,
                                int increasing=1) {
  long long int index_x = blockIdx.x * blockDim.x + threadIdx.x; //block of bicluster
  long long int index_y = blockIdx.y * blockDim.y + threadIdx.y; //block of row
  trendcheck[threadIdx.x]=0;
  trendvalue[threadIdx.x]=0;
  if (index_x<num_rows ) {
    trendcheck[threadIdx.x] = 1;
    trendvalue[threadIdx.x] = data[compressed_biclusters[bicl_indices[index_y]]+num_cols*index_x];
  }
  __syncthreads();
  if (index_x<num_rows ) {
    for(int compressedId=bicl_indices[index_y]+1; compressedId<bicl_indices[index_y+1]; ++compressedId) {
      int pos=compressed_biclusters[compressedId];
      trendcheck[threadIdx.x] += (increasing*(data[pos+num_cols*index_x]+EPSILON-trendvalue[threadIdx.x])>= 0 && data[pos+num_cols*index_x]!=MISSING_VALUE);
      trendvalue[threadIdx.x] = data[pos+num_cols*index_x];
      __syncthreads();
    }
  }
}

#endif
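evaluate_trends above is only a __device__ helper; the file does not show how it is launched or where trendcheck/trendvalue live. The wrapper below is a hypothetical illustration, not the project's real driver: it assumes blockDim.y == 1 so that blockIdx.y selects the bicluster, keeps the two scratch arrays in dynamic shared memory sized to blockDim.x, and copies each thread's trend count into an assumed output array. Note that the helper calls __syncthreads() inside a branch on index_x < num_rows, so launch geometries that leave some threads of a block outside num_rows diverge at that barrier.

// Hypothetical launch wrapper for evaluate_trends (a sketch; kernel name,
// output array and launch geometry are assumptions, not part of this file).
template<typename T>
__global__ void evaluate_trends_kernel(int *bicl_indices, int *compressed_biclusters,
                                       int num_rows, int num_cols, T *data,
                                       int *row_trend_counts,  // assumed output: [num_biclusters * num_rows]
                                       const float EPSILON, float MISSING_VALUE,
                                       int increasing) {
  // Per-thread scratch for the helper, carved out of dynamic shared memory.
  extern __shared__ unsigned char smem[];
  int   *trendcheck = reinterpret_cast<int *>(smem);
  float *trendvalue = reinterpret_cast<float *>(trendcheck + blockDim.x);

  evaluate_trends<T>(bicl_indices, compressed_biclusters, num_rows, num_cols, data,
                     trendcheck, trendvalue, EPSILON, MISSING_VALUE, increasing);

  long long int index_x = blockIdx.x * blockDim.x + threadIdx.x;
  long long int index_y = blockIdx.y * blockDim.y + threadIdx.y;  // bicluster id (blockDim.y assumed 1)
  if (index_x < num_rows)
    row_trend_counts[index_y * num_rows + index_x] = trendcheck[threadIdx.x];
}

// Possible launch (THREADS, grid shape and shared-memory size are assumptions):
//   dim3 block(THREADS, 1);
//   dim3 grid((num_rows + THREADS - 1) / THREADS, num_biclusters);
//   size_t shmem = THREADS * (sizeof(int) + sizeof(float));
//   evaluate_trends_kernel<float><<<grid, block, shmem>>>(/* ... */);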
39ea95b9a3775047b6b9c8130a139e3e45dee0ca.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include "kuda.h" #include "lodepng.h" // Banyak nx * nx Matrix // Banyak Max * Max Matrix int Max; void printMatrixGlcm(int *C, const int Max,int degree) { int *ic = C; FILE * fp = NULL; if(degree==0){ fp = fopen("matrix_glcm_0_method1_bukti.txt", "w+"); } else if(degree==90){ fp = fopen("matrix_glcm_90.txt", "w+"); } else if(degree==180){ fp = fopen("matrix_glcm_180.txt", "w+"); } else if(degree==270){ fp = fopen("matrix_glcm_270.txt", "w+"); } else if(degree==45){ fp = fopen("matrix_glcm_45.txt", "w+"); } else if(degree==135){ fp = fopen("matrix_glcm_135.txt", "w+"); } else if(degree==225){ fp = fopen("matrix_glcm_225.txt", "w+"); } else if(degree==315){ fp = fopen("matrix_glcm_315.txt", "w+"); } if(fp == NULL){ printf("Error creating results file\n"); exit(1); } for (int iy = 0; iy <Max; iy++) { for (int ix = 0; ix <Max; ix++) { fprintf(fp, "%d ", ic[ix]); } fprintf(fp, "\n\n"); ic += (Max); } printf("\n"); fclose(fp); return; } void printMatrixnxormalization(float *C, const int Max,int degree) { float *ic = C; FILE * fp = NULL; if(degree==0){ fp = fopen("matrix_normalisasi_0_metode1bukti.txt", "w+"); } else if(degree==90){ fp = fopen("matrix_normalisasi_90.txt", "w+"); } else if(degree==180){ fp = fopen("matrix_normalisasi_180.txt", "w+"); } else if(degree==270){ fp = fopen("matrix_normalisasi_270.txt", "w+"); } else if(degree==45){ fp = fopen("matrix_normalisasi_45.txt", "w+"); } else if(degree==135){ fp = fopen("matrix_normalisasi_135.txt", "w+"); } else if(degree==225){ fp = fopen("matrix_normalisasi_225.txt", "w+"); } else if(degree==315){ fp = fopen("matrix_normalisasi_315.txt", "w+"); } if(fp == NULL){ printf("Error creating results file\n"); exit(1); } for (int iy = 0; iy < Max; iy++) { for (int ix = 0; ix <Max; ix++) { fprintf(fp, "%.7f ", ic[ix]); } fprintf(fp, "\n\n"); ic +=Max; } printf("\n"); fclose(fp); return; } // void calculate_glcm_host(int *matrix,int *glcm,int nx,int ny,int Max){ // int i,j; // for(i=0;i<nx;i++){ // for(j=0;j<ny;j++){ // glcm[Max*matrix[i]+matrix[j]] +=1; // } // } // } //calculate glcm __global__ void glcm_calculation_nol(int *A,int *glcm, const int nx, const int ny,int maxx) { unsigned int idx =blockIdx.x*nx+threadIdx.x; int i; int k=0; for(i=0;i<nx;i++){ if(idx>=i*nx && idx<((i+1) *nx)-1){ k=maxx*A[idx]+A[idx+1]; atomicAdd(&glcm[k],1); } } } __global__ void glcm_calculation_180(int *A,int *glcm, const int nx, const int ny,int max){ //int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =blockIdx.x*nx+threadIdx.x; int i; int k=0; for(i=0;i<nx;i++){ if(idx>=i*nx && idx<((i+1) *nx)-1){ k=max*A[idx+1]+A[idx]; atomicAdd(&glcm[k],1); } } } __global__ void glcm_calculation_270(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(idx>=i*nx && idx<((i+1) *nx)){ k=max*A[idx]+A[idx+nx]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_90(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(idx>=i*nx && idx<((i+1) *nx)){ k=max*A[idx+nx]+A[idx]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void 
glcm_calculation_45(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=1;i<nx;i++){ if(blockIdx.x==i && idx <((i+1)*nx)-1){ k=max*A[idx]+A[idx-(nx-1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_135(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=1;i<nx;i++){ if(blockIdx.x==i && idx >i*nx){ k=max*A[idx]+A[idx-(nx+1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_225(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(blockIdx.x==i && idx >i*nx){ k=max*A[idx]+A[idx+(nx-1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_315(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(blockIdx.x==i && idx <((i+1)*nx)-1){ k=max*A[idx]+A[idx+(nx+1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void Mul(float *newMatrix,float *mulMatrix,int Max,float *sumMatrix){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // int Index = iy * nx + ix; for (int k = 0; k < Max; k++) { // Accumulate results for a single element // c[row * nx + col] += a[row * nx + k] * b[k * nx + col]; // printf("C[%d] = a[%d] * b[%d]\n",row * nx + col,row * nx + k, k * nx + col); atomicAdd(&mulMatrix[row * Max + col],newMatrix[row * Max + k] * newMatrix[k * Max + col]); // atomicAdd(&sumMatrix[0],mulMatrix[row * Max + col]); } } __global__ void Jumlah(float *sumMatrix,float *mulMatrix){ int Index = blockIdx.x * blockDim.x + threadIdx.x; // if(Index<1) printf("%f",mulMatrix[0]); atomicAdd(&sumMatrix[0],mulMatrix[Index]); } __global__ void AddToitTranspose(int *transposed,int *glcm,int Max){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; //printf("%d %d\n",row*Max+col,col*Max+row); transposed[row*Max+col]=glcm[row*Max+col]+glcm[col*Max+row]; } __global__ void normalization(int *glcm,float *norm,int Max,int sum){ int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * Max + ix; __syncthreads(); if(idx<(Max+1)*(Max+1)&&glcm[idx]!=0){ norm[idx]=float(glcm[idx])/float(sum); } } __global__ void calculate_contrast(float *norm,float *contrast,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // int Index = iy * N + ix; // for (int k = 0; k < Max; k++) { // // Accumulate results for a single element // // c[row * N + col] += a[row * N + k] * b[k * N + col]; // // printf("C[%d] = a[%d] * b[%d]\n",row * N + col,row * N + k, k * N + col); // atomicAdd(&mulMatrix[row * Max + col],norm[row * Max + k] * norm[k * Max + col]); // } if(norm[row*Max+col]>0){ //printf("%f\n",norm[row*Max+col]); atomicAdd(&contrast[0],((row-col)*(row-col))*norm[row*Max+col]); //printf("nilai contrast %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); 
//atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",contrast[0]); } // if (Index == 0){ // printf("ASM %f\n",ASM[0]); // } } __global__ void calculate_IDM(float *norm,float *IDM,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ atomicAdd(&IDM[0],norm[row*Max+col] / (1+((row-col)*(row-col))) ); //printf("nilai IDM %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",IDM[0]); } } __global__ void calculate_entropy(float *norm,float *entropy,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ atomicAdd(&entropy[0],(norm[row*Max+col] * log10f(norm[row*Max+col])) ); //printf("nilai entropy %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",entropy[0]); } } __global__ void calculate_ASM(float *norm,float *ASM,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); if(norm[row*Max+col]>0){ // printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",ASM[0]); } } __global__ void calculate_miu_i(float *norm,float *miu_i,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&miu_i[0],row*norm[row*Max+col]); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_miu_j(float *norm,float *miu_j,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&miu_j[0],col*norm[row*Max+col]); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_std_i(float *norm,float *std_i,float*miu_i,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&std_i[0],norm[row*Max+col] * ((row-miu_i[0])*(row-miu_i[0]))); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_std_j(float *norm,float *std_i,float *miu_j,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&std_i[0],norm[row*Max+col]*(((col-miu_j[0])*(col-miu_j[0])))); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); 
//atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } }__global__ void calculate_korelasi(float *norm,float *korelasi,float *miu_i,float *std_i,float *miu_j,float *std_j,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&korelasi[0],(((row-miu_i[0])*(col-miu_j[0]))*norm[row*Max+col])/(std_i[0]*std_j[0])); //printf("nilai korelasi %f %f \n",(row-miu_i[0]),(col-miu_j[0])); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_variance(float *norm,float *variance,float *miu_i,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&variance[0],((row-miu_i[0])*(row-miu_i[0]))*norm[row*Max+col]); //printf("nilai korelasi %f %f \n",(row-miu_i[0]),(col-miu_j[0])); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_sumaverage(float *norm,float *sav,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=2;k<2*Max;k++){ if((row+col)==k){ atomicAdd(&sav[0],k*(1*norm[row*Max+col])); } else{ atomicAdd(&sav[0],0); } } } __global__ void calculate_sumentropy(float *norm,float *sen,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=2;k<2*Max;k++){ if((row+col)==k && norm[row*Max+col]>0){ //printf("%f\n",norm[row*Max+col]); atomicAdd(&sen[0],(1*norm[row*Max+col])*(log10(1*norm[row*Max+col]))); } else{ atomicAdd(&sen[0],0); } } } __global__ void calculate_sumvariance(float *norm,float *sva,float *sen,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=2;k<2*Max;k++){ if((row+col)==k && norm[row*Max+col]>0){ //printf("sva%f\n",norm[row*Max+col]); atomicAdd(&sva[0],((k-sen[0])*(k-sen[0]))*(1*norm[row*Max+col])); } else{ atomicAdd(&sva[0],0); } } } __global__ void calculate_differenceentropy(float *norm,float *den,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k=0; for(k=0;k<Max-1;k++){ if(abs(row-col)==k && norm[row*Max+col]>0){ atomicAdd(&den[0],(1*norm[row*Max+col])*(log10(1*norm[row*Max+col]))); //printf("apa %f\n",den[0]); } else{ atomicAdd(&den[0],0); } } } __global__ void calculate_HX(float *norm,float *HX,int Max){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ atomicAdd(&HX[0],norm[row*Max+col]*log10f(norm[row*Max+col])); } } __global__ void calculate_HY(float *norm,float *HY,int Max){ //printf("%d\n",max); int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(norm[row*Max+col]>0){ atomicAdd(&HY[0],norm[row*Max+col]*log10f(norm[row*Max+col])); } } __global__ void calculate_HXY1(float *norm,float *HXY1,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("%.13f %.13f %f %f 
\n",norm[row],norm[col],norm[row*Max+col],log10f(norm[row]*norm[col])); atomicAdd(&HXY1[0],norm[row*Max+col]*log10f(norm[row*Max+col])); } } __global__ void calculate_dva(float *norm,float *dva,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=0;k<Max-1;k++){ if(abs(row-col)==k && norm[row*Max+col]>0){ //printf("%f\n",norm[row*Max+col]); atomicAdd(&dva[0],(k*k)*(1*norm[row*Max+col])); } else{ atomicAdd(&dva[0],0); } } } // void takeimagevalue(const char* filename, rgb_image *img) // { // unsigned error; // unsigned char* png; // size_t pngsize;; // lodepng_load_file(&png, &pngsize, filename); // error = lodepng_decode32(&img->image, &img->width, &img->height, png, pngsize); // if(error) printf("error %u: %s\n", error, lodepng_error_text(error)); // } // void transformToGrayCuda(rgb_image *img){ // unsigned char* image = img->image; // unsigned char* image_d; // unsigned int width = img->width; // unsigned int height = img->height; // int n =width*height; // size_t size = n * 4 * sizeof(unsigned char); // int device_count = 0; // hipError_t status = hipGetDeviceCount(&device_count); // status = hipMalloc((void **) &image_d, size); // hipMemcpy(image_d, image, size, hipMemcpyHostToDevice); // dim3 block_size(16, 16); // dim3 num_blocks(img->width / block_size.x, img->height / block_size.y); // setPixelToGrayscale<<<num_blocks, block_size>>>(image_d, img->width, img->height); // hipMemcpy(image, image_d, size, hipMemcpyDeviceToHost); // hipFree(image_d); // } // __global__ // void setPixelToGrayscale(unsigned char *image, unsigned width, unsigned height) // { // float gray; // float r, g, b; // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // if (x < width && y < height) { // r = image[4 * width * y + 4 * x + 0]; // g = image[4 * width * y + 4 * x + 1]; // b = image[4 * width * y + 4 * x + 2]; // gray =.299f*r + .587f*g + .114f*b; // image[4 * width * y + 4 * x + 0] = gray; // image[4 * width * y + 4 * x + 1] = gray; // image[4 * width * y + 4 * x + 2] = gray; // image[4 * width * y + 4 * x + 3] = 255; // } // } // void saveimagegray(const char* filename, rgb_image *img) // { // /*Encode the image*/ // unsigned error = lodepng_encode32_file(filename, img->image, img->width, img->height); // /*if there's an error, display it*/ // if(error) printf("error %u: %s\n", error, lodepng_error_text(error)); // } int main(int argc, char *argv[]){ char *d; long deg =strtol(argv[1],&d,10); int degree=deg; // char *data; // long datas =strtol(argv[2],&data,10); // int ukuran=datas; //const char* filename = argc > 1 ? 
argv[1] : "test.png"; //rgb_image img; // takeimagevalue(filename, &img); // transformToGrayCuda(&img); //int nx =img.width; //int ny =img.height; int ukuran,graylevel; printf("ukuran gambar: "); scanf("%d",&ukuran); printf("max graylevel : "); scanf("%d",&graylevel); printf("calculate glcm for image size %dx%d %d degre Starting...\n", ukuran,ukuran,degree); int nx,ny; nx=ukuran; ny=ukuran; int *matrix,*glcm,*transposed; float *norm,*mulMatrix,*sumMatrix; hipMallocManaged(&matrix, (nx * ny) * sizeof(int)); for(int i = 0 ; i < (nx * nx) ; ++i){ matrix[i] = rand() %graylevel; if(matrix[i] > Max){ Max = matrix[i]; } } for(int i = 0 ; i < nx ; ++i){ for(int j = 0 ; j < nx ; ++j){ printf("%4d",matrix[i * nx + j]); } printf("\n"); } //printf("\n\n"); Max = Max + 1; // karena index dimulai dari 0 dan Maximum 3 ( 0 - 3 = 4 ) jadi Max ditambah 1; int kBytes = Max * Max * sizeof(float); int nBytes = nx * ny * sizeof(float); hipMallocManaged(&glcm, (Max * Max) * sizeof(int)); hipMallocManaged(&transposed, (Max * Max) * sizeof(int)); hipMallocManaged(&mulMatrix, (Max * Max) * sizeof(float)); hipMallocManaged(&sumMatrix, (Max * Max) * sizeof(float)); hipMallocManaged(&norm, (Max * Max) * sizeof(float)); for(int i = 0 ; i < (Max * Max) ; ++i){ glcm[i] = 0; mulMatrix[i] = 0; } float*ASM,*contrast,*IDM,*entropy,*miu_i,*miu_j,*std_i,*std_j,*korelasi,*variance,*sav,*sen,*sva,*den,*HX,*HY,*HXY1,*dva; hipMallocManaged(&ASM, (Max * Max) * sizeof(float)); hipMallocManaged(&contrast, (Max * Max) * sizeof(float)); hipMallocManaged(&IDM, (Max * Max) * sizeof(float)); hipMallocManaged(&entropy, (Max * Max) * sizeof(float)); hipMallocManaged(&miu_i, (Max * Max) * sizeof(float)); hipMallocManaged(&miu_j, (Max * Max) * sizeof(float)); hipMallocManaged(&std_i, (Max * Max) * sizeof(float)); hipMallocManaged(&std_j, (Max * Max) * sizeof(float)); hipMallocManaged(&korelasi, (Max * Max) * sizeof(float)); hipMallocManaged(&variance, (Max * Max) * sizeof(float)); hipMallocManaged(&sav, (Max * Max) * sizeof(float)); hipMallocManaged(&sen, (Max * Max) * sizeof(float)); hipMallocManaged(&sva, (Max * Max) * sizeof(float)); hipMallocManaged(&den, (Max * Max) * sizeof(float)); hipMallocManaged(&HX, (Max * Max) * sizeof(float)); hipMallocManaged(&HY, (Max * Max) * sizeof(float)); hipMallocManaged(&dva, (Max * Max) * sizeof(float)); hipMallocManaged(&HXY1, (Max * Max) * sizeof(float)); dim3 block(ny); dim3 grid((nx + block.x - 1) / block.x, (nx + block.y - 1) / block.y); dim3 blocks(2,2); dim3 grids((Max + blocks.x - 1) / blocks.x, (Max + blocks.y - 1) / blocks.y); hipGetLastError(); clock_t start, end; double t = 0; start = clock(); // invoke kernel for calculation //hipLaunchKernelGGL(( glcm_calculation_nol), dim3(ny),dim3(nx), 0, 0, matrix,glcm, nx, ny,Max); // hipDeviceSynchronize(); // AddToitTranspose<<<grids,blocks>>>(transposed,glcm,Max); // hipDeviceSynchronize(); // printf("hasil transpose\n"); // for(int i = 0 ; i < Max ; ++i){ // for(int j = 0 ; j < Max ; ++j){ // printf("%4d ",transposed[i * Max + j]); // } // printf("\n"); // } if(degree==0){ hipLaunchKernelGGL(( glcm_calculation_nol), dim3(ny),dim3(nx), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree ==180){ hipLaunchKernelGGL(( glcm_calculation_180), dim3(ny),dim3(nx), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( 
AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==270){ dim3 block(1, nx); dim3 grid((ny + block.x - 1) / block.x, (ny + block.y - 1) / block.y); hipLaunchKernelGGL(( glcm_calculation_270), dim3(grid),dim3(block), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==90){ dim3 block(1, nx); dim3 grid((ny + block.x - 1) / block.x, (ny + block.y - 1) / block.y); hipLaunchKernelGGL(( glcm_calculation_90), dim3(grid),dim3(block), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==45){ hipLaunchKernelGGL(( glcm_calculation_45), dim3(ny),dim3(nx), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==135){ hipLaunchKernelGGL(( glcm_calculation_135), dim3(ny),dim3(nx), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==225){ hipLaunchKernelGGL(( glcm_calculation_225), dim3(ny),dim3(nx), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==315){ hipLaunchKernelGGL(( glcm_calculation_315), dim3(ny),dim3(nx), 0, 0, matrix,glcm, nx, ny,Max); hipDeviceSynchronize(); end = clock(); hipLaunchKernelGGL(( AddToitTranspose), dim3(grid),dim3(block), 0, 0, transposed,glcm,Max); hipDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } // printMatrixGlcm(glcm,Max,0); printf("hasil glcm\n"); for(int i = 0 ; i < Max ; ++i){ for(int j = 0 ; j < Max ; ++j){ printf("%4d ",glcm[i * Max + j]); } printf("\n"); } t = ((double) (end - start))/CLOCKS_PER_SEC; int sum; sum=0; for(int i=0;i<Max*Max;i++){ sum +=transposed[i]; //if(transposed[i]>0){ // printf("%d\n",transposed[i]); //} } printf("sum %d",sum); hipLaunchKernelGGL(( normalization), dim3(Max),dim3(Max), 0, 0, transposed,norm,Max,sum); hipDeviceSynchronize(); printf("Hasil normalisasi : \n"); // for(int i = 0 ; i < Max ; ++i){ // for(int j = 0 ; j < Max ; ++j){ // //if(norm[i * Max + j]>0) // printf("%.7f ",norm[i * Max + j]); // } // printf("\n"); // } printMatrixnxormalization(norm,Max,0); float sums; sums=0; for(int i=0;i<Max*Max;i++){ sums +=norm[i]; } //Jumlah <<< Max,Max >>>(sumMatrix,norm); printf("jumlah %f\n",sums); int *dif; dif = (int *)malloc(kBytes); int *d_dif; (hipMalloc((void **)&d_dif, nBytes)); // transfer data from host to device (hipMemcpy(d_dif, dif, kBytes, hipMemcpyHostToDevice)); dim3 b(32,32); dim3 g((Max + b.x - 1) / b.x, (Max + b.y - 1) / b.y); //Step1 hipLaunchKernelGGL(( calculate_contrast), dim3(g),dim3(b), 0, 0, norm,contrast,Max); hipLaunchKernelGGL(( calculate_entropy), dim3(g),dim3(b), 0, 0, norm,entropy,Max); hipLaunchKernelGGL(( calculate_IDM), dim3(g),dim3(b), 0, 0, 
norm,IDM,Max); hipLaunchKernelGGL(( calculate_ASM), dim3(g),dim3(b), 0, 0, norm,ASM,Max); hipLaunchKernelGGL(( calculate_miu_i), dim3(g),dim3(b), 0, 0, norm,miu_i,Max); hipLaunchKernelGGL(( calculate_miu_j), dim3(g),dim3(b), 0, 0, norm,miu_j,Max); hipDeviceSynchronize(); //Step2 hipLaunchKernelGGL(( calculate_std_i), dim3(g),dim3(b), 0, 0, norm,std_i,miu_i,Max); hipLaunchKernelGGL(( calculate_std_j), dim3(g),dim3(b), 0, 0, norm,std_j,miu_j,Max); hipLaunchKernelGGL(( calculate_variance), dim3(g),dim3(b), 0, 0, norm,variance,miu_i,Max); hipLaunchKernelGGL(( calculate_sumaverage), dim3(g),dim3(b), 0, 0, norm,sav,Max); hipLaunchKernelGGL(( calculate_sumentropy), dim3(g),dim3(b), 0, 0, norm,sen,Max); hipLaunchKernelGGL(( calculate_differenceentropy), dim3(g),dim3(b), 0, 0, norm,den,Max); hipLaunchKernelGGL(( calculate_HX), dim3(g),dim3(b), 0, 0, norm,HX,Max); hipLaunchKernelGGL(( calculate_HY), dim3(g),dim3(b), 0, 0, norm,HY,Max); hipLaunchKernelGGL(( calculate_HXY1), dim3(g),dim3(b), 0, 0, norm,HXY1,Max); hipDeviceSynchronize(); //Step3 hipLaunchKernelGGL(( calculate_sumvariance), dim3(g),dim3(b), 0, 0, norm,sva,sen,Max); hipLaunchKernelGGL(( calculate_korelasi), dim3(g),dim3(b), 0, 0, norm,korelasi,miu_i,std_i,miu_j,std_j,Max); hipLaunchKernelGGL(( calculate_dva), dim3(g),dim3(b), 0, 0, norm,dva,Max); hipDeviceSynchronize(); printf("ASM : %.13f\n",ASM[0]); printf("Contrast : %.13f\n",contrast[0]); printf("IDM : %.13f\n",IDM[0]); printf("entropy : %.13f\n",-(entropy[0])); printf("miu_i : %.13f\n",(miu_i[0])); printf("miu_j : %.13f\n",(miu_j[0])); printf("std_i : %.13f\n",(std_i[0])); printf("std_j : %.13f\n",(std_j[0])); printf("variance : %.13f\n",(variance[0])); printf("SAV : %.13f\n",(sav[0])); printf("SEN : %.13f\n",-(sen[0])); printf("SVA : %.13f\n",(sva[0])); printf("DEN : %.13f\n",-(den[0])); printf("HX : %.13f\n",-(HX[0])); printf("HY : %.13f\n",-(HY[0])); printf("HXY1 : %.13f\n",-(HXY1[0])); printf("IMC : %.13f\n",(entropy[0]-HXY1[0])/max(-(HX[0]),-(HY[0]))); printf("korelasi : %.13f\n",(korelasi[0])); printf("Differnece Variance : %.13f\n",(dva[0])); printf("matrix gambar disimpan di matrixgambarmetode1 bukti.txt\n"); printf("matrix glcm disimpan di matrix_glcm_bukti_%d.txt\n",0); printf("matrix glcm normalisasi disimpan di matrix_ormalisasi_%d_bukti.txt\n",0); printf("waktu eksekusi: %f\n",t); // free host and devide memory hipFree(matrix);hipFree(glcm);hipFree(norm); hipFree(mulMatrix); }
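Every texture feature in the GLCM program above is accumulated with atomics, which makes a small CPU reference useful when checking the kernel output. The sketch below (the helper name is mine, not from the file) recomputes the 0-degree co-occurrence matrix, the symmetrized normalization, and the contrast feature following the glcm_calculation_nol -> AddToitTranspose -> normalization -> calculate_contrast chain in this .hip/.cu pair.

// CPU reference for the 0-degree GLCM pipeline above (a validation sketch;
// the function name is not part of the original program). `Max` is the
// number of gray levels, as in the kernels (max pixel value + 1).
#include <vector>

float glcm0_contrast_reference(const int *img, int nx, int ny, int Max) {
  // 0-degree co-occurrences: each pixel paired with its right-hand neighbour,
  // mirroring glcm_calculation_nol.
  std::vector<int> glcm(Max * Max, 0);
  for (int r = 0; r < ny; ++r)
    for (int c = 0; c + 1 < nx; ++c)
      ++glcm[Max * img[r * nx + c] + img[r * nx + c + 1]];

  // Symmetrize (AddToitTranspose) and normalize by the total count, as the
  // normalization kernel does with the host-side sum of `transposed`.
  long long total = 0;
  for (int r = 0; r < Max; ++r)
    for (int c = 0; c < Max; ++c)
      total += glcm[r * Max + c] + glcm[c * Max + r];
  std::vector<float> norm(Max * Max, 0.0f);
  for (int r = 0; r < Max; ++r)
    for (int c = 0; c < Max; ++c)
      norm[r * Max + c] = (glcm[r * Max + c] + glcm[c * Max + r]) / (float)total;

  // Contrast = sum over (i-j)^2 * p(i,j), matching calculate_contrast.
  float contrast = 0.0f;
  for (int r = 0; r < Max; ++r)
    for (int c = 0; c < Max; ++c)
      contrast += (float)((r - c) * (r - c)) * norm[r * Max + c];
  return contrast;
}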
39ea95b9a3775047b6b9c8130a139e3e45dee0ca.cu
#include <cuda_runtime.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include "kuda.h" #include "lodepng.h" // Banyak nx * nx Matrix // Banyak Max * Max Matrix int Max; void printMatrixGlcm(int *C, const int Max,int degree) { int *ic = C; FILE * fp = NULL; if(degree==0){ fp = fopen("matrix_glcm_0_method1_bukti.txt", "w+"); } else if(degree==90){ fp = fopen("matrix_glcm_90.txt", "w+"); } else if(degree==180){ fp = fopen("matrix_glcm_180.txt", "w+"); } else if(degree==270){ fp = fopen("matrix_glcm_270.txt", "w+"); } else if(degree==45){ fp = fopen("matrix_glcm_45.txt", "w+"); } else if(degree==135){ fp = fopen("matrix_glcm_135.txt", "w+"); } else if(degree==225){ fp = fopen("matrix_glcm_225.txt", "w+"); } else if(degree==315){ fp = fopen("matrix_glcm_315.txt", "w+"); } if(fp == NULL){ printf("Error creating results file\n"); exit(1); } for (int iy = 0; iy <Max; iy++) { for (int ix = 0; ix <Max; ix++) { fprintf(fp, "%d ", ic[ix]); } fprintf(fp, "\n\n"); ic += (Max); } printf("\n"); fclose(fp); return; } void printMatrixnxormalization(float *C, const int Max,int degree) { float *ic = C; FILE * fp = NULL; if(degree==0){ fp = fopen("matrix_normalisasi_0_metode1bukti.txt", "w+"); } else if(degree==90){ fp = fopen("matrix_normalisasi_90.txt", "w+"); } else if(degree==180){ fp = fopen("matrix_normalisasi_180.txt", "w+"); } else if(degree==270){ fp = fopen("matrix_normalisasi_270.txt", "w+"); } else if(degree==45){ fp = fopen("matrix_normalisasi_45.txt", "w+"); } else if(degree==135){ fp = fopen("matrix_normalisasi_135.txt", "w+"); } else if(degree==225){ fp = fopen("matrix_normalisasi_225.txt", "w+"); } else if(degree==315){ fp = fopen("matrix_normalisasi_315.txt", "w+"); } if(fp == NULL){ printf("Error creating results file\n"); exit(1); } for (int iy = 0; iy < Max; iy++) { for (int ix = 0; ix <Max; ix++) { fprintf(fp, "%.7f ", ic[ix]); } fprintf(fp, "\n\n"); ic +=Max; } printf("\n"); fclose(fp); return; } // void calculate_glcm_host(int *matrix,int *glcm,int nx,int ny,int Max){ // int i,j; // for(i=0;i<nx;i++){ // for(j=0;j<ny;j++){ // glcm[Max*matrix[i]+matrix[j]] +=1; // } // } // } //calculate glcm __global__ void glcm_calculation_nol(int *A,int *glcm, const int nx, const int ny,int maxx) { unsigned int idx =blockIdx.x*nx+threadIdx.x; int i; int k=0; for(i=0;i<nx;i++){ if(idx>=i*nx && idx<((i+1) *nx)-1){ k=maxx*A[idx]+A[idx+1]; atomicAdd(&glcm[k],1); } } } __global__ void glcm_calculation_180(int *A,int *glcm, const int nx, const int ny,int max){ //int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =blockIdx.x*nx+threadIdx.x; int i; int k=0; for(i=0;i<nx;i++){ if(idx>=i*nx && idx<((i+1) *nx)-1){ k=max*A[idx+1]+A[idx]; atomicAdd(&glcm[k],1); } } } __global__ void glcm_calculation_270(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(idx>=i*nx && idx<((i+1) *nx)){ k=max*A[idx]+A[idx+nx]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_90(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(idx>=i*nx && idx<((i+1) *nx)){ k=max*A[idx+nx]+A[idx]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_45(int *A,int *glcm, const int nx, const int ny,int max){ int ix = 
threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=1;i<nx;i++){ if(blockIdx.x==i && idx <((i+1)*nx)-1){ k=max*A[idx]+A[idx-(nx-1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_135(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=1;i<nx;i++){ if(blockIdx.x==i && idx >i*nx){ k=max*A[idx]+A[idx-(nx+1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_225(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(blockIdx.x==i && idx >i*nx){ k=max*A[idx]+A[idx+(nx-1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void glcm_calculation_315(int *A,int *glcm, const int nx, const int ny,int max){ int ix = threadIdx.x + blockIdx.x* blockDim.x; int iy = threadIdx.y + blockIdx.y* blockDim.y; unsigned int idx =iy*nx+ix; int i; int k=0; for(i=0;i<nx-1;i++){ if(blockIdx.x==i && idx <((i+1)*nx)-1){ k=max*A[idx]+A[idx+(nx+1)]; atomicAdd(&glcm[k],1); } } __syncthreads(); } __global__ void Mul(float *newMatrix,float *mulMatrix,int Max,float *sumMatrix){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // int Index = iy * nx + ix; for (int k = 0; k < Max; k++) { // Accumulate results for a single element // c[row * nx + col] += a[row * nx + k] * b[k * nx + col]; // printf("C[%d] = a[%d] * b[%d]\n",row * nx + col,row * nx + k, k * nx + col); atomicAdd(&mulMatrix[row * Max + col],newMatrix[row * Max + k] * newMatrix[k * Max + col]); // atomicAdd(&sumMatrix[0],mulMatrix[row * Max + col]); } } __global__ void Jumlah(float *sumMatrix,float *mulMatrix){ int Index = blockIdx.x * blockDim.x + threadIdx.x; // if(Index<1) printf("%f",mulMatrix[0]); atomicAdd(&sumMatrix[0],mulMatrix[Index]); } __global__ void AddToitTranspose(int *transposed,int *glcm,int Max){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; //printf("%d %d\n",row*Max+col,col*Max+row); transposed[row*Max+col]=glcm[row*Max+col]+glcm[col*Max+row]; } __global__ void normalization(int *glcm,float *norm,int Max,int sum){ int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * Max + ix; __syncthreads(); if(idx<(Max+1)*(Max+1)&&glcm[idx]!=0){ norm[idx]=float(glcm[idx])/float(sum); } } __global__ void calculate_contrast(float *norm,float *contrast,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; // int Index = iy * N + ix; // for (int k = 0; k < Max; k++) { // // Accumulate results for a single element // // c[row * N + col] += a[row * N + k] * b[k * N + col]; // // printf("C[%d] = a[%d] * b[%d]\n",row * N + col,row * N + k, k * N + col); // atomicAdd(&mulMatrix[row * Max + col],norm[row * Max + k] * norm[k * Max + col]); // } if(norm[row*Max+col]>0){ //printf("%f\n",norm[row*Max+col]); atomicAdd(&contrast[0],((row-col)*(row-col))*norm[row*Max+col]); //printf("nilai contrast %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",contrast[0]); } // if (Index == 0){ // 
printf("ASM %f\n",ASM[0]); // } } __global__ void calculate_IDM(float *norm,float *IDM,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ atomicAdd(&IDM[0],norm[row*Max+col] / (1+((row-col)*(row-col))) ); //printf("nilai IDM %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",IDM[0]); } } __global__ void calculate_entropy(float *norm,float *entropy,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ atomicAdd(&entropy[0],(norm[row*Max+col] * log10f(norm[row*Max+col])) ); //printf("nilai entropy %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",entropy[0]); } } __global__ void calculate_ASM(float *norm,float *ASM,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); if(norm[row*Max+col]>0){ // printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",ASM[0]); } } __global__ void calculate_miu_i(float *norm,float *miu_i,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&miu_i[0],row*norm[row*Max+col]); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_miu_j(float *norm,float *miu_j,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&miu_j[0],col*norm[row*Max+col]); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_std_i(float *norm,float *std_i,float*miu_i,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&std_i[0],norm[row*Max+col] * ((row-miu_i[0])*(row-miu_i[0]))); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_std_j(float *norm,float *std_i,float *miu_j,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&std_i[0],norm[row*Max+col]*(((col-miu_j[0])*(col-miu_j[0])))); //printf("nilai miu_i %d %d %d %f\n",((row-col)*(row-col)),row,col,norm[row*Max+col]); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } }__global__ void calculate_korelasi(float *norm,float *korelasi,float 
*miu_i,float *std_i,float *miu_j,float *std_j,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&korelasi[0],(((row-miu_i[0])*(col-miu_j[0]))*norm[row*Max+col])/(std_i[0]*std_j[0])); //printf("nilai korelasi %f %f \n",(row-miu_i[0]),(col-miu_j[0])); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_variance(float *norm,float *variance,float *miu_i,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("nilai %d %d %d %f\n",row*Max+col,row,col,norm[row*Max+col]); atomicAdd(&variance[0],((row-miu_i[0])*(row-miu_i[0]))*norm[row*Max+col]); //printf("nilai korelasi %f %f \n",(row-miu_i[0]),(col-miu_j[0])); //atomicAdd(&ASM[0],norm[row*Max+col]*norm[row*Max+col]); //printf("%f\n",miu_i[0]); } } __global__ void calculate_sumaverage(float *norm,float *sav,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=2;k<2*Max;k++){ if((row+col)==k){ atomicAdd(&sav[0],k*(1*norm[row*Max+col])); } else{ atomicAdd(&sav[0],0); } } } __global__ void calculate_sumentropy(float *norm,float *sen,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=2;k<2*Max;k++){ if((row+col)==k && norm[row*Max+col]>0){ //printf("%f\n",norm[row*Max+col]); atomicAdd(&sen[0],(1*norm[row*Max+col])*(log10(1*norm[row*Max+col]))); } else{ atomicAdd(&sen[0],0); } } } __global__ void calculate_sumvariance(float *norm,float *sva,float *sen,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=2;k<2*Max;k++){ if((row+col)==k && norm[row*Max+col]>0){ //printf("sva%f\n",norm[row*Max+col]); atomicAdd(&sva[0],((k-sen[0])*(k-sen[0]))*(1*norm[row*Max+col])); } else{ atomicAdd(&sva[0],0); } } } __global__ void calculate_differenceentropy(float *norm,float *den,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k=0; for(k=0;k<Max-1;k++){ if(abs(row-col)==k && norm[row*Max+col]>0){ atomicAdd(&den[0],(1*norm[row*Max+col])*(log10(1*norm[row*Max+col]))); //printf("apa %f\n",den[0]); } else{ atomicAdd(&den[0],0); } } } __global__ void calculate_HX(float *norm,float *HX,int Max){ int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ atomicAdd(&HX[0],norm[row*Max+col]*log10f(norm[row*Max+col])); } } __global__ void calculate_HY(float *norm,float *HY,int Max){ //printf("%d\n",max); int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(norm[row*Max+col]>0){ atomicAdd(&HY[0],norm[row*Max+col]*log10f(norm[row*Max+col])); } } __global__ void calculate_HXY1(float *norm,float *HXY1,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if(norm[row*Max+col]>0){ //printf("%.13f %.13f %f %f \n",norm[row],norm[col],norm[row*Max+col],log10f(norm[row]*norm[col])); atomicAdd(&HXY1[0],norm[row*Max+col]*log10f(norm[row*Max+col])); } } __global__ void calculate_dva(float *norm,float 
*dva,int Max){ //printf("%d\n",max); int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int k; for(k=0;k<Max-1;k++){ if(abs(row-col)==k && norm[row*Max+col]>0){ //printf("%f\n",norm[row*Max+col]); atomicAdd(&dva[0],(k*k)*(1*norm[row*Max+col])); } else{ atomicAdd(&dva[0],0); } } } // void takeimagevalue(const char* filename, rgb_image *img) // { // unsigned error; // unsigned char* png; // size_t pngsize;; // lodepng_load_file(&png, &pngsize, filename); // error = lodepng_decode32(&img->image, &img->width, &img->height, png, pngsize); // if(error) printf("error %u: %s\n", error, lodepng_error_text(error)); // } // void transformToGrayCuda(rgb_image *img){ // unsigned char* image = img->image; // unsigned char* image_d; // unsigned int width = img->width; // unsigned int height = img->height; // int n =width*height; // size_t size = n * 4 * sizeof(unsigned char); // int device_count = 0; // cudaError_t status = cudaGetDeviceCount(&device_count); // status = cudaMalloc((void **) &image_d, size); // cudaMemcpy(image_d, image, size, cudaMemcpyHostToDevice); // dim3 block_size(16, 16); // dim3 num_blocks(img->width / block_size.x, img->height / block_size.y); // setPixelToGrayscale<<<num_blocks, block_size>>>(image_d, img->width, img->height); // cudaMemcpy(image, image_d, size, cudaMemcpyDeviceToHost); // cudaFree(image_d); // } // __global__ // void setPixelToGrayscale(unsigned char *image, unsigned width, unsigned height) // { // float gray; // float r, g, b; // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // if (x < width && y < height) { // r = image[4 * width * y + 4 * x + 0]; // g = image[4 * width * y + 4 * x + 1]; // b = image[4 * width * y + 4 * x + 2]; // gray =.299f*r + .587f*g + .114f*b; // image[4 * width * y + 4 * x + 0] = gray; // image[4 * width * y + 4 * x + 1] = gray; // image[4 * width * y + 4 * x + 2] = gray; // image[4 * width * y + 4 * x + 3] = 255; // } // } // void saveimagegray(const char* filename, rgb_image *img) // { // /*Encode the image*/ // unsigned error = lodepng_encode32_file(filename, img->image, img->width, img->height); // /*if there's an error, display it*/ // if(error) printf("error %u: %s\n", error, lodepng_error_text(error)); // } int main(int argc, char *argv[]){ char *d; long deg =strtol(argv[1],&d,10); int degree=deg; // char *data; // long datas =strtol(argv[2],&data,10); // int ukuran=datas; //const char* filename = argc > 1 ? 
argv[1] : "test.png"; //rgb_image img; // takeimagevalue(filename, &img); // transformToGrayCuda(&img); //int nx =img.width; //int ny =img.height; int ukuran,graylevel; printf("ukuran gambar: "); scanf("%d",&ukuran); printf("max graylevel : "); scanf("%d",&graylevel); printf("calculate glcm for image size %dx%d %d degre Starting...\n", ukuran,ukuran,degree); int nx,ny; nx=ukuran; ny=ukuran; int *matrix,*glcm,*transposed; float *norm,*mulMatrix,*sumMatrix; cudaMallocManaged(&matrix, (nx * ny) * sizeof(int)); for(int i = 0 ; i < (nx * nx) ; ++i){ matrix[i] = rand() %graylevel; if(matrix[i] > Max){ Max = matrix[i]; } } for(int i = 0 ; i < nx ; ++i){ for(int j = 0 ; j < nx ; ++j){ printf("%4d",matrix[i * nx + j]); } printf("\n"); } //printf("\n\n"); Max = Max + 1; // karena index dimulai dari 0 dan Maximum 3 ( 0 - 3 = 4 ) jadi Max ditambah 1; int kBytes = Max * Max * sizeof(float); int nBytes = nx * ny * sizeof(float); cudaMallocManaged(&glcm, (Max * Max) * sizeof(int)); cudaMallocManaged(&transposed, (Max * Max) * sizeof(int)); cudaMallocManaged(&mulMatrix, (Max * Max) * sizeof(float)); cudaMallocManaged(&sumMatrix, (Max * Max) * sizeof(float)); cudaMallocManaged(&norm, (Max * Max) * sizeof(float)); for(int i = 0 ; i < (Max * Max) ; ++i){ glcm[i] = 0; mulMatrix[i] = 0; } float*ASM,*contrast,*IDM,*entropy,*miu_i,*miu_j,*std_i,*std_j,*korelasi,*variance,*sav,*sen,*sva,*den,*HX,*HY,*HXY1,*dva; cudaMallocManaged(&ASM, (Max * Max) * sizeof(float)); cudaMallocManaged(&contrast, (Max * Max) * sizeof(float)); cudaMallocManaged(&IDM, (Max * Max) * sizeof(float)); cudaMallocManaged(&entropy, (Max * Max) * sizeof(float)); cudaMallocManaged(&miu_i, (Max * Max) * sizeof(float)); cudaMallocManaged(&miu_j, (Max * Max) * sizeof(float)); cudaMallocManaged(&std_i, (Max * Max) * sizeof(float)); cudaMallocManaged(&std_j, (Max * Max) * sizeof(float)); cudaMallocManaged(&korelasi, (Max * Max) * sizeof(float)); cudaMallocManaged(&variance, (Max * Max) * sizeof(float)); cudaMallocManaged(&sav, (Max * Max) * sizeof(float)); cudaMallocManaged(&sen, (Max * Max) * sizeof(float)); cudaMallocManaged(&sva, (Max * Max) * sizeof(float)); cudaMallocManaged(&den, (Max * Max) * sizeof(float)); cudaMallocManaged(&HX, (Max * Max) * sizeof(float)); cudaMallocManaged(&HY, (Max * Max) * sizeof(float)); cudaMallocManaged(&dva, (Max * Max) * sizeof(float)); cudaMallocManaged(&HXY1, (Max * Max) * sizeof(float)); dim3 block(ny); dim3 grid((nx + block.x - 1) / block.x, (nx + block.y - 1) / block.y); dim3 blocks(2,2); dim3 grids((Max + blocks.x - 1) / blocks.x, (Max + blocks.y - 1) / blocks.y); cudaGetLastError(); clock_t start, end; double t = 0; start = clock(); // invoke kernel for calculation // glcm_calculation_nol<<<ny,nx>>>(matrix,glcm, nx, ny,Max); // cudaDeviceSynchronize(); // AddToitTranspose<<<grids,blocks>>>(transposed,glcm,Max); // cudaDeviceSynchronize(); // printf("hasil transpose\n"); // for(int i = 0 ; i < Max ; ++i){ // for(int j = 0 ; j < Max ; ++j){ // printf("%4d ",transposed[i * Max + j]); // } // printf("\n"); // } if(degree==0){ glcm_calculation_nol<<<ny,nx>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree ==180){ glcm_calculation_180<<<ny,nx>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==270){ dim3 
block(1, nx); dim3 grid((ny + block.x - 1) / block.x, (ny + block.y - 1) / block.y); glcm_calculation_270<<<grid,block>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==90){ dim3 block(1, nx); dim3 grid((ny + block.x - 1) / block.x, (ny + block.y - 1) / block.y); glcm_calculation_90<<<grid,block>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==45){ glcm_calculation_45<<<ny,nx>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==135){ glcm_calculation_135<<<ny,nx>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==225){ glcm_calculation_225<<<ny,nx>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } else if(degree==315){ glcm_calculation_315<<<ny,nx>>>(matrix,glcm, nx, ny,Max); cudaDeviceSynchronize(); end = clock(); AddToitTranspose<<<grid,block>>>(transposed,glcm,Max); cudaDeviceSynchronize(); printMatrixGlcm(transposed,Max,degree); } // printMatrixGlcm(glcm,Max,0); printf("hasil glcm\n"); for(int i = 0 ; i < Max ; ++i){ for(int j = 0 ; j < Max ; ++j){ printf("%4d ",glcm[i * Max + j]); } printf("\n"); } t = ((double) (end - start))/CLOCKS_PER_SEC; int sum; sum=0; for(int i=0;i<Max*Max;i++){ sum +=transposed[i]; //if(transposed[i]>0){ // printf("%d\n",transposed[i]); //} } printf("sum %d",sum); normalization<<<Max,Max>>>(transposed,norm,Max,sum); cudaDeviceSynchronize(); printf("Hasil normalisasi : \n"); // for(int i = 0 ; i < Max ; ++i){ // for(int j = 0 ; j < Max ; ++j){ // //if(norm[i * Max + j]>0) // printf("%.7f ",norm[i * Max + j]); // } // printf("\n"); // } printMatrixnxormalization(norm,Max,0); float sums; sums=0; for(int i=0;i<Max*Max;i++){ sums +=norm[i]; } //Jumlah <<< Max,Max >>>(sumMatrix,norm); printf("jumlah %f\n",sums); int *dif; dif = (int *)malloc(kBytes); int *d_dif; (cudaMalloc((void **)&d_dif, nBytes)); // transfer data from host to device (cudaMemcpy(d_dif, dif, kBytes, cudaMemcpyHostToDevice)); dim3 b(32,32); dim3 g((Max + b.x - 1) / b.x, (Max + b.y - 1) / b.y); //Step1 calculate_contrast<<<g,b>>>(norm,contrast,Max); calculate_entropy<<<g,b>>>(norm,entropy,Max); calculate_IDM<<<g,b>>>(norm,IDM,Max); calculate_ASM<<<g,b>>>(norm,ASM,Max); calculate_miu_i<<<g,b>>>(norm,miu_i,Max); calculate_miu_j<<<g,b>>>(norm,miu_j,Max); cudaDeviceSynchronize(); //Step2 calculate_std_i<<<g,b>>>(norm,std_i,miu_i,Max); calculate_std_j<<<g,b>>>(norm,std_j,miu_j,Max); calculate_variance<<<g,b>>>(norm,variance,miu_i,Max); calculate_sumaverage<<<g,b>>>(norm,sav,Max); calculate_sumentropy<<<g,b>>>(norm,sen,Max); calculate_differenceentropy<<<g,b>>>(norm,den,Max); calculate_HX<<<g,b>>>(norm,HX,Max); calculate_HY<<<g,b>>>(norm,HY,Max); calculate_HXY1<<<g,b>>>(norm,HXY1,Max); cudaDeviceSynchronize(); //Step3 calculate_sumvariance<<<g,b>>>(norm,sva,sen,Max); calculate_korelasi<<<g,b>>>(norm,korelasi,miu_i,std_i,miu_j,std_j,Max); 
calculate_dva<<<g,b>>>(norm,dva,Max); cudaDeviceSynchronize(); printf("ASM : %.13f\n",ASM[0]); printf("Contrast : %.13f\n",contrast[0]); printf("IDM : %.13f\n",IDM[0]); printf("entropy : %.13f\n",-(entropy[0])); printf("miu_i : %.13f\n",(miu_i[0])); printf("miu_j : %.13f\n",(miu_j[0])); printf("std_i : %.13f\n",(std_i[0])); printf("std_j : %.13f\n",(std_j[0])); printf("variance : %.13f\n",(variance[0])); printf("SAV : %.13f\n",(sav[0])); printf("SEN : %.13f\n",-(sen[0])); printf("SVA : %.13f\n",(sva[0])); printf("DEN : %.13f\n",-(den[0])); printf("HX : %.13f\n",-(HX[0])); printf("HY : %.13f\n",-(HY[0])); printf("HXY1 : %.13f\n",-(HXY1[0])); printf("IMC : %.13f\n",(entropy[0]-HXY1[0])/max(-(HX[0]),-(HY[0]))); printf("korelasi : %.13f\n",(korelasi[0])); printf("Difference Variance : %.13f\n",(dva[0])); printf("image matrix saved to matrixgambarmetode1 bukti.txt\n"); printf("glcm matrix saved to matrix_glcm_bukti_%d.txt\n",0); printf("normalized glcm matrix saved to matrix_ormalisasi_%d_bukti.txt\n",0); printf("execution time: %f\n",t); // free host and device memory cudaFree(matrix);cudaFree(glcm);cudaFree(norm); cudaFree(mulMatrix); }
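The feature kernels above each reduce the normalized co-occurrence matrix into a single scalar with atomicAdd. When validating them, a serial host reference is handy. The sketch below is a hypothetical helper (not part of the original program): it assumes norm has already been filled by the normalization kernel (managed memory, so host-readable after cudaDeviceSynchronize()), that <math.h> is included, and that the kernels implement the standard Haralick definitions, which the correlation and variance kernels shown above appear to follow.

// Hypothetical host-side cross-check of a few GLCM features; p is the levels x levels normalized matrix.
void glcm_features_host(const float *p, int levels){
    double contrast = 0, asmv = 0, idm = 0, entropy = 0;
    double miu_i = 0, miu_j = 0, var_i = 0, var_j = 0, corr = 0;
    for(int i = 0; i < levels; ++i)
        for(int j = 0; j < levels; ++j){
            double v = p[i * levels + j];
            contrast += (i - j) * (i - j) * v;          // sum (i-j)^2 * p(i,j)
            asmv     += v * v;                          // angular second moment
            idm      += v / (1.0 + (i - j) * (i - j));  // inverse difference moment
            if(v > 0) entropy -= v * log10(v);          // log10, matching the entropy kernels above
            miu_i += i * v;  miu_j += j * v;
        }
    for(int i = 0; i < levels; ++i)
        for(int j = 0; j < levels; ++j){
            double v = p[i * levels + j];
            var_i += (i - miu_i) * (i - miu_i) * v;
            var_j += (j - miu_j) * (j - miu_j) * v;
        }
    double std_i = sqrt(var_i), std_j = sqrt(var_j);
    for(int i = 0; i < levels; ++i)
        for(int j = 0; j < levels; ++j)
            corr += ((i - miu_i) * (j - miu_j) * p[i * levels + j]) / (std_i * std_j);
    printf("host check: contrast %f ASM %f IDM %f entropy %f correlation %f\n",
           contrast, asmv, idm, entropy, corr);
}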
f441faa64ac01f15c8faed32e80cdbbcda226d3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernelExtractUniqueEdge.h" __global__ void kernelExtractUniqueEdge(int *d_O,int *d_LO,unsigned int numberElementOfd_O,int *d_N,int *d_LN,unsigned int numberElementOfd_N,int *d_singlePattern,unsigned int Lv,unsigned int Le){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i<numberElementOfd_O){ /*printf("\nThread:%d",i); */ if(d_O[i]!=-1){ int j; //printf("\nThread:%d",i); for(j=i+1;j<numberElementOfd_O;++j){ if(d_O[j]!=-1) {break;} } int ek; if (j==numberElementOfd_O) { ek=numberElementOfd_N; } else { ek=d_O[j]; } int Li=d_LO[i]; int startIndex=((Lv+(Lv-(Li-1)))*(Lv-(Lv-(Li-1))+1)/2)*Le; for (int k=d_O[i];k<ek;++k){ int Lj, Lij; Lij=d_LN[k]; Lj=d_LO[d_N[k]]; if(Lj<Li) continue; startIndex=startIndex+Lij*(Lv-Li) + (Lj-Li); d_singlePattern[startIndex]=1; //printf("\nThread:%d Li:%d Lj:%d Le:%d index:%d d_signlePattern:%d\n",i,Li,Lj,Le,startIndex,d_singlePattern[startIndex]); startIndex=startIndex-(Lj-Li); //printf("index:%d [%d] ",index,d_singlePattern[index]); } } } }
f441faa64ac01f15c8faed32e80cdbbcda226d3e.cu
#include "kernelExtractUniqueEdge.h" __global__ void kernelExtractUniqueEdge(int *d_O,int *d_LO,unsigned int numberElementOfd_O,int *d_N,int *d_LN,unsigned int numberElementOfd_N,int *d_singlePattern,unsigned int Lv,unsigned int Le){ int i = blockIdx.x*blockDim.x + threadIdx.x; if (i<numberElementOfd_O){ /*printf("\nThread:%d",i); */ if(d_O[i]!=-1){ int j; //printf("\nThread:%d",i); for(j=i+1;j<numberElementOfd_O;++j){ if(d_O[j]!=-1) {break;} } int ek; if (j==numberElementOfd_O) { ek=numberElementOfd_N; } else { ek=d_O[j]; } int Li=d_LO[i]; int startIndex=((Lv+(Lv-(Li-1)))*(Lv-(Lv-(Li-1))+1)/2)*Le; for (int k=d_O[i];k<ek;++k){ int Lj, Lij; Lij=d_LN[k]; Lj=d_LO[d_N[k]]; if(Lj<Li) continue; startIndex=startIndex+Lij*(Lv-Li) + (Lj-Li); d_singlePattern[startIndex]=1; //printf("\nThread:%d Li:%d Lj:%d Le:%d index:%d d_signlePattern:%d\n",i,Li,Lj,Le,startIndex,d_singlePattern[startIndex]); startIndex=startIndex-(Lj-Li); //printf("index:%d [%d] ",index,d_singlePattern[index]); } } } }
329dbd8c95ee0795d5b67b7a2533bf8367784210.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <string.h> #include <stdio.h> #include <time.h> #include "bundleElt.h" #define MAX_ETA 1e6 #define MIN_TANH_MAGNITUDE 1e-10 // NOTE: pragma unroll before the loops did not improve performance // For implementation, // I had to make the loop termination index constant; and wrap the // the loop body in a if (n < thisRowLength) ... __global__ void checkNodeProcessingOptimalBlock (unsigned int numChecks, unsigned int maxBitsForCheck, bundleElt *lambdaByCheckIndex, bundleElt *eta, unsigned int* mapRows2Cols, bundleElt *etaByBitIndex, unsigned int nChecksByBits, unsigned int nBitsByChecks, unsigned int nBundles) { unsigned int bundleIndex, bundleBase, etaIndex; unsigned int m, n; unsigned int thisRowLength, currentIndex; bundleElt arg, value; bundleElt myprod; bundleIndex = blockIdx.x / numChecks; m = blockIdx.x % numChecks; n = threadIdx.x + 1; if (bundleIndex < nBundles) { bundleBase = bundleIndex* nChecksByBits; __shared__ bundleElt rowVals[128]; thisRowLength = (int) ONEVAL(eta[m]); if (n <= thisRowLength) { currentIndex = m + (n* numChecks); etaIndex = bundleBase + currentIndex; arg = (eta[etaIndex] - lambdaByCheckIndex[etaIndex]) / 2.0; for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) { value.s[slot] = tanhf(arg.s[slot]); if (value.s[slot] == 0.0) {value.s[slot] = MIN_TANH_MAGNITUDE;} } rowVals[n] = value; __syncthreads(); myprod = make_bundleElt(1.0); for (unsigned int j=1; j<= thisRowLength; j++) if (j != n) myprod *= rowVals[j]; for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) value.s[slot] = -2 * atanhf(myprod.s[slot]); value = clamp(value, -MAX_ETA, MAX_ETA); eta[etaIndex] = value; etaByBitIndex[ (bundleIndex * nBitsByChecks) + mapRows2Cols[currentIndex] ] = value; } } }
329dbd8c95ee0795d5b67b7a2533bf8367784210.cu
#include <math.h> #include <string.h> #include <stdio.h> #include <time.h> #include "bundleElt.h" #define MAX_ETA 1e6 #define MIN_TANH_MAGNITUDE 1e-10 // NOTE: pragma unroll before the loops did not improve performance // For implementation, // I had to make the loop termination index constant; and wrap the // the loop body in a if (n < thisRowLength) ... __global__ void checkNodeProcessingOptimalBlock (unsigned int numChecks, unsigned int maxBitsForCheck, bundleElt *lambdaByCheckIndex, bundleElt *eta, unsigned int* mapRows2Cols, bundleElt *etaByBitIndex, unsigned int nChecksByBits, unsigned int nBitsByChecks, unsigned int nBundles) { unsigned int bundleIndex, bundleBase, etaIndex; unsigned int m, n; unsigned int thisRowLength, currentIndex; bundleElt arg, value; bundleElt myprod; bundleIndex = blockIdx.x / numChecks; m = blockIdx.x % numChecks; n = threadIdx.x + 1; if (bundleIndex < nBundles) { bundleBase = bundleIndex* nChecksByBits; __shared__ bundleElt rowVals[128]; thisRowLength = (int) ONEVAL(eta[m]); if (n <= thisRowLength) { currentIndex = m + (n* numChecks); etaIndex = bundleBase + currentIndex; arg = (eta[etaIndex] - lambdaByCheckIndex[etaIndex]) / 2.0; for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) { value.s[slot] = tanhf(arg.s[slot]); if (value.s[slot] == 0.0) {value.s[slot] = MIN_TANH_MAGNITUDE;} } rowVals[n] = value; __syncthreads(); myprod = make_bundleElt(1.0); for (unsigned int j=1; j<= thisRowLength; j++) if (j != n) myprod *= rowVals[j]; for (unsigned int slot=0; slot< SLOTS_PER_ELT; slot++) value.s[slot] = -2 * atanhf(myprod.s[slot]); value = clamp(value, -MAX_ETA, MAX_ETA); eta[etaIndex] = value; etaByBitIndex[ (bundleIndex * nBitsByChecks) + mapRows2Cols[currentIndex] ] = value; } } }
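The index arithmetic at the top of checkNodeProcessingOptimalBlock (bundleIndex = blockIdx.x / numChecks, m = blockIdx.x % numChecks, n = threadIdx.x + 1) implies one block per (bundle, check-node) pair and one thread per bit position of a check row. The launch below is a plausible configuration inferred from that indexing, not the project's actual driver; note that the shared rowVals[128] array caps maxBitsForCheck at 127 for this kernel.

// Plausible launch inferred from the kernel's indexing; the real decoder driver lives elsewhere.
dim3 block(maxBitsForCheck);           // thread n = threadIdx.x + 1 handles one bit of a check
dim3 grid(numChecks * nBundles);       // blockIdx.x encodes (bundleIndex, check index m)
checkNodeProcessingOptimalBlock<<<grid, block>>>(numChecks, maxBitsForCheck,
    lambdaByCheckIndex, eta, mapRows2Cols, etaByBitIndex,
    nChecksByBits, nBitsByChecks, nBundles);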
4beb4f450ae1ea94f02144bafe2afe061dba2eab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { #include "../shape/head.h" } __global__ void lghtcrv_spline_krnl(struct dat_t *ddat, int set, double yp1, double ypn, double *u, int ncalc) { /*(double *x - lghtcrv->x * double *y - lghtcrv->y * int n - calc * double yp1 - 2.0e30 * double ypn - 2.0e30 * double *y2 - lghtcrv->y2)*/ int i, k, n=ncalc; double p, qn, sig, un; double *x = ddat->set[set].desc.lghtcrv.x; double *y = ddat->set[set].desc.lghtcrv.y; double *y2 = ddat->set[set].desc.lghtcrv.y2; /* single-threaded kernel */ if (threadIdx.x == 0) { u[0] = 0.0; if (yp1 > 0.99e30) y2[1] = u[1]=0.0; else { y2[1] = -0.5; u[1] = (3.0 / (x[2]-x[1])) * ((y[2]-y[1]) / (x[2]-x[1]) - yp1); } for (i=2; i<=n-1; i++) { sig = (x[i]-x[i-1]) / (x[i+1]-x[i-1]); p = sig * y2[i-1] + 2.0; y2[i] = (sig - 1.0) / p; u[i] = (y[i+1] - y[i]) / (x[i+1] - x[i]) - (y[i] - y[i-1]) / (x[i] - x[i-1]); u[i] = (6.0 * u[i] / (x[i+1]-x[i-1]) - sig*u[i-1] ) / p; } if (ypn > 0.99e30) qn = un = 0.0; else { qn = 0.5; un = (3.0 / (x[n] - x[n-1])) * (ypn - (y[n] - y[n-1]) / (x[n] - x[n-1])); } y2[n] = (un - qn * u[n-1]) / (qn * y2[n-1] + 1.0); for (k=n-1; k>=1; k--) y2[k] = y2[k] * y2[k+1] + u[k]; } }
4beb4f450ae1ea94f02144bafe2afe061dba2eab.cu
extern "C" { #include "../shape/head.h" } __global__ void lghtcrv_spline_krnl(struct dat_t *ddat, int set, double yp1, double ypn, double *u, int ncalc) { /*(double *x - lghtcrv->x * double *y - lghtcrv->y * int n - calc * double yp1 - 2.0e30 * double ypn - 2.0e30 * double *y2 - lghtcrv->y2)*/ int i, k, n=ncalc; double p, qn, sig, un; double *x = ddat->set[set].desc.lghtcrv.x; double *y = ddat->set[set].desc.lghtcrv.y; double *y2 = ddat->set[set].desc.lghtcrv.y2; /* single-threaded kernel */ if (threadIdx.x == 0) { u[0] = 0.0; if (yp1 > 0.99e30) y2[1] = u[1]=0.0; else { y2[1] = -0.5; u[1] = (3.0 / (x[2]-x[1])) * ((y[2]-y[1]) / (x[2]-x[1]) - yp1); } for (i=2; i<=n-1; i++) { sig = (x[i]-x[i-1]) / (x[i+1]-x[i-1]); p = sig * y2[i-1] + 2.0; y2[i] = (sig - 1.0) / p; u[i] = (y[i+1] - y[i]) / (x[i+1] - x[i]) - (y[i] - y[i-1]) / (x[i] - x[i-1]); u[i] = (6.0 * u[i] / (x[i+1]-x[i-1]) - sig*u[i-1] ) / p; } if (ypn > 0.99e30) qn = un = 0.0; else { qn = 0.5; un = (3.0 / (x[n] - x[n-1])) * (ypn - (y[n] - y[n-1]) / (x[n] - x[n-1])); } y2[n] = (un - qn * u[n-1]) / (qn * y2[n-1] + 1.0); for (k=n-1; k>=1; k--) y2[k] = y2[k] * y2[k+1] + u[k]; } }
77b95364b56672123aff4050a416db70e39f8a8c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <iostream> #include <fstream> #include <vector> #include <sys/stat.h> #include "gtest/gtest.h" #include "gmock/gmock.h" #include <cudf.h> #include <NVStrings.h> bool checkFile(const char *fname) { struct stat st; return (stat(fname, &st) ? 0 : 1); } // DESCRIPTION: Simple test internal helper class to transfer cudf column data // from device to host for test comparisons and debugging/development template <typename T> class gdf_host_column { public: gdf_host_column() = delete; explicit gdf_host_column(gdf_column* const col) { m_hostdata = std::vector<T>(col->size); hipMemcpy(m_hostdata.data(), col->data, sizeof(T) * col->size, hipMemcpyDeviceToHost); } auto hostdata() const -> const auto& { return m_hostdata; } void print() const { for (size_t i = 0; i < m_hostdata.size(); ++i) { std::cout << "[" << i << "]: value=" << m_hostdata[i] << "\n"; } } private: std::vector<T> m_hostdata; }; TEST(gdf_csv_test, Simple) { const char* fname = "/tmp/CsvSimpleTest.csv"; const char* names[] = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" }; const char* types[] = { "int32", "int32", "int32", "int32", "int32", "int32", "int32", "int32", "int32", "int32", }; std::ofstream outfile(fname, std::ofstream::out); outfile << "10,20,30,40,50,60,70,80,90,100\n"\ "11,21,31,41,51,61,71,81,91,101\n"\ "12,22,32,42,52,62,72,82,92,102\n"\ "13,23,33,43,53,63,73,83,93,103\n"; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); auto firstCol = gdf_host_column<int32_t>(args.data[0]); auto sixthCol = gdf_host_column<int32_t>(args.data[5]); EXPECT_THAT(firstCol.hostdata(), ::testing::ElementsAre(10, 11, 12, 13)); EXPECT_THAT(sixthCol.hostdata(), ::testing::ElementsAre(60, 61, 62, 63)); } } TEST(gdf_csv_test, MortPerf) { gdf_error error = GDF_SUCCESS; csv_read_arg args{}; const int num_cols = 31; args.num_cols = num_cols; args.nrows = -1; const char ** dnames = new const char *[num_cols] { "loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb", "loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity", "maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code", "zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after", "disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs", "asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds", "credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds", "non_interest_bearing_upb", "principal_forgiveness_upb", 
"repurchase_make_whole_proceeds_flag", "foreclosure_principal_write_off_amount", "servicing_activity_indicator" }; args.names = dnames; const char ** dtype = new const char *[num_cols] { "int64", "date", "category", "float64", "float64", "float64", "float64", "float64", "date", "float64", "category", "category", "category", "date", "date", "date", "date", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "category", "float64", "category" }; args.dtype = dtype; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = (char *)("/tmp/Performance_2000Q1.txt"); if ( checkFile(args.filepath_or_buffer)) { args.delimiter = '|'; args.lineterminator = '\n'; args.delim_whitespace = 0; args.skipinitialspace = 0; args.skiprows = 0; args.skipfooter = 0; args.dayfirst = 0; args.mangle_dupe_cols=true; args.num_cols_out=0; args.use_cols_int = NULL; args.use_cols_char = NULL; args.use_cols_char_len = 0; args.use_cols_int_len = 0; args.names = NULL; args.dtype = NULL; error = read_csv(&args); } EXPECT_TRUE( error == GDF_SUCCESS ); } TEST(gdf_csv_test, Strings) { const char* fname = "/tmp/CsvStringsTest.csv"; const char* names[] = { "line", "verse" }; const char* types[] = { "int32", "str" }; std::ofstream outfile(fname, std::ofstream::out); outfile << names[0] << ',' << names[1] << ',' << '\n'; outfile << "10,abc def ghi" << '\n'; outfile << "20,\"jkl mno pqr\"" << '\n'; outfile << "30,stu \"\"vwx\"\" yz" << '\n'; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.skiprows = 1; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // No filtering of any columns EXPECT_EQ( args.num_cols_out, args.num_cols ); // Check the parsed string column metadata ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); auto stringList = reinterpret_cast<NVStrings*>(args.data[1]->data); ASSERT_NE( stringList, nullptr ); auto stringCount = stringList->size(); ASSERT_EQ( stringCount, 3u ); auto stringLengths = std::unique_ptr<int[]>{ new int[stringCount] }; ASSERT_NE( stringList->len(stringLengths.get(), false), 0u ); // Check the actual strings themselves auto strings = std::unique_ptr<char*[]>{ new char*[stringCount] }; for (size_t i = 0; i < stringCount; ++i) { ASSERT_GT( stringLengths[i], 0 ); strings[i] = new char[stringLengths[i]]; } EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); EXPECT_STREQ( strings[0], "abc def ghi" ); EXPECT_STREQ( strings[1], "\"jkl mno pqr\"" ); EXPECT_STREQ( strings[2], "stu \"\"vwx\"\" yz" ); for (size_t i = 0; i < stringCount; ++i) { delete[] strings[i]; } } } TEST(gdf_csv_test, QuotedStrings) { const char* fname = "/tmp/CsvQuotedStringsTest.csv"; const char* names[] = { "line", "verse" }; const char* types[] = { "int32", "str" }; std::ofstream outfile(fname, std::ofstream::out); outfile << names[0] << ',' << names[1] << ',' << '\n'; outfile << "10,`abc,\ndef, ghi`" << '\n'; outfile << "20,`jkl, ``mno``, pqr`" << '\n'; outfile << "30,stu `vwx` yz" << '\n'; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; 
args.lineterminator = '\n'; args.quotechar = '`'; args.quoting = true; // strip outermost quotechar args.doublequote = true; // replace double quotechar with single args.skiprows = 1; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // No filtering of any columns EXPECT_EQ( args.num_cols_out, args.num_cols ); // Check the parsed string column metadata ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); auto stringList = reinterpret_cast<NVStrings*>(args.data[1]->data); ASSERT_NE( stringList, nullptr ); auto stringCount = stringList->size(); ASSERT_EQ( stringCount, 3u ); auto stringLengths = std::unique_ptr<int[]>{ new int[stringCount] }; ASSERT_NE( stringList->len(stringLengths.get(), false), 0u ); // Check the actual strings themselves auto strings = std::unique_ptr<char*[]>{ new char*[stringCount] }; for (size_t i = 0; i < stringCount; ++i) { ASSERT_GT( stringLengths[i], 0 ); strings[i] = new char[stringLengths[i]]; } EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); EXPECT_STREQ( strings[0], "abc,\ndef, ghi" ); EXPECT_STREQ( strings[1], "jkl, `mno`, pqr" ); EXPECT_STREQ( strings[2], "stu `vwx` yz" ); for (size_t i = 0; i < stringCount; ++i) { delete[] strings[i]; } } } TEST(gdf_csv_test, KeepFullQuotedStrings) { const char* fname = "/tmp/CsvKeepFullQuotedStringsTest.csv"; const char* names[] = { "line", "verse" }; const char* types[] = { "int32", "str" }; std::ofstream outfile(fname, std::ofstream::out); outfile << names[0] << ',' << names[1] << ',' << '\n'; outfile << "10,\"abc,\ndef, ghi\"" << '\n'; outfile << "20,\"jkl, \"\"mno\"\", pqr\"" << '\n'; outfile << "30,stu \"vwx\" yz" << '\n'; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.quotechar = '\"'; args.quoting = false; // do not strip outermost quotechar args.doublequote = false; // do not replace double quotechar with single args.skiprows = 1; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // No filtering of any columns EXPECT_EQ( args.num_cols_out, args.num_cols ); // Check the parsed string column metadata ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); auto stringList = reinterpret_cast<NVStrings*>(args.data[1]->data); ASSERT_NE( stringList, nullptr ); auto stringCount = stringList->size(); ASSERT_EQ( stringCount, 3u ); auto stringLengths = std::unique_ptr<int[]>{ new int[stringCount] }; ASSERT_NE( stringList->len(stringLengths.get(), false), 0u ); // Check the actual strings themselves auto strings = std::unique_ptr<char*[]>{ new char*[stringCount] }; for (size_t i = 0; i < stringCount; ++i) { ASSERT_GT( stringLengths[i], 0 ); strings[i] = new char[stringLengths[i]]; } EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); EXPECT_STREQ( strings[0], "\"abc,\ndef, ghi\"" ); EXPECT_STREQ( strings[1], "\"jkl, \"\"mno\"\", pqr\"" ); EXPECT_STREQ( strings[2], "stu \"vwx\" yz" ); for (size_t i = 0; i < stringCount; ++i) { delete[] strings[i]; } } } TEST(gdf_csv_test, SpecifiedBoolValues) { const char* fname = "/tmp/CsvSpecifiedBoolValuesTest.csv"; const char* names[] = { "A", "B", "C" }; const char* types[] = { "int32", "int32", "short" }; const char* trueValues[] = { "yes", "Yes", "YES", "foo", "FOO" }; const char* falseValues[] = { "no", "No", "NO", "Bar", "bar" }; std::ofstream outfile(fname, std::ofstream::out); outfile << 
"YES,1,bar\nno,2,FOO\nBar,3,yes\nNo,4,NO\nYes,5,foo\n"; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.true_values = trueValues; args.num_true_values = std::extent<decltype(trueValues)>::value; args.false_values = falseValues; args.num_false_values = std::extent<decltype(falseValues)>::value; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // Booleans are the same (integer) data type, but valued at 0 or 1 EXPECT_EQ( args.num_cols_out, args.num_cols ); ASSERT_EQ( args.data[0]->dtype, GDF_INT32 ); ASSERT_EQ( args.data[2]->dtype, GDF_INT16 ); auto firstCol = gdf_host_column<int32_t>(args.data[0]); EXPECT_THAT(firstCol.hostdata(), ::testing::ElementsAre(1, 0, 0, 0, 1)); auto thirdCol = gdf_host_column<int16_t>(args.data[2]); EXPECT_THAT(thirdCol.hostdata(), ::testing::ElementsAre(0, 1, 1, 0, 1)); } } TEST(gdf_csv_test, Dates) { const char* fname = "/tmp/CsvDatesTest.csv"; const char* names[] = { "A" }; const char* types[] = { "date" }; std::ofstream outfile(fname, std::ofstream::out); outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n"; outfile << "18/04/1995\n14/07/1994\n07/06/2006\n16/09/2005\n2/2/1970\n"; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.dayfirst = true; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); EXPECT_EQ( args.num_cols_out, args.num_cols ); ASSERT_EQ( args.data[0]->dtype, GDF_DATE64 ); auto ACol = gdf_host_column<uint64_t>(args.data[0]); EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(983750400000, 1288483200000, 782611200000, 656208000000, 0, 798163200000, 774144000000, 1149638400000, 1126828800000, 2764800000) ); } }
77b95364b56672123aff4050a416db70e39f8a8c.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <iostream> #include <fstream> #include <vector> #include <sys/stat.h> #include "gtest/gtest.h" #include "gmock/gmock.h" #include <cudf.h> #include <NVStrings.h> bool checkFile(const char *fname) { struct stat st; return (stat(fname, &st) ? 0 : 1); } // DESCRIPTION: Simple test internal helper class to transfer cudf column data // from device to host for test comparisons and debugging/development template <typename T> class gdf_host_column { public: gdf_host_column() = delete; explicit gdf_host_column(gdf_column* const col) { m_hostdata = std::vector<T>(col->size); cudaMemcpy(m_hostdata.data(), col->data, sizeof(T) * col->size, cudaMemcpyDeviceToHost); } auto hostdata() const -> const auto& { return m_hostdata; } void print() const { for (size_t i = 0; i < m_hostdata.size(); ++i) { std::cout << "[" << i << "]: value=" << m_hostdata[i] << "\n"; } } private: std::vector<T> m_hostdata; }; TEST(gdf_csv_test, Simple) { const char* fname = "/tmp/CsvSimpleTest.csv"; const char* names[] = { "A", "B", "C", "D", "E", "F", "G", "H", "I", "J" }; const char* types[] = { "int32", "int32", "int32", "int32", "int32", "int32", "int32", "int32", "int32", "int32", }; std::ofstream outfile(fname, std::ofstream::out); outfile << "10,20,30,40,50,60,70,80,90,100\n"\ "11,21,31,41,51,61,71,81,91,101\n"\ "12,22,32,42,52,62,72,82,92,102\n"\ "13,23,33,43,53,63,73,83,93,103\n"; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); auto firstCol = gdf_host_column<int32_t>(args.data[0]); auto sixthCol = gdf_host_column<int32_t>(args.data[5]); EXPECT_THAT(firstCol.hostdata(), ::testing::ElementsAre(10, 11, 12, 13)); EXPECT_THAT(sixthCol.hostdata(), ::testing::ElementsAre(60, 61, 62, 63)); } } TEST(gdf_csv_test, MortPerf) { gdf_error error = GDF_SUCCESS; csv_read_arg args{}; const int num_cols = 31; args.num_cols = num_cols; args.nrows = -1; const char ** dnames = new const char *[num_cols] { "loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb", "loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity", "maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code", "zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after", "disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs", "asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds", "credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds", "non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag", 
"foreclosure_principal_write_off_amount", "servicing_activity_indicator" }; args.names = dnames; const char ** dtype = new const char *[num_cols] { "int64", "date", "category", "float64", "float64", "float64", "float64", "float64", "date", "float64", "category", "category", "category", "date", "date", "date", "date", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "float64", "category", "float64", "category" }; args.dtype = dtype; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = (char *)("/tmp/Performance_2000Q1.txt"); if ( checkFile(args.filepath_or_buffer)) { args.delimiter = '|'; args.lineterminator = '\n'; args.delim_whitespace = 0; args.skipinitialspace = 0; args.skiprows = 0; args.skipfooter = 0; args.dayfirst = 0; args.mangle_dupe_cols=true; args.num_cols_out=0; args.use_cols_int = NULL; args.use_cols_char = NULL; args.use_cols_char_len = 0; args.use_cols_int_len = 0; args.names = NULL; args.dtype = NULL; error = read_csv(&args); } EXPECT_TRUE( error == GDF_SUCCESS ); } TEST(gdf_csv_test, Strings) { const char* fname = "/tmp/CsvStringsTest.csv"; const char* names[] = { "line", "verse" }; const char* types[] = { "int32", "str" }; std::ofstream outfile(fname, std::ofstream::out); outfile << names[0] << ',' << names[1] << ',' << '\n'; outfile << "10,abc def ghi" << '\n'; outfile << "20,\"jkl mno pqr\"" << '\n'; outfile << "30,stu \"\"vwx\"\" yz" << '\n'; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.skiprows = 1; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // No filtering of any columns EXPECT_EQ( args.num_cols_out, args.num_cols ); // Check the parsed string column metadata ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); auto stringList = reinterpret_cast<NVStrings*>(args.data[1]->data); ASSERT_NE( stringList, nullptr ); auto stringCount = stringList->size(); ASSERT_EQ( stringCount, 3u ); auto stringLengths = std::unique_ptr<int[]>{ new int[stringCount] }; ASSERT_NE( stringList->len(stringLengths.get(), false), 0u ); // Check the actual strings themselves auto strings = std::unique_ptr<char*[]>{ new char*[stringCount] }; for (size_t i = 0; i < stringCount; ++i) { ASSERT_GT( stringLengths[i], 0 ); strings[i] = new char[stringLengths[i]]; } EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); EXPECT_STREQ( strings[0], "abc def ghi" ); EXPECT_STREQ( strings[1], "\"jkl mno pqr\"" ); EXPECT_STREQ( strings[2], "stu \"\"vwx\"\" yz" ); for (size_t i = 0; i < stringCount; ++i) { delete[] strings[i]; } } } TEST(gdf_csv_test, QuotedStrings) { const char* fname = "/tmp/CsvQuotedStringsTest.csv"; const char* names[] = { "line", "verse" }; const char* types[] = { "int32", "str" }; std::ofstream outfile(fname, std::ofstream::out); outfile << names[0] << ',' << names[1] << ',' << '\n'; outfile << "10,`abc,\ndef, ghi`" << '\n'; outfile << "20,`jkl, ``mno``, pqr`" << '\n'; outfile << "30,stu `vwx` yz" << '\n'; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.quotechar = 
'`'; args.quoting = true; // strip outermost quotechar args.doublequote = true; // replace double quotechar with single args.skiprows = 1; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // No filtering of any columns EXPECT_EQ( args.num_cols_out, args.num_cols ); // Check the parsed string column metadata ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); auto stringList = reinterpret_cast<NVStrings*>(args.data[1]->data); ASSERT_NE( stringList, nullptr ); auto stringCount = stringList->size(); ASSERT_EQ( stringCount, 3u ); auto stringLengths = std::unique_ptr<int[]>{ new int[stringCount] }; ASSERT_NE( stringList->len(stringLengths.get(), false), 0u ); // Check the actual strings themselves auto strings = std::unique_ptr<char*[]>{ new char*[stringCount] }; for (size_t i = 0; i < stringCount; ++i) { ASSERT_GT( stringLengths[i], 0 ); strings[i] = new char[stringLengths[i]]; } EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); EXPECT_STREQ( strings[0], "abc,\ndef, ghi" ); EXPECT_STREQ( strings[1], "jkl, `mno`, pqr" ); EXPECT_STREQ( strings[2], "stu `vwx` yz" ); for (size_t i = 0; i < stringCount; ++i) { delete[] strings[i]; } } } TEST(gdf_csv_test, KeepFullQuotedStrings) { const char* fname = "/tmp/CsvKeepFullQuotedStringsTest.csv"; const char* names[] = { "line", "verse" }; const char* types[] = { "int32", "str" }; std::ofstream outfile(fname, std::ofstream::out); outfile << names[0] << ',' << names[1] << ',' << '\n'; outfile << "10,\"abc,\ndef, ghi\"" << '\n'; outfile << "20,\"jkl, \"\"mno\"\", pqr\"" << '\n'; outfile << "30,stu \"vwx\" yz" << '\n'; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.quotechar = '\"'; args.quoting = false; // do not strip outermost quotechar args.doublequote = false; // do not replace double quotechar with single args.skiprows = 1; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // No filtering of any columns EXPECT_EQ( args.num_cols_out, args.num_cols ); // Check the parsed string column metadata ASSERT_EQ( args.data[1]->dtype, GDF_STRING ); auto stringList = reinterpret_cast<NVStrings*>(args.data[1]->data); ASSERT_NE( stringList, nullptr ); auto stringCount = stringList->size(); ASSERT_EQ( stringCount, 3u ); auto stringLengths = std::unique_ptr<int[]>{ new int[stringCount] }; ASSERT_NE( stringList->len(stringLengths.get(), false), 0u ); // Check the actual strings themselves auto strings = std::unique_ptr<char*[]>{ new char*[stringCount] }; for (size_t i = 0; i < stringCount; ++i) { ASSERT_GT( stringLengths[i], 0 ); strings[i] = new char[stringLengths[i]]; } EXPECT_EQ( stringList->to_host(strings.get(), 0, stringCount), 0 ); EXPECT_STREQ( strings[0], "\"abc,\ndef, ghi\"" ); EXPECT_STREQ( strings[1], "\"jkl, \"\"mno\"\", pqr\"" ); EXPECT_STREQ( strings[2], "stu \"vwx\" yz" ); for (size_t i = 0; i < stringCount; ++i) { delete[] strings[i]; } } } TEST(gdf_csv_test, SpecifiedBoolValues) { const char* fname = "/tmp/CsvSpecifiedBoolValuesTest.csv"; const char* names[] = { "A", "B", "C" }; const char* types[] = { "int32", "int32", "short" }; const char* trueValues[] = { "yes", "Yes", "YES", "foo", "FOO" }; const char* falseValues[] = { "no", "No", "NO", "Bar", "bar" }; std::ofstream outfile(fname, std::ofstream::out); outfile << 
"YES,1,bar\nno,2,FOO\nBar,3,yes\nNo,4,NO\nYes,5,foo\n"; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.true_values = trueValues; args.num_true_values = std::extent<decltype(trueValues)>::value; args.false_values = falseValues; args.num_false_values = std::extent<decltype(falseValues)>::value; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); // Booleans are the same (integer) data type, but valued at 0 or 1 EXPECT_EQ( args.num_cols_out, args.num_cols ); ASSERT_EQ( args.data[0]->dtype, GDF_INT32 ); ASSERT_EQ( args.data[2]->dtype, GDF_INT16 ); auto firstCol = gdf_host_column<int32_t>(args.data[0]); EXPECT_THAT(firstCol.hostdata(), ::testing::ElementsAre(1, 0, 0, 0, 1)); auto thirdCol = gdf_host_column<int16_t>(args.data[2]); EXPECT_THAT(thirdCol.hostdata(), ::testing::ElementsAre(0, 1, 1, 0, 1)); } } TEST(gdf_csv_test, Dates) { const char* fname = "/tmp/CsvDatesTest.csv"; const char* names[] = { "A" }; const char* types[] = { "date" }; std::ofstream outfile(fname, std::ofstream::out); outfile << "05/03/2001\n31/10/2010\n20/10/1994\n18/10/1990\n1/1/1970\n"; outfile << "18/04/1995\n14/07/1994\n07/06/2006\n16/09/2005\n2/2/1970\n"; outfile.close(); ASSERT_TRUE( checkFile(fname) ); { csv_read_arg args{}; args.input_data_form = gdf_csv_input_form::FILE_PATH; args.filepath_or_buffer = fname; args.num_cols = std::extent<decltype(names)>::value; args.names = names; args.dtype = types; args.delimiter = ','; args.lineterminator = '\n'; args.dayfirst = true; args.nrows = -1; EXPECT_EQ( read_csv(&args), GDF_SUCCESS ); EXPECT_EQ( args.num_cols_out, args.num_cols ); ASSERT_EQ( args.data[0]->dtype, GDF_DATE64 ); auto ACol = gdf_host_column<uint64_t>(args.data[0]); EXPECT_THAT( ACol.hostdata(), ::testing::ElementsAre(983750400000, 1288483200000, 782611200000, 656208000000, 0, 798163200000, 774144000000, 1149638400000, 1126828800000, 2764800000) ); } }
f4515f9da8ea33b699f06fb662cbb19eaa0fb1f6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Source links:
https://blog.csdn.net/smartcat2010/article/details/105167981
https://developer.nvidia.com/blog/cuda-graphs/
*/
// Initial version
#define NSTEP 1000
#define NKERNEL 20

// start CPU wallclock timer
for(int istep=0; istep<NSTEP; istep++){
  for(int ikrnl=0; ikrnl<NKERNEL; ikrnl++){
    hipLaunchKernelGGL(( shortKernel), dim3(blocks), dim3(threads), 0, stream, out_d, in_d);
    hipStreamSynchronize(stream);
  }
}
//end CPU wallclock time
/*
Overall average time: 9.6 us; kernel execution time: 2.9 us.
Drawback: launch kernel --> run kernel --> wait for it to finish, serialized for every launch.
Device and host are asynchronous: once the CPU launches a device function, control returns immediately;
cudaMemcpy, by contrast, is a synchronous call.
hipError_t hipMemcpyAsync(void* dst, const void* src, size_t count, hipMemcpyKind kind, hipStream_t stream = 0);
The last parameter is the stream (usually the default stream); this call is asynchronous with respect to
the host, so control returns to the host as soon as it is issued.
*/
// Improved version
// start wallclock timer
for(int istep=0; istep<NSTEP; istep++){
  for(int ikrnl=0; ikrnl<NKERNEL; ikrnl++){
    hipLaunchKernelGGL(( shortKernel), dim3(blocks), dim3(threads), 0, stream, out_d, in_d);
  }
  hipStreamSynchronize(stream);
}
//end wallclock timer
/*
Overall average time: 3.8 us; kernel execution time: 2.9 us.
Advantage: launching the next kernel overlaps with executing the previous one.
Drawback: every kernel still has to be launched individually.
*/
// Graph-optimized version:
bool graphCreated=false;
hipGraph_t graph;
hipGraphExec_t instance;
for(int istep=0; istep<NSTEP; istep++){
  if(!graphCreated){
    hipStreamBeginCapture(stream, hipStreamCaptureModeGlobal);
    for(int ikrnl=0; ikrnl<NKERNEL; ikrnl++){
      hipLaunchKernelGGL(( shortKernel), dim3(blocks), dim3(threads), 0, stream, out_d, in_d);
    }
    hipStreamEndCapture(stream, &graph);
    hipGraphInstantiate(&instance, graph, NULL, NULL, 0);
    graphCreated=true;
  }
  hipGraphLaunch(instance, stream);
  hipStreamSynchronize(stream);
}
//Overall average time: 3.4 us; kernel execution time: 2.9 us.
//Advantage: the whole graph is launched once; building the graph the first time is slow, but later iterations reuse it.
f4515f9da8ea33b699f06fb662cbb19eaa0fb1f6.cu
/*
Source links:
https://blog.csdn.net/smartcat2010/article/details/105167981
https://developer.nvidia.com/blog/cuda-graphs/
*/
// Initial version
#define NSTEP 1000
#define NKERNEL 20

// start CPU wallclock timer
for(int istep=0; istep<NSTEP; istep++){
  for(int ikrnl=0; ikrnl<NKERNEL; ikrnl++){
    shortKernel<<<blocks, threads, 0, stream>>>(out_d, in_d);
    cudaStreamSynchronize(stream);
  }
}
//end CPU wallclock time
/*
Overall average time: 9.6 us; kernel execution time: 2.9 us.
Drawback: launch kernel --> run kernel --> wait for it to finish, serialized for every launch.
Device and host are asynchronous: once the CPU launches a device function, control returns immediately;
cudaMemcpy, by contrast, is a synchronous call.
cudaError_t cudaMemcpyAsync(void* dst, const void* src, size_t count, cudaMemcpyKind kind, cudaStream_t stream = 0);
The last parameter is worth noting: stream denotes the stream (usually the default stream). This call is
asynchronous with respect to the host, so control returns to the host as soon as it is issued.
*/
// Improved version
// start wallclock timer
for(int istep=0; istep<NSTEP; istep++){
  for(int ikrnl=0; ikrnl<NKERNEL; ikrnl++){
    shortKernel<<<blocks, threads, 0, stream>>>(out_d, in_d);
  }
  cudaStreamSynchronize(stream);
}
//end wallclock timer
/*
Overall average time: 3.8 us; kernel execution time: 2.9 us.
Advantage: launching the next kernel overlaps with executing the previous one.
Drawback: every kernel still has to be launched individually.
*/
// Graph-optimized version:
bool graphCreated=false;
cudaGraph_t graph;
cudaGraphExec_t instance;
for(int istep=0; istep<NSTEP; istep++){
  if(!graphCreated){
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    for(int ikrnl=0; ikrnl<NKERNEL; ikrnl++){
      shortKernel<<<blocks, threads, 0, stream>>>(out_d, in_d);
    }
    cudaStreamEndCapture(stream, &graph);
    cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
    graphCreated=true;
  }
  cudaGraphLaunch(instance, stream);
  cudaStreamSynchronize(stream);
}
//Overall average time: 3.4 us; kernel execution time: 2.9 us.
//Advantage: the whole graph is launched once; building the graph the first time is slow, but later iterations can reuse it.
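All three timing loops above assume the same surrounding context from the linked NVIDIA post: a kernel cheap enough that launch overhead dominates, a non-default stream, and preallocated device buffers. A hedged sketch of that context follows; N, blocks and threads are illustrative values, and this exact shortKernel body is an assumption rather than part of the file.

#define N (1 << 19)
__global__ void shortKernel(float *out_d, float *in_d) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < N) out_d[idx] = 1.23f * in_d[idx];   // trivially small amount of work per launch
}

// Host-side setup assumed before the timing loops:
float *in_d, *out_d;
cudaMalloc((void **)&in_d,  N * sizeof(float));
cudaMalloc((void **)&out_d, N * sizeof(float));
cudaStream_t stream;
cudaStreamCreate(&stream);
int threads = 512;
int blocks  = (N + threads - 1) / threads;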
4a5c36f07a9f532b99e7853a10aab5b29dea8524.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef SCATTER_IMPL_CU #define SCATTER_IMPL_CU //#include <cutil.h> #include "QP_Utility.cuh" #include "scatterImpl.cuh" #include "GPUPrimitive_Def.cu" __global__ void optScatter( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S) { const int by = blockIdx.y; const int bx = blockIdx.x; const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid=tx+ty*blockDim.x; const int bid=bx+by*gridDim.x; const int numThread=blockDim.x; const int resultID=(bid)*numThread+tid; int targetLoc=0; for(int pos=resultID;pos<rLen;pos+=delta) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) d_S[targetLoc]=d_R[pos]; } } __global__ void optGather( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S, int sLen) { const int by = blockIdx.y; const int bx = blockIdx.x; const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid=tx+ty*blockDim.x; const int bid=bx+by*gridDim.x; const int numThread=blockDim.x; const int resultID=(bid)*numThread+tid; int targetLoc=0; for(int pos=resultID;pos<sLen;pos+=delta) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) d_S[pos]=d_R[targetLoc]; } } #ifndef COALESCED __global__ void optScatter_noCoalesced( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S) { int numThread = blockDim.x; int numBlock = gridDim.x; int tid = blockIdx.x*numThread + threadIdx.x; int len = rLen/( numThread*numBlock ); int start = tid*len; int end = start + len; __syncthreads(); int targetLoc = 0; for( int pos = start; pos < end; pos++ ) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) { d_S[targetLoc].x = d_R[pos].x; d_S[targetLoc].y = d_R[pos].y; } __syncthreads(); } } __global__ void optGather_noCoalesced( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S, int sLen) { int numThread = blockDim.x; int numBlock = gridDim.x; int tid = blockIdx.x*numThread + threadIdx.x; int len = rLen/( numThread*numBlock ); int start = tid*len; int end = start + len; //__syncthreads(); int targetLoc = 0; for( int pos = start; pos < end; pos++ ) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) d_S[pos]=d_R[targetLoc]; } } #endif void scatterImpl(Record *d_R, int rLen, int *d_loc, Record *d_S, int numThreadPB=256, int numBlock=512) { int numRun=8; if(rLen<256*1024) numRun=1; else if(rLen<1024*1024) numRun=2; else if(rLen<8192*1024) numRun=4; int runSize=rLen/numRun; if(rLen%numRun!=0) runSize+=1; printf("run, %d\n", numRun); int from, to; int numThreadsPerBlock_x=numThreadPB; int numThreadsPerBlock_y=1; int numBlock_x=numBlock; int numBlock_y=1; int numThread=numBlock_x*numThreadsPerBlock_x; dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1); dim3 grid( numBlock_x, numBlock_y , 1); #ifdef COALESCED printf( "YES, COALESCED, scatter\n" ); #else printf( "NO COALESCED, scatter\n" ); #endif for(int i=0;i<numRun;i++) { from=i*runSize; to=(i+1)*runSize; #ifdef COALESCED hipLaunchKernelGGL(( optScatter), dim3(grid),dim3(thread), 0, 0, d_R,numThread, rLen, d_loc,from, to, d_S); #else hipLaunchKernelGGL(( optScatter_noCoalesced), dim3(grid),dim3(thread), 0, 0, d_R,numThread, rLen, d_loc,from, to, d_S); #endif CUDA_SAFE_CALL(hipDeviceSynchronize()); } } void scatterImpl_forPart(Record *d_R, int rLen, int numPart, int *d_loc, Record *d_S) { int numRun=8; if(numPart<=8) numRun=1; else if(numPart<=16) numRun=2; else if(numPart<=32) numRun=4; else numRun=8; int runSize=rLen/numRun; if(rLen%numRun!=0) 
runSize+=1; printf("run, %d\n", numRun); int from, to; int numThreadsPerBlock_x=256; int numThreadsPerBlock_y=1; int numBlock_x=512; int numBlock_y=1; int numThread=numBlock_x*numThreadsPerBlock_x; dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1); dim3 grid( numBlock_x, numBlock_y , 1); for(int i=0;i<numRun;i++) { from=i*runSize; to=(i+1)*runSize; hipLaunchKernelGGL(( optScatter), dim3(grid),dim3(thread), 0, 0, d_R,numThread, rLen, d_loc,from, to, d_S); } } void gatherImpl(Record *d_R, int rLen, int *d_loc, Record *d_S, int sLen, int numThreadsPerBlock_x = 32, int numBlock_x = 64) { int numRun=8; if(sLen<256*1024) numRun=1; else if(sLen<1024*1024) numRun=2; else if(sLen<8192*1024) numRun=4; printf("run, %d\n", numRun); int runSize=rLen/numRun; if(rLen%numRun!=0) runSize+=1; int from, to; //int numThreadsPerBlock_x=256; int numThreadsPerBlock_y=1; //int numBlock_x=512; int numBlock_y=1; int numThread=numBlock_x*numThreadsPerBlock_x; dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1); dim3 grid( numBlock_x, numBlock_y , 1); #ifdef COALESCED printf( "YES, COALESCED, \n" ); #else printf( "NO COALESCED, \n" ); #endif for(int i=0;i<numRun;i++) { from=i*runSize; to=(i+1)*runSize; #ifdef COALESCED hipLaunchKernelGGL(( optGather), dim3(grid),dim3(thread), 0, 0, d_R,numThread, rLen, d_loc,from, to, d_S,sLen); #else hipLaunchKernelGGL(( optGather_noCoalesced), dim3(grid),dim3(thread), 0, 0, d_R,numThread, rLen, d_loc,from, to, d_S,sLen); #endif CUDA_SAFE_CALL(hipDeviceSynchronize()); } } #endif
4a5c36f07a9f532b99e7853a10aab5b29dea8524.cu
#ifndef SCATTER_IMPL_CU #define SCATTER_IMPL_CU //#include <cutil.h> #include "QP_Utility.cuh" #include "scatterImpl.cuh" #include "GPUPrimitive_Def.cu" __global__ void optScatter( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S) { const int by = blockIdx.y; const int bx = blockIdx.x; const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid=tx+ty*blockDim.x; const int bid=bx+by*gridDim.x; const int numThread=blockDim.x; const int resultID=(bid)*numThread+tid; int targetLoc=0; for(int pos=resultID;pos<rLen;pos+=delta) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) d_S[targetLoc]=d_R[pos]; } } __global__ void optGather( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S, int sLen) { const int by = blockIdx.y; const int bx = blockIdx.x; const int tx = threadIdx.x; const int ty = threadIdx.y; const int tid=tx+ty*blockDim.x; const int bid=bx+by*gridDim.x; const int numThread=blockDim.x; const int resultID=(bid)*numThread+tid; int targetLoc=0; for(int pos=resultID;pos<sLen;pos+=delta) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) d_S[pos]=d_R[targetLoc]; } } #ifndef COALESCED __global__ void optScatter_noCoalesced( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S) { int numThread = blockDim.x; int numBlock = gridDim.x; int tid = blockIdx.x*numThread + threadIdx.x; int len = rLen/( numThread*numBlock ); int start = tid*len; int end = start + len; __syncthreads(); int targetLoc = 0; for( int pos = start; pos < end; pos++ ) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) { d_S[targetLoc].x = d_R[pos].x; d_S[targetLoc].y = d_R[pos].y; } __syncthreads(); } } __global__ void optGather_noCoalesced( Record *d_R, int delta, int rLen, int *loc, int from, int to, Record *d_S, int sLen) { int numThread = blockDim.x; int numBlock = gridDim.x; int tid = blockIdx.x*numThread + threadIdx.x; int len = rLen/( numThread*numBlock ); int start = tid*len; int end = start + len; //__syncthreads(); int targetLoc = 0; for( int pos = start; pos < end; pos++ ) { targetLoc=loc[pos]; if(targetLoc>=from && targetLoc<to) d_S[pos]=d_R[targetLoc]; } } #endif void scatterImpl(Record *d_R, int rLen, int *d_loc, Record *d_S, int numThreadPB=256, int numBlock=512) { int numRun=8; if(rLen<256*1024) numRun=1; else if(rLen<1024*1024) numRun=2; else if(rLen<8192*1024) numRun=4; int runSize=rLen/numRun; if(rLen%numRun!=0) runSize+=1; printf("run, %d\n", numRun); int from, to; int numThreadsPerBlock_x=numThreadPB; int numThreadsPerBlock_y=1; int numBlock_x=numBlock; int numBlock_y=1; int numThread=numBlock_x*numThreadsPerBlock_x; dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1); dim3 grid( numBlock_x, numBlock_y , 1); #ifdef COALESCED printf( "YES, COALESCED, scatter\n" ); #else printf( "NO COALESCED, scatter\n" ); #endif for(int i=0;i<numRun;i++) { from=i*runSize; to=(i+1)*runSize; #ifdef COALESCED optScatter<<<grid,thread>>>(d_R,numThread, rLen, d_loc,from, to, d_S); #else optScatter_noCoalesced<<<grid,thread>>>(d_R,numThread, rLen, d_loc,from, to, d_S); #endif CUDA_SAFE_CALL(cudaThreadSynchronize()); } } void scatterImpl_forPart(Record *d_R, int rLen, int numPart, int *d_loc, Record *d_S) { int numRun=8; if(numPart<=8) numRun=1; else if(numPart<=16) numRun=2; else if(numPart<=32) numRun=4; else numRun=8; int runSize=rLen/numRun; if(rLen%numRun!=0) runSize+=1; printf("run, %d\n", numRun); int from, to; int numThreadsPerBlock_x=256; int numThreadsPerBlock_y=1; int numBlock_x=512; int numBlock_y=1; int 
numThread=numBlock_x*numThreadsPerBlock_x; dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1); dim3 grid( numBlock_x, numBlock_y , 1); for(int i=0;i<numRun;i++) { from=i*runSize; to=(i+1)*runSize; optScatter<<<grid,thread>>>(d_R,numThread, rLen, d_loc,from, to, d_S); } } void gatherImpl(Record *d_R, int rLen, int *d_loc, Record *d_S, int sLen, int numThreadsPerBlock_x = 32, int numBlock_x = 64) { int numRun=8; if(sLen<256*1024) numRun=1; else if(sLen<1024*1024) numRun=2; else if(sLen<8192*1024) numRun=4; printf("run, %d\n", numRun); int runSize=rLen/numRun; if(rLen%numRun!=0) runSize+=1; int from, to; //int numThreadsPerBlock_x=256; int numThreadsPerBlock_y=1; //int numBlock_x=512; int numBlock_y=1; int numThread=numBlock_x*numThreadsPerBlock_x; dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1); dim3 grid( numBlock_x, numBlock_y , 1); #ifdef COALESCED printf( "YES, COALESCED, \n" ); #else printf( "NO COALESCED, \n" ); #endif for(int i=0;i<numRun;i++) { from=i*runSize; to=(i+1)*runSize; #ifdef COALESCED optGather<<<grid,thread>>>(d_R,numThread, rLen, d_loc,from, to, d_S,sLen); #else optGather_noCoalesced<<<grid,thread>>>(d_R,numThread, rLen, d_loc,from, to, d_S,sLen); #endif CUDA_SAFE_CALL(cudaThreadSynchronize()); } } #endif
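scatterImpl and gatherImpl both take a location array with one destination (or source) index per record and split the index range into numRun passes to bound the working set per pass. A hedged usage sketch follows; Record and CUDA_SAFE_CALL come from the included headers, while the buffer sizes and the way d_loc is filled here are illustrative.

// Illustrative driver only; the permutation written into d_loc is assumed to be computed elsewhere.
int rLen = 1 << 20;
Record *d_R, *d_S;
int *d_loc;
cudaMalloc((void **)&d_R,   rLen * sizeof(Record));
cudaMalloc((void **)&d_S,   rLen * sizeof(Record));
cudaMalloc((void **)&d_loc, rLen * sizeof(int));
// ... fill d_R and d_loc (loc[i] = destination slot of record i, with 0 <= loc[i] < rLen) ...
scatterImpl(d_R, rLen, d_loc, d_S);        // d_S[loc[i]] = d_R[i], executed in numRun passes
gatherImpl(d_S, rLen, d_loc, d_R, rLen);   // inverse movement: d_R[i] = d_S[loc[i]]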
f312e13890a0f28ef659f1d50d52c2296e5ebdf2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cum_minmax_impl.cuh" #include <hipcub/hipcub.hpp> #include <thrust/functional.h> #include <thrust/tuple.h> #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> #include <algorithm> #include <limits> #include "include/hip/hip_fp16.h" #include "plugin/device/cpu/kernel/nnacl/op_base.h" namespace { template <typename DataType> DataType NumericMax() { return std::numeric_limits<DataType>::max(); } template <typename DataType> DataType NumericMin() { return std::numeric_limits<DataType>::lowest(); } template <> half NumericMax<half>() { constexpr uint16_t x = 0x7BFF; return half(__half_raw{x}); } template <> half NumericMin<half>() { constexpr uint16_t x = 0xFBFF; return half(__half_raw{x}); } int GetMaxGridDimY(const uint32_t &device_id) { int max_size = 1 << 16; (void)hipDeviceGetAttribute(&max_size, hipDeviceAttributeMaxGridDimY, static_cast<int>(device_id)); return max_size; } } // namespace template <typename DataType> __device__ __forceinline__ bool IsNan(const DataType &x) { return isnan(x); } __device__ __forceinline__ bool IsNan(const half &x) { return __hisnan(x); } template <typename BinaryOp, typename DataType> struct BinaryFunctor { BinaryOp op_; __device__ __forceinline__ bool operator()(DataType lhs, DataType rhs) { return (IsNan(lhs) || !op_(rhs, lhs)) && !IsNan(rhs); } }; template <typename BinaryFunctor, typename TupleType> struct BlockScanFunctor { BinaryFunctor functor_; explicit BlockScanFunctor(BinaryFunctor functor) : functor_(functor) {} __device__ __forceinline__ TupleType operator()(TupleType lhs, TupleType rhs) { return functor_(thrust::get<0>(lhs), thrust::get<0>(rhs)) ? lhs : rhs; } }; // Inspired by cub documentation. template <typename BlockScanFunctor, typename TupleType> struct BlockPrefixCallbackFunctor { BlockScanFunctor functor_; TupleType block_aggregate_; // Constructor __device__ BlockPrefixCallbackFunctor(BlockScanFunctor functor, TupleType block_aggregate) : functor_(functor), block_aggregate_(block_aggregate) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. 
__device__ __forceinline__ TupleType operator()(TupleType block_aggregate) { TupleType old_block_aggregate = block_aggregate_; block_aggregate_ = functor_(old_block_aggregate, block_aggregate); return old_block_aggregate; } }; #ifndef _WIN32 template <typename BlockScanFunctor, typename ValueType, typename IndexType, uint BlockDim> __global__ void LargeBlockScanKernel(BlockScanFunctor functor, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, uint axis_size, uint inner_size, uint axis_inner_size, uint outer_inner_size, ValueType init) { typedef thrust::tuple<ValueType, IndexType> DataType; typedef hipcub::BlockScan<DataType, BlockDim> BlockScan; __shared__ typename BlockScan::TempStorage share_data; for (uint bid = blockIdx.x; bid < outer_inner_size; bid += gridDim.x) { uint outer_idx = bid / inner_size; uint inner_idx = bid % inner_size; DataType init_data{init, 0}; BlockPrefixCallbackFunctor<BlockScanFunctor, DataType> cb_functor{functor, init_data}; uint axis_idx = threadIdx.x; uint axis_offset = outer_idx * axis_inner_size + inner_idx + axis_idx * inner_size; for (uint block_offset = 0; block_offset < axis_size; block_offset += BlockDim) { DataType thread_data = init_data; if (axis_idx < axis_size) { thread_data = thrust::make_tuple(input_ptr[axis_offset], axis_idx); } BlockScan(share_data).template InclusiveScan(thread_data, thread_data, functor, cb_functor); __syncthreads(); if (axis_idx < axis_size) { thrust::tie(value_ptr[axis_offset], index_ptr[axis_offset]) = thread_data; } axis_idx += BlockDim; axis_offset += BlockDim * inner_size; } } } #endif template <typename BlockScanFunctor, typename ValueType, typename IndexType, uint BlockDimX, uint BlockDimY> __global__ void ScanInnerMostDimKernel(BlockScanFunctor functor, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, uint outer_size, uint axis_size, ValueType init) { typedef thrust::tuple<ValueType, IndexType> DataType; constexpr uint scan_per_block = BlockDimX * 2; __shared__ ValueType share_value[BlockDimY][scan_per_block]; __shared__ IndexType share_index[BlockDimY][scan_per_block]; auto share_value_ptr = share_value[threadIdx.y]; auto share_index_ptr = share_index[threadIdx.y]; for (uint bid = blockIdx.x * blockDim.y; bid < outer_size; bid += gridDim.x * blockDim.y) { uint outer_idx = bid + threadIdx.y; bool is_valid = outer_idx < outer_size; uint offset = outer_idx * axis_size; DataType block_data{init, 0}; // The following parallel scan algorithm refers to: // Figure 9.7 from David B. Kirk, et al. 'Programming Massively Parallel Processors'. for (uint i = 0; i < axis_size; i += scan_per_block) { // Initializing share memory with input value, and each thread process two elements. uint idx1 = threadIdx.x + i; uint idx2 = idx1 + BlockDimX; if (is_valid) { if (idx1 < axis_size) { share_value_ptr[threadIdx.x] = input_ptr[offset + idx1]; share_index_ptr[threadIdx.x] = idx1; } else { share_value_ptr[threadIdx.x] = init; } if (idx2 < axis_size) { share_value_ptr[threadIdx.x + BlockDimX] = input_ptr[offset + idx2]; share_index_ptr[threadIdx.x + BlockDimX] = idx2; } else { share_value_ptr[threadIdx.x + BlockDimX] = init; } // update with previous block result. 
if (threadIdx.x == 0) { thrust::tie(share_value_ptr[0], share_index_ptr[0]) = functor(thrust::make_tuple(share_value_ptr[0], share_index_ptr[0]), block_data); } } // up-sweep for (uint stride = 1; stride < scan_per_block; stride <<= 1) { uint index = (threadIdx.x + 1) * (stride << 1) - 1; if (is_valid && index < scan_per_block) { thrust::tie(share_value_ptr[index], share_index_ptr[index]) = functor(thrust::make_tuple(share_value_ptr[index - stride], share_index_ptr[index - stride]), thrust::make_tuple(share_value_ptr[index], share_index_ptr[index])); } } // down-sweep for (uint stride = scan_per_block >> 2; stride > 0; stride >>= 1) { uint index = (threadIdx.x + 1) * (stride << 1) - 1; if (is_valid && index + stride < scan_per_block) { thrust::tie(share_value_ptr[index + stride], share_index_ptr[index + stride]) = functor(thrust::make_tuple(share_value_ptr[index], share_index_ptr[index]), thrust::make_tuple(share_value_ptr[index + stride], share_index_ptr[index + stride])); } } // write to output. if (is_valid) { if (idx1 < axis_size) { value_ptr[offset + idx1] = share_value_ptr[threadIdx.x]; index_ptr[offset + idx1] = share_index_ptr[threadIdx.x]; } if (idx2 < axis_size) { value_ptr[offset + idx2] = share_value_ptr[threadIdx.x + BlockDimX]; index_ptr[offset + idx2] = share_index_ptr[threadIdx.x + BlockDimX]; } // update block_data block_data = thrust::make_tuple(share_value_ptr[scan_per_block - 1], share_index_ptr[scan_per_block - 1]); } } } } template <typename BlockScanFunctor, typename ValueType, typename IndexType> __global__ void ScanOuterDimKernel(BlockScanFunctor functor, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, uint axis_size, uint inner_size, uint axis_inner_size, uint outer_inner_size, ValueType init) { typedef thrust::tuple<ValueType, IndexType> DataType; for (uint bid = blockIdx.x * blockDim.x + threadIdx.x; bid < outer_inner_size; bid += gridDim.x * blockDim.x) { uint outer_idx = bid / inner_size; uint inner_idx = bid % inner_size; DataType out{init, 0}; uint offset = outer_idx * axis_inner_size + inner_idx; for (uint i = 0; i < axis_size; i++) { DataType thread_data = thrust::make_tuple(input_ptr[offset], i); out = functor(out, thread_data); thrust::tie(value_ptr[offset], index_ptr[offset]) = out; offset += inner_size; } } } template <typename BinaryFunctor, typename ValueType, typename IndexType> void KernelHelper(BinaryFunctor functor, ValueType init, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, size_t outer_size_st, size_t axis_size_st, size_t inner_size_st, const uint32_t &device_id, hipStream_t cuda_stream) { auto outer_size = static_cast<uint>(outer_size_st); auto inner_size = static_cast<uint>(inner_size_st); auto axis_size = static_cast<uint>(axis_size_st); auto outer_inner_size = outer_size * inner_size; auto axis_inner_size = axis_size * inner_size; uint max_grid_size = GetMaxGridDimY(device_id); typedef BlockScanFunctor<BinaryFunctor, thrust::tuple<ValueType, IndexType>> BlockScanFunctor; BlockScanFunctor scan_op{functor}; #if defined(CUB_VERSION) && (CUB_VERSION > 100800) && !defined(_WIN32) // Special case where only one dimension that needs to compute, so using cub library is the most efficient way. if (outer_size == 1 && inner_size == 1) { // Using thrust::zip_iterator to make an iterator for (ValueType, IndexType). 
hipcub::CountingInputIterator<IndexType> count_iter(0); typedef typename thrust::detail::normal_iterator<const ValueType *> InputValueIterator; typedef hipcub::CountingInputIterator<IndexType> InputIndexIterator; typedef thrust::zip_iterator<thrust::tuple<InputValueIterator, InputIndexIterator>> InputZipIterator; InputZipIterator input_iter(thrust::make_tuple(input_ptr, count_iter)); typedef typename thrust::detail::normal_iterator<ValueType *> OutputValueIterator; typedef typename thrust::detail::normal_iterator<IndexType *> OutputIndexIterator; typedef thrust::zip_iterator<thrust::tuple<OutputValueIterator, OutputIndexIterator>> OutputZipIterator; OutputZipIterator output_iter(thrust::make_tuple(value_ptr, index_ptr)); // Calculate the size of temporary storage. size_t temp_storage_bytes = 0; (void)hipcub::DeviceScan::InclusiveScan(nullptr, temp_storage_bytes, input_iter, output_iter, scan_op, axis_size, cuda_stream); // Allocate temporary storage. char *temp_storage_ptr = nullptr; (void)hipMalloc(&temp_storage_ptr, temp_storage_bytes); // Core computation process. (void)hipcub::DeviceScan::InclusiveScan(temp_storage_ptr, temp_storage_bytes, input_iter, output_iter, scan_op, axis_size, cuda_stream); (void)hipFree(temp_storage_ptr); return; } // When computing capacity of CUDA is not recommended (<7), we instead use self-implemented scan algorithm. // Otherwise, we use hipcub::BlockScan, which is faster than self-implemented one. const int major_sm = GET_MAJOR_SM; const bool check_sm = mindspore::device::gpu::CudaCommon::GetInstance().check_sm(); constexpr uint threshold_large_scan_dim = 500; if (!(check_sm && major_sm < RECOMMEND_SM) && axis_size > threshold_large_scan_dim) { constexpr uint block_dim = 512; uint grid_x = ::min(outer_inner_size, max_grid_size); dim3 block{block_dim}; dim3 grid{grid_x}; hipLaunchKernelGGL(( LargeBlockScanKernel<BlockScanFunctor, ValueType, IndexType, block_dim>), dim3(grid), dim3(block), 0, cuda_stream, scan_op, input_ptr, value_ptr, index_ptr, axis_size, inner_size, axis_inner_size, outer_inner_size, init); return; } #endif if (inner_size == 1) { constexpr uint block_dim_x = 32; constexpr uint block_dim_y = 16; // The reason why x-dimension of block is set to 32: // Each thread process 2 elements, so each x-dimension of block process 64 elements. An obvious advantage is no // bank conflict. In addition, we don't need `__syncthreads`, since 32 is equal to warp size. 
uint grid_x = ::min(UP_DIV(outer_size, block_dim_y), max_grid_size); dim3 block = {block_dim_x, block_dim_y}; dim3 grid = {grid_x}; hipLaunchKernelGGL(( ScanInnerMostDimKernel<BlockScanFunctor, ValueType, IndexType, block_dim_x, block_dim_y>) , dim3(grid), dim3(block), 0, cuda_stream, scan_op, input_ptr, value_ptr, index_ptr, outer_size, axis_size, init); } else { constexpr uint block_dim = 512; uint grid_x = ::min(UP_DIV(outer_inner_size, block_dim), max_grid_size); dim3 block{block_dim}; dim3 grid{grid_x}; hipLaunchKernelGGL(( ScanOuterDimKernel), dim3(grid), dim3(block), 0, cuda_stream, scan_op, input_ptr, value_ptr, index_ptr, axis_size, inner_size, axis_inner_size, outer_inner_size, init); } } template <typename DataType, typename IndexType> hipError_t CumMinMax(CumOpType cum_op_type, const DataType *input_ptr, DataType *value_ptr, IndexType *index_ptr, size_t outer_size_st, size_t axis_size_st, size_t inner_size_st, const uint32_t &device_id, hipStream_t cuda_stream) { switch (cum_op_type) { case CUMMIN: { KernelHelper(BinaryFunctor<thrust::less_equal<DataType>, DataType>{}, NumericMax<DataType>(), input_ptr, value_ptr, index_ptr, outer_size_st, axis_size_st, inner_size_st, device_id, cuda_stream); break; } case CUMMAX: { KernelHelper(BinaryFunctor<thrust::greater_equal<DataType>, DataType>{}, NumericMin<DataType>(), input_ptr, value_ptr, index_ptr, outer_size_st, axis_size_st, inner_size_st, device_id, cuda_stream); break; } default: break; } CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT hipError_t CumMinMax<int8_t, int32_t>(CumOpType cum_op_type, const int8_t *input_ptr, int8_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<int16_t, int32_t>(CumOpType cum_op_type, const int16_t *input_ptr, int16_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<int32_t, int32_t>(CumOpType cum_op_type, const int32_t *input_ptr, int32_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<int64_t, int32_t>(CumOpType cum_op_type, const int64_t *input_ptr, int64_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint8_t, int32_t>(CumOpType cum_op_type, const uint8_t *input_ptr, uint8_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint16_t, int32_t>(CumOpType cum_op_type, const uint16_t *input_ptr, uint16_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint32_t, int32_t>(CumOpType cum_op_type, const uint32_t *input_ptr, uint32_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint64_t, int32_t>(CumOpType cum_op_type, const uint64_t *input_ptr, uint64_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, 
const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<half, int32_t>(CumOpType cum_op_type, const half *input_ptr, half *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<float, int32_t>(CumOpType cum_op_type, const float *input_ptr, float *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<double, int32_t>(CumOpType cum_op_type, const double *input_ptr, double *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<int8_t, int64_t>(CumOpType cum_op_type, const int8_t *input_ptr, int8_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<int16_t, int64_t>(CumOpType cum_op_type, const int16_t *input_ptr, int16_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<int32_t, int64_t>(CumOpType cum_op_type, const int32_t *input_ptr, int32_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<int64_t, int64_t>(CumOpType cum_op_type, const int64_t *input_ptr, int64_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint8_t, int64_t>(CumOpType cum_op_type, const uint8_t *input_ptr, uint8_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint16_t, int64_t>(CumOpType cum_op_type, const uint16_t *input_ptr, uint16_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint32_t, int64_t>(CumOpType cum_op_type, const uint32_t *input_ptr, uint32_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<uint64_t, int64_t>(CumOpType cum_op_type, const uint64_t *input_ptr, uint64_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<half, int64_t>(CumOpType cum_op_type, const half *input_ptr, half *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<float, int64_t>(CumOpType cum_op_type, const float *input_ptr, float *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT hipError_t CumMinMax<double, int64_t>(CumOpType cum_op_type, const double *input_ptr, double 
*value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, hipStream_t cuda_stream);
f312e13890a0f28ef659f1d50d52c2296e5ebdf2.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/cum_minmax_impl.cuh" #include <cub/cub.cuh> #include <thrust/functional.h> #include <thrust/tuple.h> #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> #include <algorithm> #include <limits> #include "include/cuda_fp16.h" #include "plugin/device/cpu/kernel/nnacl/op_base.h" namespace { template <typename DataType> DataType NumericMax() { return std::numeric_limits<DataType>::max(); } template <typename DataType> DataType NumericMin() { return std::numeric_limits<DataType>::lowest(); } template <> half NumericMax<half>() { constexpr uint16_t x = 0x7BFF; return half(__half_raw{x}); } template <> half NumericMin<half>() { constexpr uint16_t x = 0xFBFF; return half(__half_raw{x}); } int GetMaxGridDimY(const uint32_t &device_id) { int max_size = 1 << 16; (void)cudaDeviceGetAttribute(&max_size, cudaDevAttrMaxGridDimY, static_cast<int>(device_id)); return max_size; } } // namespace template <typename DataType> __device__ __forceinline__ bool IsNan(const DataType &x) { return isnan(x); } __device__ __forceinline__ bool IsNan(const half &x) { return __hisnan(x); } template <typename BinaryOp, typename DataType> struct BinaryFunctor { BinaryOp op_; __device__ __forceinline__ bool operator()(DataType lhs, DataType rhs) { return (IsNan(lhs) || !op_(rhs, lhs)) && !IsNan(rhs); } }; template <typename BinaryFunctor, typename TupleType> struct BlockScanFunctor { BinaryFunctor functor_; explicit BlockScanFunctor(BinaryFunctor functor) : functor_(functor) {} __device__ __forceinline__ TupleType operator()(TupleType lhs, TupleType rhs) { return functor_(thrust::get<0>(lhs), thrust::get<0>(rhs)) ? lhs : rhs; } }; // Inspired by cub documentation. template <typename BlockScanFunctor, typename TupleType> struct BlockPrefixCallbackFunctor { BlockScanFunctor functor_; TupleType block_aggregate_; // Constructor __device__ BlockPrefixCallbackFunctor(BlockScanFunctor functor, TupleType block_aggregate) : functor_(functor), block_aggregate_(block_aggregate) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. 
__device__ __forceinline__ TupleType operator()(TupleType block_aggregate) { TupleType old_block_aggregate = block_aggregate_; block_aggregate_ = functor_(old_block_aggregate, block_aggregate); return old_block_aggregate; } }; #ifndef _WIN32 template <typename BlockScanFunctor, typename ValueType, typename IndexType, uint BlockDim> __global__ void LargeBlockScanKernel(BlockScanFunctor functor, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, uint axis_size, uint inner_size, uint axis_inner_size, uint outer_inner_size, ValueType init) { typedef thrust::tuple<ValueType, IndexType> DataType; typedef cub::BlockScan<DataType, BlockDim> BlockScan; __shared__ typename BlockScan::TempStorage share_data; for (uint bid = blockIdx.x; bid < outer_inner_size; bid += gridDim.x) { uint outer_idx = bid / inner_size; uint inner_idx = bid % inner_size; DataType init_data{init, 0}; BlockPrefixCallbackFunctor<BlockScanFunctor, DataType> cb_functor{functor, init_data}; uint axis_idx = threadIdx.x; uint axis_offset = outer_idx * axis_inner_size + inner_idx + axis_idx * inner_size; for (uint block_offset = 0; block_offset < axis_size; block_offset += BlockDim) { DataType thread_data = init_data; if (axis_idx < axis_size) { thread_data = thrust::make_tuple(input_ptr[axis_offset], axis_idx); } BlockScan(share_data).template InclusiveScan(thread_data, thread_data, functor, cb_functor); __syncthreads(); if (axis_idx < axis_size) { thrust::tie(value_ptr[axis_offset], index_ptr[axis_offset]) = thread_data; } axis_idx += BlockDim; axis_offset += BlockDim * inner_size; } } } #endif template <typename BlockScanFunctor, typename ValueType, typename IndexType, uint BlockDimX, uint BlockDimY> __global__ void ScanInnerMostDimKernel(BlockScanFunctor functor, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, uint outer_size, uint axis_size, ValueType init) { typedef thrust::tuple<ValueType, IndexType> DataType; constexpr uint scan_per_block = BlockDimX * 2; __shared__ ValueType share_value[BlockDimY][scan_per_block]; __shared__ IndexType share_index[BlockDimY][scan_per_block]; auto share_value_ptr = share_value[threadIdx.y]; auto share_index_ptr = share_index[threadIdx.y]; for (uint bid = blockIdx.x * blockDim.y; bid < outer_size; bid += gridDim.x * blockDim.y) { uint outer_idx = bid + threadIdx.y; bool is_valid = outer_idx < outer_size; uint offset = outer_idx * axis_size; DataType block_data{init, 0}; // The following parallel scan algorithm refers to: // Figure 9.7 from David B. Kirk, et al. 'Programming Massively Parallel Processors'. for (uint i = 0; i < axis_size; i += scan_per_block) { // Initializing share memory with input value, and each thread process two elements. uint idx1 = threadIdx.x + i; uint idx2 = idx1 + BlockDimX; if (is_valid) { if (idx1 < axis_size) { share_value_ptr[threadIdx.x] = input_ptr[offset + idx1]; share_index_ptr[threadIdx.x] = idx1; } else { share_value_ptr[threadIdx.x] = init; } if (idx2 < axis_size) { share_value_ptr[threadIdx.x + BlockDimX] = input_ptr[offset + idx2]; share_index_ptr[threadIdx.x + BlockDimX] = idx2; } else { share_value_ptr[threadIdx.x + BlockDimX] = init; } // update with previous block result. 
if (threadIdx.x == 0) { thrust::tie(share_value_ptr[0], share_index_ptr[0]) = functor(thrust::make_tuple(share_value_ptr[0], share_index_ptr[0]), block_data); } } // up-sweep for (uint stride = 1; stride < scan_per_block; stride <<= 1) { uint index = (threadIdx.x + 1) * (stride << 1) - 1; if (is_valid && index < scan_per_block) { thrust::tie(share_value_ptr[index], share_index_ptr[index]) = functor(thrust::make_tuple(share_value_ptr[index - stride], share_index_ptr[index - stride]), thrust::make_tuple(share_value_ptr[index], share_index_ptr[index])); } } // down-sweep for (uint stride = scan_per_block >> 2; stride > 0; stride >>= 1) { uint index = (threadIdx.x + 1) * (stride << 1) - 1; if (is_valid && index + stride < scan_per_block) { thrust::tie(share_value_ptr[index + stride], share_index_ptr[index + stride]) = functor(thrust::make_tuple(share_value_ptr[index], share_index_ptr[index]), thrust::make_tuple(share_value_ptr[index + stride], share_index_ptr[index + stride])); } } // write to output. if (is_valid) { if (idx1 < axis_size) { value_ptr[offset + idx1] = share_value_ptr[threadIdx.x]; index_ptr[offset + idx1] = share_index_ptr[threadIdx.x]; } if (idx2 < axis_size) { value_ptr[offset + idx2] = share_value_ptr[threadIdx.x + BlockDimX]; index_ptr[offset + idx2] = share_index_ptr[threadIdx.x + BlockDimX]; } // update block_data block_data = thrust::make_tuple(share_value_ptr[scan_per_block - 1], share_index_ptr[scan_per_block - 1]); } } } } template <typename BlockScanFunctor, typename ValueType, typename IndexType> __global__ void ScanOuterDimKernel(BlockScanFunctor functor, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, uint axis_size, uint inner_size, uint axis_inner_size, uint outer_inner_size, ValueType init) { typedef thrust::tuple<ValueType, IndexType> DataType; for (uint bid = blockIdx.x * blockDim.x + threadIdx.x; bid < outer_inner_size; bid += gridDim.x * blockDim.x) { uint outer_idx = bid / inner_size; uint inner_idx = bid % inner_size; DataType out{init, 0}; uint offset = outer_idx * axis_inner_size + inner_idx; for (uint i = 0; i < axis_size; i++) { DataType thread_data = thrust::make_tuple(input_ptr[offset], i); out = functor(out, thread_data); thrust::tie(value_ptr[offset], index_ptr[offset]) = out; offset += inner_size; } } } template <typename BinaryFunctor, typename ValueType, typename IndexType> void KernelHelper(BinaryFunctor functor, ValueType init, const ValueType *input_ptr, ValueType *value_ptr, IndexType *index_ptr, size_t outer_size_st, size_t axis_size_st, size_t inner_size_st, const uint32_t &device_id, cudaStream_t cuda_stream) { auto outer_size = static_cast<uint>(outer_size_st); auto inner_size = static_cast<uint>(inner_size_st); auto axis_size = static_cast<uint>(axis_size_st); auto outer_inner_size = outer_size * inner_size; auto axis_inner_size = axis_size * inner_size; uint max_grid_size = GetMaxGridDimY(device_id); typedef BlockScanFunctor<BinaryFunctor, thrust::tuple<ValueType, IndexType>> BlockScanFunctor; BlockScanFunctor scan_op{functor}; #if defined(CUB_VERSION) && (CUB_VERSION > 100800) && !defined(_WIN32) // Special case where only one dimension that needs to compute, so using cub library is the most efficient way. if (outer_size == 1 && inner_size == 1) { // Using thrust::zip_iterator to make an iterator for (ValueType, IndexType). 
cub::CountingInputIterator<IndexType> count_iter(0); typedef typename thrust::detail::normal_iterator<const ValueType *> InputValueIterator; typedef cub::CountingInputIterator<IndexType> InputIndexIterator; typedef thrust::zip_iterator<thrust::tuple<InputValueIterator, InputIndexIterator>> InputZipIterator; InputZipIterator input_iter(thrust::make_tuple(input_ptr, count_iter)); typedef typename thrust::detail::normal_iterator<ValueType *> OutputValueIterator; typedef typename thrust::detail::normal_iterator<IndexType *> OutputIndexIterator; typedef thrust::zip_iterator<thrust::tuple<OutputValueIterator, OutputIndexIterator>> OutputZipIterator; OutputZipIterator output_iter(thrust::make_tuple(value_ptr, index_ptr)); // Calculate the size of temporary storage. size_t temp_storage_bytes = 0; (void)cub::DeviceScan::InclusiveScan(nullptr, temp_storage_bytes, input_iter, output_iter, scan_op, axis_size, cuda_stream); // Allocate temporary storage. char *temp_storage_ptr = nullptr; (void)cudaMalloc(&temp_storage_ptr, temp_storage_bytes); // Core computation process. (void)cub::DeviceScan::InclusiveScan(temp_storage_ptr, temp_storage_bytes, input_iter, output_iter, scan_op, axis_size, cuda_stream); (void)cudaFree(temp_storage_ptr); return; } // When computing capacity of CUDA is not recommended (<7), we instead use self-implemented scan algorithm. // Otherwise, we use cub::BlockScan, which is faster than self-implemented one. const int major_sm = GET_MAJOR_SM; const bool check_sm = mindspore::device::gpu::CudaCommon::GetInstance().check_sm(); constexpr uint threshold_large_scan_dim = 500; if (!(check_sm && major_sm < RECOMMEND_SM) && axis_size > threshold_large_scan_dim) { constexpr uint block_dim = 512; uint grid_x = std::min(outer_inner_size, max_grid_size); dim3 block{block_dim}; dim3 grid{grid_x}; LargeBlockScanKernel<BlockScanFunctor, ValueType, IndexType, block_dim><<<grid, block, 0, cuda_stream>>>( scan_op, input_ptr, value_ptr, index_ptr, axis_size, inner_size, axis_inner_size, outer_inner_size, init); return; } #endif if (inner_size == 1) { constexpr uint block_dim_x = 32; constexpr uint block_dim_y = 16; // The reason why x-dimension of block is set to 32: // Each thread process 2 elements, so each x-dimension of block process 64 elements. An obvious advantage is no // bank conflict. In addition, we don't need `__syncthreads`, since 32 is equal to warp size. 
uint grid_x = std::min(UP_DIV(outer_size, block_dim_y), max_grid_size); dim3 block = {block_dim_x, block_dim_y}; dim3 grid = {grid_x}; ScanInnerMostDimKernel<BlockScanFunctor, ValueType, IndexType, block_dim_x, block_dim_y> <<<grid, block, 0, cuda_stream>>>(scan_op, input_ptr, value_ptr, index_ptr, outer_size, axis_size, init); } else { constexpr uint block_dim = 512; uint grid_x = std::min(UP_DIV(outer_inner_size, block_dim), max_grid_size); dim3 block{block_dim}; dim3 grid{grid_x}; ScanOuterDimKernel<<<grid, block, 0, cuda_stream>>>(scan_op, input_ptr, value_ptr, index_ptr, axis_size, inner_size, axis_inner_size, outer_inner_size, init); } } template <typename DataType, typename IndexType> cudaError_t CumMinMax(CumOpType cum_op_type, const DataType *input_ptr, DataType *value_ptr, IndexType *index_ptr, size_t outer_size_st, size_t axis_size_st, size_t inner_size_st, const uint32_t &device_id, cudaStream_t cuda_stream) { switch (cum_op_type) { case CUMMIN: { KernelHelper(BinaryFunctor<thrust::less_equal<DataType>, DataType>{}, NumericMax<DataType>(), input_ptr, value_ptr, index_ptr, outer_size_st, axis_size_st, inner_size_st, device_id, cuda_stream); break; } case CUMMAX: { KernelHelper(BinaryFunctor<thrust::greater_equal<DataType>, DataType>{}, NumericMin<DataType>(), input_ptr, value_ptr, index_ptr, outer_size_st, axis_size_st, inner_size_st, device_id, cuda_stream); break; } default: break; } CHECK_CUDA_LAUNCH_SUCCESS(); } template CUDA_LIB_EXPORT cudaError_t CumMinMax<int8_t, int32_t>(CumOpType cum_op_type, const int8_t *input_ptr, int8_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<int16_t, int32_t>(CumOpType cum_op_type, const int16_t *input_ptr, int16_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<int32_t, int32_t>(CumOpType cum_op_type, const int32_t *input_ptr, int32_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<int64_t, int32_t>(CumOpType cum_op_type, const int64_t *input_ptr, int64_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint8_t, int32_t>(CumOpType cum_op_type, const uint8_t *input_ptr, uint8_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint16_t, int32_t>(CumOpType cum_op_type, const uint16_t *input_ptr, uint16_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint32_t, int32_t>(CumOpType cum_op_type, const uint32_t *input_ptr, uint32_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint64_t, int32_t>(CumOpType cum_op_type, const uint64_t *input_ptr, uint64_t *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t 
cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<half, int32_t>(CumOpType cum_op_type, const half *input_ptr, half *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<float, int32_t>(CumOpType cum_op_type, const float *input_ptr, float *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<double, int32_t>(CumOpType cum_op_type, const double *input_ptr, double *value_ptr, int32_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<int8_t, int64_t>(CumOpType cum_op_type, const int8_t *input_ptr, int8_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<int16_t, int64_t>(CumOpType cum_op_type, const int16_t *input_ptr, int16_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<int32_t, int64_t>(CumOpType cum_op_type, const int32_t *input_ptr, int32_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<int64_t, int64_t>(CumOpType cum_op_type, const int64_t *input_ptr, int64_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint8_t, int64_t>(CumOpType cum_op_type, const uint8_t *input_ptr, uint8_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint16_t, int64_t>(CumOpType cum_op_type, const uint16_t *input_ptr, uint16_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint32_t, int64_t>(CumOpType cum_op_type, const uint32_t *input_ptr, uint32_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<uint64_t, int64_t>(CumOpType cum_op_type, const uint64_t *input_ptr, uint64_t *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<half, int64_t>(CumOpType cum_op_type, const half *input_ptr, half *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<float, int64_t>(CumOpType cum_op_type, const float *input_ptr, float *value_ptr, int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT cudaError_t CumMinMax<double, int64_t>(CumOpType cum_op_type, const double *input_ptr, double *value_ptr, 
int64_t *index_ptr, size_t outer_size, size_t axis_size, size_t inner_size, const uint32_t &device_id, cudaStream_t cuda_stream);
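// Illustrative sketch, not part of the original file: a minimal host-side example of how the
// BinaryFunctor used by the cummin/cummax kernels above decides which element a scan keeps.
// It mirrors the expression (IsNan(lhs) || !op_(rhs, lhs)) && !IsNan(rhs), so a NaN "wins" and
// then propagates to every later output. The helper name keep_lhs_cummin is hypothetical.
#include <cmath>
#include <cstdio>
#include <functional>

static bool keep_lhs_cummin(float lhs, float rhs) {
	std::less_equal<float> le;  // same role as thrust::less_equal<DataType> in the CUMMIN path
	return (std::isnan(lhs) || !le(rhs, lhs)) && !std::isnan(rhs);
}

int main() {
	const float nan = std::nanf("");
	// running minimum keeps the smaller value on the left...
	std::printf("%d\n", keep_lhs_cummin(1.0f, 2.0f));  // 1: keep lhs, since 1 < 2
	// ...but once a NaN has been seen it is kept forever
	std::printf("%d\n", keep_lhs_cummin(nan, 0.0f));   // 1: NaN on the left wins
	std::printf("%d\n", keep_lhs_cummin(0.0f, nan));   // 0: NaN on the right wins
	return 0;
}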
ba9d2597bdd3e8f23190352f0d2e4b3155328887.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
#include<stdlib.h>

__global__ void Matadd(char* A,int N)
{
}

int main()
{
	char newline = '\n';
	FILE *fp;
	fp = fopen("HW7_1-1.txt","w");
	for(int j=0;j<=25;j++)
	{
		hipEvent_t start1,stop1;
		float time1;
		int i;
		int N = pow(2,j);
		size_t size = N;
		printf ("\n The value of N is %d",N);
		hipEventCreate(&start1);
		hipEventCreate(&stop1);
		//allocate input matrices hA, hB, hC,refC in host memory
		char* hA = (char*)malloc(size);
		for(i=0;i<N;i++)
		{
			hA[i] = rand()%20-10;
		}
		//allocate memory on the device (GPU)
		char* dA;
		hipMalloc((void**) &dA,size);
		//timing start for inclusive timing
		hipEventRecord(start1, 0);
		//copy vectors from host memory to device memory
		hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
		hipEventRecord(stop1, 0);
		hipEventSynchronize(stop1);
		//invoke GPU kernel with 16 threads per block and enough blocks to cover N elements
		int threadsperblock = 16;
		int blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
		//timing start for exclusive timing
		//hipEventRecord(start2, 0);
		hipLaunchKernelGGL(( Matadd), dim3(blockspergrid),dim3(threadsperblock), 0, 0, dA,N);
		hipMemcpy(hA, dA, size, hipMemcpyDeviceToHost);
		//hipEventRecord(stop1, 0);
		//hipEventSynchronize(stop1);
		hipEventElapsedTime(&time1,start1,stop1);
		//hipEventElapsedTime reports elapsed time in milliseconds
		printf("\n The transfer time in milliseconds for 2 to power %d is %f respectively \n",j,time1 );
		fwrite(&j,sizeof(j),1,fp);
		fwrite(&time1,sizeof(time1),1,fp);
		fwrite(&newline,sizeof(newline),1,fp);
		//hA was allocated with malloc, so it must be released with free, not hipFree
		free(hA);
		hipFree(dA);
		//release the events created in this iteration
		hipEventDestroy(start1);
		hipEventDestroy(stop1);
	}
	fclose(fp);
	return 0;
}
ba9d2597bdd3e8f23190352f0d2e4b3155328887.cu
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
#include<stdlib.h>

__global__ void Matadd(char* A,int N)
{
}

int main()
{
	char newline = '\n';
	FILE *fp;
	fp = fopen("HW7_1-1.txt","w");
	for(int j=0;j<=25;j++)
	{
		cudaEvent_t start1,stop1;
		float time1;
		int i;
		int N = pow(2,j);
		size_t size = N;
		printf ("\n The value of N is %d",N);
		cudaEventCreate(&start1);
		cudaEventCreate(&stop1);
		//allocate input matrices hA, hB, hC,refC in host memory
		char* hA = (char*)malloc(size);
		for(i=0;i<N;i++)
		{
			hA[i] = rand()%20-10;
		}
		//allocate memory on the device (GPU)
		char* dA;
		cudaMalloc((void**) &dA,size);
		//timing start for inclusive timing
		cudaEventRecord(start1, 0);
		//copy vectors from host memory to device memory
		cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
		cudaEventRecord(stop1, 0);
		cudaEventSynchronize(stop1);
		//invoke GPU kernel with 16 threads per block and enough blocks to cover N elements
		int threadsperblock = 16;
		int blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
		//timing start for exclusive timing
		//cudaEventRecord(start2, 0);
		Matadd<<<blockspergrid,threadsperblock>>>(dA,N);
		cudaMemcpy(hA, dA, size, cudaMemcpyDeviceToHost);
		//cudaEventRecord(stop1, 0);
		//cudaEventSynchronize(stop1);
		cudaEventElapsedTime(&time1,start1,stop1);
		//cudaEventElapsedTime reports elapsed time in milliseconds
		printf("\n The transfer time in milliseconds for 2 to power %d is %f respectively \n",j,time1 );
		fwrite(&j,sizeof(j),1,fp);
		fwrite(&time1,sizeof(time1),1,fp);
		fwrite(&newline,sizeof(newline),1,fp);
		//hA was allocated with malloc, so it must be released with free, not cudaFree
		free(hA);
		cudaFree(dA);
		//release the events created in this iteration
		cudaEventDestroy(start1);
		cudaEventDestroy(stop1);
	}
	fclose(fp);
	return 0;
}
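// Illustrative sketch, not part of the original assignment code: the program above records CUDA
// events around a host-to-device copy; the helper below shows one way that measurement could be
// turned into an effective bandwidth figure. cudaEventElapsedTime reports milliseconds, so
// bytes / (ms * 1e6) yields GB/s. The name reportH2DBandwidth is hypothetical.
#include <cuda_runtime.h>
#include <cstdio>

static void reportH2DBandwidth(void *dst, const void *src, size_t bytes)
{
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float ms = 0.0f;
	cudaEventElapsedTime(&ms, start, stop);
	// convert milliseconds to GB/s; guard against a zero reading for tiny copies
	std::printf("copied %zu bytes in %f ms (%f GB/s)\n",
	            bytes, ms, ms > 0.0f ? bytes / (ms * 1.0e6) : 0.0);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}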
744a09dbc9ae3531148b34484ea4767ab19d91a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <sys/time.h> #include <stdio.h> #include <math.h> #include "openacc.h" #ifdef _OPENMP #include <omp.h> #endif #include "acc_helper.h" #ifndef _N_ #define _N_ 512 #endif #ifndef VERIFICATION #define VERIFICATION 1 #endif __global__ void MatrixMultiplication_cuda (float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int M, int N, int P); static int N = _N_; static int M = _N_; static int P = _N_; double my_timer () { struct timeval time; gettimeofday (&time, 0); return time.tv_sec + time.tv_usec / 1000000.0; } void MatrixMultiplication_openmp(float * a,float * b, float * c) { int i, j, k ; #pragma omp parallel shared(a,b,c) private(i,j,k) { #ifdef _OPENMP if(omp_get_thread_num() == 0) { printf("Number of OpenMP threads %d\n", omp_get_num_threads()); } #endif #pragma omp for for (i=0; i<M; i++){ for (j=0; j<N; j++) { float sum = 0.0 ; for (k=0; k<P; k++) sum += b[i*P+k]*c[k*N+j] ; a[i*N+j] = sum ; } } } } int main() { float *a, *b, *c; float *a_CPU, *b_CPU, *c_CPU; float *a_GPU, *b_GPU, *c_GPU; int i; double elapsed_time; dim3 dG, dB; //If below function is enabled, CUDA driver API creats a CUDA //context fist. //Otherwise, CUDA runtime API creates a CUDA context first. acc_helper_setup(); a = (float *) malloc(M*N*sizeof(float)); b = (float *) malloc(M*P*sizeof(float)); c = (float *) malloc(P*N*sizeof(float)); a_CPU = (float *) malloc(M*N*sizeof(float)); b_CPU = (float *) malloc(M*P*sizeof(float)); c_CPU = (float *) malloc(P*N*sizeof(float)); hipMalloc((void **)&a_GPU, M*N*sizeof(float)); hipMalloc((void **)&b_GPU, M*P*sizeof(float)); hipMalloc((void **)&c_GPU, P*N*sizeof(float)); for (i = 0; i < M*N; i++) { a[i] = (float) 0.0F; a_CPU[i] = (float) 0.0F; } for (i = 0; i < M*P; i++) { b[i] = (float) i; b_CPU[i] = (float) i; } for (i = 0; i < P*N; i++) { c[i] = (float) 1.0F; c_CPU[i] = (float) 1.0F; } #if VERIFICATION == 1 elapsed_time = my_timer(); MatrixMultiplication_openmp(a_CPU,b_CPU,c_CPU); elapsed_time = my_timer() - elapsed_time; printf("CPU Elapsed time = %lf sec\n", elapsed_time); #endif elapsed_time = my_timer(); //Enable below if this is not called previously. //acc_helper_setup(); hipMemcpy(b_GPU, b, M*P*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(c_GPU, c, P*N*sizeof(float), hipMemcpyHostToDevice); dG.x = (int)ceil((((float)(M*N))/32.0F)); dG.y = 1; dG.z = 1; dB.x = 32; dB.y = 1; dB.z = 1; hipLaunchKernelGGL(( MatrixMultiplication_cuda), dim3(dG),dim3(dB), 0, 0, a_GPU,b_GPU,c_GPU, M, N, P); hipMemcpy(a, a_GPU, M*N*sizeof(float), hipMemcpyDeviceToHost); elapsed_time = my_timer() - elapsed_time; printf("Accelerator Elapsed time (CUDA) = %lf sec\n", elapsed_time); elapsed_time = my_timer(); //Enable below if this is not called previously. 
//acc_helper_setup(); hipMemcpy(b_GPU, b, M*P*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(c_GPU, c, P*N*sizeof(float), hipMemcpyHostToDevice); acc_map_data(a, a_GPU, M*N*sizeof(float)); acc_map_data(b, b_GPU, M*P*sizeof(float)); acc_map_data(c, c_GPU, P*N*sizeof(float)); MatrixMultiplication_openacc(a,b,c); hipMemcpy(a, a_GPU, M*N*sizeof(float), hipMemcpyDeviceToHost); elapsed_time = my_timer() - elapsed_time; printf("Accelerator Elapsed time (OpenACC) = %lf sec\n", elapsed_time); #if VERIFICATION == 1 { double cpu_sum = 0.0; double gpu_sum = 0.0; double rel_err = 0.0; for (i=0; i<M*N; i++){ cpu_sum += a_CPU[i]*a_CPU[i]; gpu_sum += a[i]*a[i]; } cpu_sum = sqrt(cpu_sum); gpu_sum = sqrt(gpu_sum); if( cpu_sum > gpu_sum ) { rel_err = (cpu_sum-gpu_sum)/cpu_sum; } else { rel_err = (gpu_sum-cpu_sum)/cpu_sum; } if(rel_err < 1e-6) { printf("Verification Successful err = %e\n", rel_err); } else { printf("Verification Fail err = %e\n", rel_err); } } #endif free(a_CPU); free(b_CPU); free(c_CPU); free(a); free(b); free(c); return 0; }
744a09dbc9ae3531148b34484ea4767ab19d91a7.cu
#include <stdlib.h> #include <sys/time.h> #include <stdio.h> #include <math.h> #include "openacc.h" #ifdef _OPENMP #include <omp.h> #endif #include "acc_helper.h" #ifndef _N_ #define _N_ 512 #endif #ifndef VERIFICATION #define VERIFICATION 1 #endif __global__ void MatrixMultiplication_cuda (float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int M, int N, int P); static int N = _N_; static int M = _N_; static int P = _N_; double my_timer () { struct timeval time; gettimeofday (&time, 0); return time.tv_sec + time.tv_usec / 1000000.0; } void MatrixMultiplication_openmp(float * a,float * b, float * c) { int i, j, k ; #pragma omp parallel shared(a,b,c) private(i,j,k) { #ifdef _OPENMP if(omp_get_thread_num() == 0) { printf("Number of OpenMP threads %d\n", omp_get_num_threads()); } #endif #pragma omp for for (i=0; i<M; i++){ for (j=0; j<N; j++) { float sum = 0.0 ; for (k=0; k<P; k++) sum += b[i*P+k]*c[k*N+j] ; a[i*N+j] = sum ; } } } } int main() { float *a, *b, *c; float *a_CPU, *b_CPU, *c_CPU; float *a_GPU, *b_GPU, *c_GPU; int i; double elapsed_time; dim3 dG, dB; //If below function is enabled, CUDA driver API creats a CUDA //context fist. //Otherwise, CUDA runtime API creates a CUDA context first. acc_helper_setup(); a = (float *) malloc(M*N*sizeof(float)); b = (float *) malloc(M*P*sizeof(float)); c = (float *) malloc(P*N*sizeof(float)); a_CPU = (float *) malloc(M*N*sizeof(float)); b_CPU = (float *) malloc(M*P*sizeof(float)); c_CPU = (float *) malloc(P*N*sizeof(float)); cudaMalloc((void **)&a_GPU, M*N*sizeof(float)); cudaMalloc((void **)&b_GPU, M*P*sizeof(float)); cudaMalloc((void **)&c_GPU, P*N*sizeof(float)); for (i = 0; i < M*N; i++) { a[i] = (float) 0.0F; a_CPU[i] = (float) 0.0F; } for (i = 0; i < M*P; i++) { b[i] = (float) i; b_CPU[i] = (float) i; } for (i = 0; i < P*N; i++) { c[i] = (float) 1.0F; c_CPU[i] = (float) 1.0F; } #if VERIFICATION == 1 elapsed_time = my_timer(); MatrixMultiplication_openmp(a_CPU,b_CPU,c_CPU); elapsed_time = my_timer() - elapsed_time; printf("CPU Elapsed time = %lf sec\n", elapsed_time); #endif elapsed_time = my_timer(); //Enable below if this is not called previously. //acc_helper_setup(); cudaMemcpy(b_GPU, b, M*P*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(c_GPU, c, P*N*sizeof(float), cudaMemcpyHostToDevice); dG.x = (int)ceil((((float)(M*N))/32.0F)); dG.y = 1; dG.z = 1; dB.x = 32; dB.y = 1; dB.z = 1; MatrixMultiplication_cuda<<<dG,dB>>>(a_GPU,b_GPU,c_GPU, M, N, P); cudaMemcpy(a, a_GPU, M*N*sizeof(float), cudaMemcpyDeviceToHost); elapsed_time = my_timer() - elapsed_time; printf("Accelerator Elapsed time (CUDA) = %lf sec\n", elapsed_time); elapsed_time = my_timer(); //Enable below if this is not called previously. 
//acc_helper_setup(); cudaMemcpy(b_GPU, b, M*P*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(c_GPU, c, P*N*sizeof(float), cudaMemcpyHostToDevice); acc_map_data(a, a_GPU, M*N*sizeof(float)); acc_map_data(b, b_GPU, M*P*sizeof(float)); acc_map_data(c, c_GPU, P*N*sizeof(float)); MatrixMultiplication_openacc(a,b,c); cudaMemcpy(a, a_GPU, M*N*sizeof(float), cudaMemcpyDeviceToHost); elapsed_time = my_timer() - elapsed_time; printf("Accelerator Elapsed time (OpenACC) = %lf sec\n", elapsed_time); #if VERIFICATION == 1 { double cpu_sum = 0.0; double gpu_sum = 0.0; double rel_err = 0.0; for (i=0; i<M*N; i++){ cpu_sum += a_CPU[i]*a_CPU[i]; gpu_sum += a[i]*a[i]; } cpu_sum = sqrt(cpu_sum); gpu_sum = sqrt(gpu_sum); if( cpu_sum > gpu_sum ) { rel_err = (cpu_sum-gpu_sum)/cpu_sum; } else { rel_err = (gpu_sum-cpu_sum)/cpu_sum; } if(rel_err < 1e-6) { printf("Verification Successful err = %e\n", rel_err); } else { printf("Verification Fail err = %e\n", rel_err); } } #endif free(a_CPU); free(b_CPU); free(c_CPU); free(a); free(b); free(c); return 0; }
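// Illustrative sketch, not part of the original file: MatrixMultiplication_cuda is only declared
// above; its definition lives in another translation unit that is not included in this excerpt.
// The kernel below is a plausible version consistent with the 1-D launch configuration used there
// (dB.x = 32 threads per block, dG.x = ceil(M*N/32) blocks, one thread per output element) and
// with the a = b * c convention of the OpenMP reference. It is an assumption, not the project's
// actual implementation; the _sketch suffix marks that.
__global__ void MatrixMultiplication_cuda_sketch(float * __restrict__ a, float * __restrict__ b,
                                                 float * __restrict__ c, int M, int N, int P)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;  // flat index into the M x N result
	if (idx >= M * N) return;
	int i = idx / N;  // row of a and b
	int j = idx % N;  // column of a and c
	float sum = 0.0f;
	for (int k = 0; k < P; k++)
		sum += b[i * P + k] * c[k * N + j];           // same inner product as the OpenMP version
	a[i * N + j] = sum;
}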
6be5f71b3d39b293670ba4fd226d4182da2bd284.hip
// !!! This is a file automatically generated by hipify!!! // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. #include "cudakernel/memory/slice.h" #include "cudakernel/common/divmod_fast.h" #include "cudakernel/common/memory_utils.h" #include "ppl/nn/common/tensor_shape.h" #include "ppl/common/retcode.h" #include <hip/hip_runtime.h> #define MAX_DIM_SIZE SLICE_PARAM_MAX_DIM_SIZE template <typename T> __global__ void ppl_cukernel_slice( int64_t num_elems, int num_dims, SliceKernelParam param, GArray<int64_t> input_strides, const T* input, GArray<int64_t> output_strides, GArray<DivModFast> output_strides_fast, T* output) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_elems) return; int output_idx[MAX_DIM_SIZE]; int input_idx[MAX_DIM_SIZE]; int idx, remain = index; for (int it = 0; it < num_dims; ++it) { output_strides_fast[it].divmod(remain, idx, remain); output_idx[it] = idx; } // copy output_idx to input_idx for (int it = 0; it < num_dims; ++it) input_idx[it] = output_idx[it]; // calc input_idx according to axes[] for (int it = 0; it < param.axes_num; ++it) { int axis = param.axes[it]; input_idx[axis] = output_idx[axis] * param.steps[it] + param.starts[it]; } int64_t input_offset = 0; int64_t output_offset = 0; for (int it = 0; it < num_dims; ++it) { input_offset += input_idx[it] * input_strides[it]; output_offset += output_idx[it] * output_strides[it]; } output[output_offset] = input[input_offset]; } ppl::common::RetCode PPLCUDASliceForwardImp( hipStream_t stream, SliceKernelParam param, const ppl::nn::TensorShape* input_shape, const void* input, ppl::nn::TensorShape* output_shape, void* output) { if (output_shape->GetElementsIncludingPadding() == 0) return ppl::common::RC_SUCCESS; int block_size = 256; uint64_t num_elems = output_shape->GetElementsExcludingPadding(); int grid_size = (num_elems + block_size - 1) / block_size; int num_dims = output_shape->GetDimCount(); GArray<int64_t> input_strides(num_dims); GArray<int64_t> output_strides(num_dims); GArray<DivModFast> output_strides_fast(num_dims); int64_t acc_output_stride = 1; int64_t acc_input_stride = 1; for (int it = num_dims - 1; it >= 0; --it) { input_strides[it] = acc_input_stride; output_strides[it] = acc_output_stride; output_strides_fast[it] = DivModFast(acc_output_stride); acc_input_stride *= input_shape->GetDim(it); acc_output_stride *= output_shape->GetDim(it); } if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8) { acc_output_stride = 1; acc_input_stride = 1; for (int it = num_dims - 1; it >= 0; --it) { if (it == num_dims - 1) { input_strides[1] = acc_input_stride; output_strides[1] = acc_output_stride; acc_input_stride *= input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1); acc_output_stride *= output_shape->GetDim(1) + 
output_shape->GetPadding0(1) + output_shape->GetPadding1(1); } else if (it == 0) { input_strides[it] = acc_input_stride; output_strides[it] = acc_output_stride; acc_input_stride *= input_shape->GetDim(it); acc_output_stride *= output_shape->GetDim(it); } else { input_strides[it + 1] = acc_input_stride; output_strides[it + 1] = acc_output_stride; acc_input_stride *= input_shape->GetDim(it + 1); acc_output_stride *= output_shape->GetDim(it + 1); } } } #define SWITCH_CASE(TYPE) \ case sizeof(TYPE): { \ hipLaunchKernelGGL(( ppl_cukernel_slice), dim3(grid_size), dim3(block_size), 0, stream, \ num_elems, num_dims, param, input_strides, (const TYPE*)input, output_strides, output_strides_fast, (TYPE*)output); \ return ppl::common::RC_SUCCESS; \ } switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) { SWITCH_CASE(int8_t); SWITCH_CASE(int16_t); SWITCH_CASE(int32_t); SWITCH_CASE(int64_t); default: return ppl::common::RC_UNSUPPORTED; } #undef SWITCH_CASE }
6be5f71b3d39b293670ba4fd226d4182da2bd284.cu
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. #include "cudakernel/memory/slice.h" #include "cudakernel/common/divmod_fast.h" #include "cudakernel/common/memory_utils.h" #include "ppl/nn/common/tensor_shape.h" #include "ppl/common/retcode.h" #include <cuda_runtime.h> #define MAX_DIM_SIZE SLICE_PARAM_MAX_DIM_SIZE template <typename T> __global__ void ppl_cukernel_slice( int64_t num_elems, int num_dims, SliceKernelParam param, GArray<int64_t> input_strides, const T* input, GArray<int64_t> output_strides, GArray<DivModFast> output_strides_fast, T* output) { int64_t index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= num_elems) return; int output_idx[MAX_DIM_SIZE]; int input_idx[MAX_DIM_SIZE]; int idx, remain = index; for (int it = 0; it < num_dims; ++it) { output_strides_fast[it].divmod(remain, idx, remain); output_idx[it] = idx; } // copy output_idx to input_idx for (int it = 0; it < num_dims; ++it) input_idx[it] = output_idx[it]; // calc input_idx according to axes[] for (int it = 0; it < param.axes_num; ++it) { int axis = param.axes[it]; input_idx[axis] = output_idx[axis] * param.steps[it] + param.starts[it]; } int64_t input_offset = 0; int64_t output_offset = 0; for (int it = 0; it < num_dims; ++it) { input_offset += input_idx[it] * input_strides[it]; output_offset += output_idx[it] * output_strides[it]; } output[output_offset] = input[input_offset]; } ppl::common::RetCode PPLCUDASliceForwardImp( cudaStream_t stream, SliceKernelParam param, const ppl::nn::TensorShape* input_shape, const void* input, ppl::nn::TensorShape* output_shape, void* output) { if (output_shape->GetElementsIncludingPadding() == 0) return ppl::common::RC_SUCCESS; int block_size = 256; uint64_t num_elems = output_shape->GetElementsExcludingPadding(); int grid_size = (num_elems + block_size - 1) / block_size; int num_dims = output_shape->GetDimCount(); GArray<int64_t> input_strides(num_dims); GArray<int64_t> output_strides(num_dims); GArray<DivModFast> output_strides_fast(num_dims); int64_t acc_output_stride = 1; int64_t acc_input_stride = 1; for (int it = num_dims - 1; it >= 0; --it) { input_strides[it] = acc_input_stride; output_strides[it] = acc_output_stride; output_strides_fast[it] = DivModFast(acc_output_stride); acc_input_stride *= input_shape->GetDim(it); acc_output_stride *= output_shape->GetDim(it); } if (output_shape->GetDataFormat() == ppl::common::DATAFORMAT_NHWC8) { acc_output_stride = 1; acc_input_stride = 1; for (int it = num_dims - 1; it >= 0; --it) { if (it == num_dims - 1) { input_strides[1] = acc_input_stride; output_strides[1] = acc_output_stride; acc_input_stride *= input_shape->GetDim(1) + input_shape->GetPadding0(1) + input_shape->GetPadding1(1); acc_output_stride *= output_shape->GetDim(1) + output_shape->GetPadding0(1) + output_shape->GetPadding1(1); 
} else if (it == 0) { input_strides[it] = acc_input_stride; output_strides[it] = acc_output_stride; acc_input_stride *= input_shape->GetDim(it); acc_output_stride *= output_shape->GetDim(it); } else { input_strides[it + 1] = acc_input_stride; output_strides[it + 1] = acc_output_stride; acc_input_stride *= input_shape->GetDim(it + 1); acc_output_stride *= output_shape->GetDim(it + 1); } } } #define SWITCH_CASE(TYPE) \ case sizeof(TYPE): { \ ppl_cukernel_slice<<<grid_size, block_size, 0, stream>>>( \ num_elems, num_dims, param, input_strides, (const TYPE*)input, output_strides, output_strides_fast, (TYPE*)output); \ return ppl::common::RC_SUCCESS; \ } switch (ppl::common::GetSizeOfDataType(input_shape->GetDataType())) { SWITCH_CASE(int8_t); SWITCH_CASE(int16_t); SWITCH_CASE(int32_t); SWITCH_CASE(int64_t); default: return ppl::common::RC_UNSUPPORTED; } #undef SWITCH_CASE }
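// Illustrative sketch, not part of the original file: a CPU-side rendering of the index arithmetic
// that ppl_cukernel_slice performs for each output element — decompose the linear output index
// into per-dimension coordinates, shift the sliced axes by start + coordinate * step, and
// recombine with the input strides (DivModFast in the kernel is just a fast div/mod). All names
// below are local to this sketch.
#include <cstdint>
#include <vector>

static int64_t SliceInputOffset(int64_t out_index,
                                const std::vector<int64_t> &out_strides,  // row-major output strides
                                const std::vector<int64_t> &in_strides,   // row-major input strides
                                const std::vector<int> &axes,             // sliced dimensions
                                const std::vector<int> &starts,           // start per sliced dimension
                                const std::vector<int> &steps)            // step per sliced dimension
{
	const int num_dims = static_cast<int>(out_strides.size());
	std::vector<int64_t> coord(num_dims);
	int64_t remain = out_index;
	for (int d = 0; d < num_dims; ++d) {        // linear index -> output coordinates
		coord[d] = remain / out_strides[d];
		remain   = remain % out_strides[d];
	}
	for (size_t k = 0; k < axes.size(); ++k)    // apply start/step on the sliced axes
		coord[axes[k]] = coord[axes[k]] * steps[k] + starts[k];
	int64_t in_offset = 0;
	for (int d = 0; d < num_dims; ++d)          // coordinates -> input offset
		in_offset += coord[d] * in_strides[d];
	return in_offset;
}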
d247ca8e0d2285695331919131198aaaeeacf1d4.hip
// !!! This is a file automatically generated by hipify!!! // BVH traversal kernels based on "Understanding the #include <hip/hip_runtime.h> #include <math_functions.h> #include <hip/hip_vector_types.h> #include <vector_functions.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include "CudaRenderKernel.h" #include "stdio.h" #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "cutil_math.h" // required for float3 #define STACK_SIZE 64 // Size of the traversal stack in local memory. #define M_PI 3.1415926535897932384626422832795028841971f #define TWO_PI 6.2831853071795864769252867665590057683943f #define DYNAMIC_FETCH_THRESHOLD 20 // If fewer than this active, fetch new rays #define samps 1 #define F32_MIN (1.175494351e-38f) #define F32_MAX (3.402823466e+38f) #define HDRwidth 3200 #define HDRheight 1600 #define HDR #define EntrypointSentinel 0x76543210 #define MaxBlockHeight 6 enum Refl_t { DIFF, METAL, SPEC, REFR, COAT }; // material types // CUDA textures containing scene data texture<float4, 1, hipReadModeElementType> bvhNodesTexture; texture<float4, 1, hipReadModeElementType> triWoopTexture; texture<float4, 1, hipReadModeElementType> triNormalsTexture; texture<int, 1, hipReadModeElementType> triIndicesTexture; texture<float4, 1, hipReadModeElementType> HDRtexture; __device__ inline Vec3f absmax3f(const Vec3f& v1, const Vec3f& v2){ return Vec3f(v1.x*v1.x > v2.x*v2.x ? v1.x : v2.x, v1.y*v1.y > v2.y*v2.y ? v1.y : v2.y, v1.z*v1.z > v2.z*v2.z ? v1.z : v2.z); } struct Ray { float3 orig; // ray origin float3 dir; // ray direction __device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {} }; struct Sphere { float rad; // radius float3 pos, emi, col; // position, emission, color Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive) __device__ float intersect(const Ray &r) const { // returns distance, 0 if nohit // ray/sphere intersection float3 op = pos - r.orig; float t, epsilon = 0.01f; float b = dot(op, r.dir); float disc = b*b - dot(op, op) + rad*rad; // discriminant of quadratic formula if (disc<0) return 0; else disc = sqrtf(disc); return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? 
t : 0.0f); } }; __constant__ Sphere spheres[] = { // sun //{ 10000, { 50.0f, 40.8f, -1060 }, { 0.3, 0.3, 0.3 }, { 0.175f, 0.175f, 0.25f }, DIFF }, // sky 0.003, 0.003, 0.003 //{ 4.5, { 0.0f, 12.5, 0 }, { 6, 4, 1 }, { .6f, .6f, 0.6f }, DIFF }, /// lightsource { 10000.02, { 50.0f, -10001.35, 0 }, { 0.0, 0.0, 0 }, { 0.3f, 0.3f, 0.3f }, DIFF }, // ground 300/-301.0 //{ 10000, { 50.0f, -10000.1, 0 }, { 0, 0, 0 }, { 0.3f, 0.3f, 0.3f }, DIFF }, // double shell to prevent light leaking //{ 110000, { 50.0f, -110048.5, 0 }, { 3.6, 2.0, 0.2 }, { 0.f, 0.f, 0.f }, DIFF }, // horizon brightener //{ 0.5, { 30.0f, 180.5, 42 }, { 0, 0, 0 }, { .6f, .6f, 0.6f }, DIFF }, // small sphere 1 //{ 0.8, { 2.0f, 0.f, 0 }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.8f }, SPEC }, // small sphere 2 //{ 0.8, { -3.0f, 0.f, 0 }, { 0.0, 0.0, 0.0 }, { 0.0f, 0.0f, 0.2f }, COAT }, // small sphere 2 { 2.5, { -6.0f, 0.5f, 0.0f }, { 0.0, 0.0, 0.0 }, { 0.9f, 0.9f, 0.9f }, SPEC }, // small sphere 2 //{ 0.6, { -10.0f, -2.f, 1.0f }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.8f }, DIFF }, // small sphere 2 //{ 0.8, { -1.0f, -0.7f, 4.0f }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.8f }, REFR }, // small sphere 2 //{ 9.4, { 9.0f, 0.f, -9.0f }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.f }, DIFF }, // small sphere 2 //{ 22, { 105.0f, 22, 24 }, { 0, 0, 0 }, { 0.9f, 0.9f, 0.9f }, DIFF }, // small sphere 3 }; // RAY BOX INTERSECTION ROUTINES // Experimentally determined best mix of float/int/video minmax instructions for Kepler. // float c0min = spanBeginKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); // Tesla does max4(min, min, min, tmin) // float c0max = spanEndKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); // Tesla does min4(max, max, max, tmax) // Perform min/max operations in hardware // Using Kepler's video instructions, see http://docs.nvidia.com/cuda/parallel-thread-execution/#axzz3jbhbcTZf // : "=r"(v) overwrites v and puts it in a register // see https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html __device__ __inline__ int min_min(int a, int b, int c) { int v; asm("vmin.s32.s32.s32.min %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ int min_max(int a, int b, int c) { int v; asm("vmin.s32.s32.s32.max %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ int max_min(int a, int b, int c) { int v; asm("vmax.s32.s32.s32.min %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ int max_max(int a, int b, int c) { int v; asm("vmax.s32.s32.s32.max %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ float fmin_fmin(float a, float b, float c) { return __int_as_float(min_min(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float fmin_fmax(float a, float b, float c) { return __int_as_float(min_max(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float fmax_fmin(float a, float b, float c) { return __int_as_float(max_min(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float fmax_fmax(float a, float b, float c) { return __int_as_float(max_max(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float spanBeginKepler(float a0, float a1, float b0, float b1, float c0, float c1, float d){ return fmax_fmax(fminf(a0, a1), fminf(b0, b1), fmin_fmax(c0, c1, d)); } __device__ __inline__ float spanEndKepler(float a0, float a1, float b0, float b1, float c0, float c1, float d) { return 
fmin_fmin(fmaxf(a0, a1), fmaxf(b0, b1), fmax_fmin(c0, c1, d)); } // standard ray box intersection routines (for debugging purposes only) // based on Intersect::RayBox() in original Aila/Laine code __device__ __inline__ float spanBeginKepler2(float lo_x, float hi_x, float lo_y, float hi_y, float lo_z, float hi_z, float d){ Vec3f t0 = Vec3f(lo_x, lo_y, lo_z); Vec3f t1 = Vec3f(hi_x, hi_y, hi_z); Vec3f realmin = min3f(t0, t1); float raybox_tmin = realmin.max(); // maxmin //return Vec2f(tmin, tmax); return raybox_tmin; } __device__ __inline__ float spanEndKepler2(float lo_x, float hi_x, float lo_y, float hi_y, float lo_z, float hi_z, float d){ Vec3f t0 = Vec3f(lo_x, lo_y, lo_z); Vec3f t1 = Vec3f(hi_x, hi_y, hi_z); Vec3f realmax = max3f(t0, t1); float raybox_tmax = realmax.min(); /// minmax //return Vec2f(tmin, tmax); return raybox_tmax; } __device__ __inline__ void swap2(int& a, int& b){ int temp = a; a = b; b = temp;} // standard ray triangle intersection routines (for debugging purposes only) // based on Intersect::RayTriangle() in original Aila/Laine code __device__ Vec3f intersectRayTriangle(const Vec3f& v0, const Vec3f& v1, const Vec3f& v2, const Vec4f& rayorig, const Vec4f& raydir){ const Vec3f rayorig3f = Vec3f(rayorig.x, rayorig.y, rayorig.z); const Vec3f raydir3f = Vec3f(raydir.x, raydir.y, raydir.z); const float EPSILON = 0.00001f; // works better const Vec3f miss(F32_MAX, F32_MAX, F32_MAX); float raytmin = rayorig.w; float raytmax = raydir.w; Vec3f edge1 = v1 - v0; Vec3f edge2 = v2 - v0; Vec3f tvec = rayorig3f - v0; Vec3f pvec = cross(raydir3f, edge2); float det = dot(edge1, pvec); float invdet = 1.0f / det; float u = dot(tvec, pvec) * invdet; Vec3f qvec = cross(tvec, edge1); float v = dot(raydir3f, qvec) * invdet; if (det > EPSILON) { if (u < 0.0f || u > 1.0f) return miss; // 1.0 want = det * 1/det if (v < 0.0f || (u + v) > 1.0f) return miss; // if u and v are within these bounds, continue and go to float t = dot(... } else if (det < -EPSILON) { if (u > 0.0f || u < 1.0f) return miss; if (v > 0.0f || (u + v) < 1.0f) return miss; // else continue } else // if det is not larger (more positive) than EPSILON or not smaller (more negative) than -EPSILON, there is a "miss" return miss; float t = dot(edge2, qvec) * invdet; if (t > raytmin && t < raytmax) return Vec3f(u, v, t); // otherwise (t < raytmin or t > raytmax) miss return miss; } // modified intersection routine (uses regular instead of woopified triangles) for debugging purposes __device__ void DEBUGintersectBVHandTriangles(const float4 rayorig, const float4 raydir, const float4* gpuNodes, const float4* gpuTriWoops, const float4* gpuDebugTris, const int* gpuTriIndices, int& hitTriIdx, float& hitdistance, int& debugbingo, Vec3f& trinormal, int leafcount, int tricount, bool needClosestHit){ int traversalStack[STACK_SIZE]; float origx, origy, origz; // Ray origin. float dirx, diry, dirz; // Ray direction. float tmin; // t-value from which the ray starts. Usually 0. 
float idirx, idiry, idirz; // 1 / dir float oodx, oody, oodz; // orig / dir char* stackPtr; int leafAddr; int nodeAddr; int hitIndex; float hitT; int threadId1; threadId1 = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y)); origx = rayorig.x; origy = rayorig.y; origz = rayorig.z; dirx = raydir.x; diry = raydir.y; dirz = raydir.z; tmin = rayorig.w; // ooeps is very small number, used instead of raydir xyz component when that component is near zero float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number idirx = 1.0f / (fabsf(raydir.x) > ooeps ? raydir.x : copysignf(ooeps, raydir.x)); // inverse ray direction idiry = 1.0f / (fabsf(raydir.y) > ooeps ? raydir.y : copysignf(ooeps, raydir.y)); // inverse ray direction idirz = 1.0f / (fabsf(raydir.z) > ooeps ? raydir.z : copysignf(ooeps, raydir.z)); // inverse ray direction oodx = origx * idirx; // ray origin / ray direction oody = origy * idiry; // ray origin / ray direction oodz = origz * idirz; // ray origin / ray direction traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 0x76543210 is 1985229328 in decimal stackPtr = (char*)&traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel leafAddr = 0; // No postponed leaf. nodeAddr = 0; // Start from the root. hitIndex = -1; // No triangle intersected so far. hitT = raydir.w; while (nodeAddr != EntrypointSentinel) // EntrypointSentinel = 0x76543210 { // Traverse internal nodes until all SIMD lanes have found a leaf. bool searchingLeaf = true; // flag required to increase efficiency of threads in warp while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel) { float4* ptr = (float4*)((char*)gpuNodes + nodeAddr); float4 n0xy = ptr[0]; // childnode 0, xy-bounds (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) float4 n1xy = ptr[1]; // childnode 1. 
xy-bounds (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y) float4 nz = ptr[2]; // childnodes 0 and 1, z-bounds(c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) // ptr[3] contains indices to 2 childnodes in case of innernode, see below // (childindex = size of array during building, see CudaBVH.cpp) // compute ray intersections with BVH node bounding box float c0lox = n0xy.x * idirx - oodx; // n0xy.x = c0.lo.x, child 0 minbound x float c0hix = n0xy.y * idirx - oodx; // n0xy.y = c0.hi.x, child 0 maxbound x float c0loy = n0xy.z * idiry - oody; // n0xy.z = c0.lo.y, child 0 minbound y float c0hiy = n0xy.w * idiry - oody; // n0xy.w = c0.hi.y, child 0 maxbound y float c0loz = nz.x * idirz - oodz; // nz.x = c0.lo.z, child 0 minbound z float c0hiz = nz.y * idirz - oodz; // nz.y = c0.hi.z, child 0 maxbound z float c1loz = nz.z * idirz - oodz; // nz.z = c1.lo.z, child 1 minbound z float c1hiz = nz.w * idirz - oodz; // nz.w = c1.hi.z, child 1 maxbound z float c0min = spanBeginKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); // Tesla does max4(min, min, min, tmin) float c0max = spanEndKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); // Tesla does min4(max, max, max, tmax) float c1lox = n1xy.x * idirx - oodx; // n1xy.x = c1.lo.x, child 1 minbound x float c1hix = n1xy.y * idirx - oodx; // n1xy.y = c1.hi.x, child 1 maxbound x float c1loy = n1xy.z * idiry - oody; // n1xy.z = c1.lo.y, child 1 minbound y float c1hiy = n1xy.w * idiry - oody; // n1xy.w = c1.hi.y, child 1 maxbound y float c1min = spanBeginKepler2(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin); float c1max = spanEndKepler2(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT); float ray_tmax = 1e20; bool traverseChild0 = (c0min <= c0max) && (c0min >= tmin) && (c0min <= ray_tmax); bool traverseChild1 = (c1min <= c1max) && (c1min >= tmin) && (c1min <= ray_tmax); if (!traverseChild0 && !traverseChild1) { nodeAddr = *(int*)stackPtr; // fetch next node by popping stack stackPtr -= 4; // popping decrements stack by 4 bytes (because stackPtr is a pointer to char) } // Otherwise => fetch child pointers. else // one or both children intersected { int2 cnodes = *(int2*)&ptr[3]; // set nodeAddr equal to intersected childnode (first childnode when both children are intersected) nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y; // Both children were intersected => push the farther one on the stack. if (traverseChild0 && traverseChild1) // store closest child in nodeAddr, swap if necessary { if (c1min < c0min) swap2(nodeAddr, cnodes.y); stackPtr += 4; // pushing increments stack by 4 bytes (stackPtr is a pointer to char) *(int*)stackPtr = cnodes.y; // push furthest node on the stack } } // First leaf => postpone and continue traversal. // leafnodes have a negative index to distinguish them from inner nodes // if nodeAddr less than 0 -> nodeAddr is a leaf if (nodeAddr < 0 && leafAddr >= 0) // if leafAddr >= 0 -> no leaf found yet (first leaf) { searchingLeaf = false; // required for warp efficiency leafAddr = nodeAddr; nodeAddr = *(int*)stackPtr; // pops next node from stack stackPtr -= 4; // decrement by 4 bytes (stackPtr is a pointer to char) } // All SIMD lanes have found a leaf => process them. // NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;". // tried everything with CUDA 4.2 but always got several redundant instructions. 
// if (!searchingLeaf){ break; } // if (!__any(searchingLeaf)) break; // "__any" keyword: if none of the threads is searching a leaf, in other words // if all threads in the warp found a leafnode, then break from while loop and go to triangle intersection // if(!__any(leafAddr >= 0)) /// als leafAddr in PTX code >= 0, dan is het geen echt leafNode // break; unsigned int mask; // mask replaces searchingLeaf in PTX code asm("{\n" " .reg .pred p; \n" "setp.ge.s32 p, %1, 0; \n" "vote.ballot.b32 %0,p; \n" "}" : "=r"(mask) : "r"(leafAddr)); if (!mask) break; } /////////////////////////////////////// /// LEAF NODE / TRIANGLE INTERSECTION /////////////////////////////////////// while (leafAddr < 0) // if leafAddr is negative, it points to an actual leafnode (when positive or 0 it's an innernode { // leafAddr is stored as negative number, see cidx[i] = ~triWoopData.getSize(); in CudaBVH.cpp for (int triAddr = ~leafAddr;; triAddr += 3) { // no defined upper limit for loop, continues until leaf terminator code 0x80000000 is encountered // Read first 16 bytes of the triangle. // fetch first triangle vertex float4 v0f = gpuDebugTris[triAddr + 0]; // End marker 0x80000000 (= negative zero) => all triangles in leaf processed. --> terminate if (__float_as_int(v0f.x) == 0x80000000) break; float4 v1f = gpuDebugTris[triAddr + 1]; float4 v2f = gpuDebugTris[triAddr + 2]; const Vec3f v0 = Vec3f(v0f.x, v0f.y, v0f.z); const Vec3f v1 = Vec3f(v1f.x, v1f.y, v1f.z); const Vec3f v2 = Vec3f(v2f.x, v2f.y, v2f.z); // convert float4 to Vec4f Vec4f rayorigvec4f = Vec4f(rayorig.x, rayorig.y, rayorig.z, rayorig.w); Vec4f raydirvec4f = Vec4f(raydir.x, raydir.y, raydir.z, raydir.w); Vec3f bary = intersectRayTriangle(v0, v1, v2, rayorigvec4f, raydirvec4f); float t = bary.z; // hit distance along ray if (t > tmin && t < hitT) // if there is a miss, t will be larger than hitT (ray.tmax) { hitIndex = triAddr; hitT = t; /// keeps track of closest hitpoint trinormal = cross(v0 - v1, v0 - v2); if (!needClosestHit){ // shadow rays only require "any" hit with scene geometry, not the closest one nodeAddr = EntrypointSentinel; break; } } } // triangle // Another leaf was postponed => process it as well. leafAddr = nodeAddr; if (nodeAddr < 0) { nodeAddr = *(int*)stackPtr; // pop stack stackPtr -= 4; // decrement with 4 bytes to get the next int (stackPtr is char*) } } // end leaf/triangle intersection loop } // end of node traversal loop // Remap intersected triangle index, and store the result. if (hitIndex != -1){ // remapping tri indices delayed until this point for performance reasons // (slow global memory lookup in de gpuTriIndices array) because multiple triangles per node can potentially be hit hitIndex = gpuTriIndices[hitIndex]; } hitTriIdx = hitIndex; hitdistance = hitT; } __device__ void intersectBVHandTriangles(const float4 rayorig, const float4 raydir, int& hitTriIdx, float& hitdistance, int& debugbingo, Vec3f& trinormal, int leafcount, int tricount, bool anyHit) { // assign a CUDA thread to every pixel by using the threadIndex // global threadId, see richiesams blogspot int thread_index = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; /////////////////////////////////////////// //// KEPLER KERNEL /////////////////////////////////////////// // BVH layout Compact2 for Kepler int traversalStack[STACK_SIZE]; // Live state during traversal, stored in registers. int rayidx; // not used, can be removed float origx, origy, origz; // Ray origin. 
float dirx, diry, dirz; // Ray direction. float tmin; // t-value from which the ray starts. Usually 0. float idirx, idiry, idirz; // 1 / ray direction float oodx, oody, oodz; // ray origin / ray direction char* stackPtr; // Current position in traversal stack. int leafAddr; // If negative, then first postponed leaf, non-negative if no leaf (innernode). int nodeAddr; int hitIndex; // Triangle index of the closest intersection, -1 if none. float hitT; // t-value of the closest intersection. int threadId1; // ipv rayidx // Initialize (stores local variables in registers) { // Pick ray index. threadId1 = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y)); // Fetch ray. // required when tracing ray batches // float4 o = rays[rayidx * 2 + 0]; // float4 d = rays[rayidx * 2 + 1]; //__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer. origx = rayorig.x; origy = rayorig.y; origz = rayorig.z; dirx = raydir.x; diry = raydir.y; dirz = raydir.z; tmin = rayorig.w; // ooeps is very small number, used instead of raydir xyz component when that component is near zero float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number idirx = 1.0f / (fabsf(raydir.x) > ooeps ? raydir.x : copysignf(ooeps, raydir.x)); // inverse ray direction idiry = 1.0f / (fabsf(raydir.y) > ooeps ? raydir.y : copysignf(ooeps, raydir.y)); // inverse ray direction idirz = 1.0f / (fabsf(raydir.z) > ooeps ? raydir.z : copysignf(ooeps, raydir.z)); // inverse ray direction oodx = origx * idirx; // ray origin / ray direction oody = origy * idiry; // ray origin / ray direction oodz = origz * idirz; // ray origin / ray direction // Setup traversal + initialisation traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 0x76543210 (1985229328 in decimal) stackPtr = (char*)&traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel leafAddr = 0; // No postponed leaf. nodeAddr = 0; // Start from the root. hitIndex = -1; // No triangle intersected so far. hitT = raydir.w; // tmax } // Traversal loop. while (nodeAddr != EntrypointSentinel) { // Traverse internal nodes until all SIMD lanes have found a leaf. bool searchingLeaf = true; // required for warp efficiency while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel) { // Fetch AABBs of the two child nodes. // nodeAddr is an offset in number of bytes (char) in gpuNodes array float4 n0xy = tex1Dfetch(bvhNodesTexture, nodeAddr); // childnode 0, xy-bounds (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) float4 n1xy = tex1Dfetch(bvhNodesTexture, nodeAddr + 1); // childnode 1, xy-bounds (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y) float4 nz = tex1Dfetch(bvhNodesTexture, nodeAddr + 2); // childnode 0 and 1, z-bounds (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) float4 tmp = tex1Dfetch(bvhNodesTexture, nodeAddr + 3); // contains indices to 2 childnodes in case of innernode, see below int2 cnodes = *(int2*)&tmp; // cast first two floats to int // (childindex = size of array during building, see CudaBVH.cpp) // compute ray intersections with BVH node bounding box /// RAY BOX INTERSECTION // Intersect the ray against the child nodes. 
float c0lox = n0xy.x * idirx - oodx; // n0xy.x = c0.lo.x, child 0 minbound x float c0hix = n0xy.y * idirx - oodx; // n0xy.y = c0.hi.x, child 0 maxbound x float c0loy = n0xy.z * idiry - oody; // n0xy.z = c0.lo.y, child 0 minbound y float c0hiy = n0xy.w * idiry - oody; // n0xy.w = c0.hi.y, child 0 maxbound y float c0loz = nz.x * idirz - oodz; // nz.x = c0.lo.z, child 0 minbound z float c0hiz = nz.y * idirz - oodz; // nz.y = c0.hi.z, child 0 maxbound z float c1loz = nz.z * idirz - oodz; // nz.z = c1.lo.z, child 1 minbound z float c1hiz = nz.w * idirz - oodz; // nz.w = c1.hi.z, child 1 maxbound z float c0min = spanBeginKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); // Tesla does max4(min, min, min, tmin) float c0max = spanEndKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); // Tesla does min4(max, max, max, tmax) float c1lox = n1xy.x * idirx - oodx; // n1xy.x = c1.lo.x, child 1 minbound x float c1hix = n1xy.y * idirx - oodx; // n1xy.y = c1.hi.x, child 1 maxbound x float c1loy = n1xy.z * idiry - oody; // n1xy.z = c1.lo.y, child 1 minbound y float c1hiy = n1xy.w * idiry - oody; // n1xy.w = c1.hi.y, child 1 maxbound y float c1min = spanBeginKepler(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin); float c1max = spanEndKepler(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT); // ray box intersection boundary tests: float ray_tmax = 1e20; bool traverseChild0 = (c0min <= c0max); // && (c0min >= tmin) && (c0min <= ray_tmax); bool traverseChild1 = (c1min <= c1max); // && (c1min >= tmin) && (c1min <= ray_tmax); // Neither child was intersected => pop stack. if (!traverseChild0 && !traverseChild1) { nodeAddr = *(int*)stackPtr; // fetch next node by popping the stack stackPtr -= 4; // popping decrements stackPtr by 4 bytes (because stackPtr is a pointer to char) } // Otherwise, one or both children intersected => fetch child pointers. else { // set nodeAddr equal to intersected childnode index (or first childnode when both children are intersected) nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y; // Both children were intersected => push the farther one on the stack. if (traverseChild0 && traverseChild1) // store closest child in nodeAddr, swap if necessary { if (c1min < c0min) swap2(nodeAddr, cnodes.y); stackPtr += 4; // pushing increments stack by 4 bytes (stackPtr is a pointer to char) *(int*)stackPtr = cnodes.y; // push furthest node on the stack } } // First leaf => postpone and continue traversal. // leafnodes have a negative index to distinguish them from inner nodes // if nodeAddr less than 0 -> nodeAddr is a leaf if (nodeAddr < 0 && leafAddr >= 0) { searchingLeaf = false; // required for warp efficiency leafAddr = nodeAddr; nodeAddr = *(int*)stackPtr; // pops next node from stack stackPtr -= 4; // decrements stackptr by 4 bytes (because stackPtr is a pointer to char) } // All SIMD lanes have found a leaf => process them. // to increase efficiency, check if all the threads in a warp have found a leaf before proceeding to the // ray/triangle intersection routine // this bit of code requires PTX (CUDA assembly) code to work properly // if (!__any(searchingLeaf)) -> "__any" keyword: if none of the threads is searching a leaf, in other words // if all threads in the warp found a leafnode, then break from while loop and go to triangle intersection //if(!__any(leafAddr >= 0)) // break; // if (!__any(searchingLeaf)) // break; /// break from while loop and go to code below, processing leaf nodes // NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;". 
// tried everything with CUDA 4.2 but always got several redundant instructions. unsigned int mask; // replaces searchingLeaf asm("{\n" " .reg .pred p; \n" "setp.ge.s32 p, %1, 0; \n" "vote.ballot.b32 %0,p; \n" "}" : "=r"(mask) : "r"(leafAddr)); if (!mask) break; } /////////////////////////////////////////// /// TRIANGLE INTERSECTION ////////////////////////////////////// // Process postponed leaf nodes. while (leafAddr < 0) /// if leafAddr is negative, it points to an actual leafnode (when positive or 0 it's an innernode) { // Intersect the ray against each triangle using Sven Woop's algorithm. // Woop ray triangle intersection: Woop triangles are unit triangles. Each ray // must be transformed to "unit triangle space", before testing for intersection for (int triAddr = ~leafAddr;; triAddr += 3) // triAddr is index in triWoop array (and bitwise complement of leafAddr) { // no defined upper limit for loop, continues until leaf terminator code 0x80000000 is encountered // Read first 16 bytes of the triangle. // fetch first precomputed triangle edge float4 v00 = tex1Dfetch(triWoopTexture, triAddr); // End marker 0x80000000 (negative zero) => all triangles in leaf processed --> terminate if (__float_as_int(v00.x) == 0x80000000) break; // Compute and check intersection t-value (hit distance along ray). float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z; // Origin z float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z); // inverse Direction z float t = Oz * invDz; if (t > tmin && t < hitT) { // Compute and check barycentric u. // fetch second precomputed triangle edge float4 v11 = tex1Dfetch(triWoopTexture, triAddr + 1); float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z; // Origin.x float Dx = dirx * v11.x + diry * v11.y + dirz * v11.z; // Direction.x float u = Ox + t * Dx; /// parametric equation of a ray (intersection point) if (u >= 0.0f && u <= 1.0f) { // Compute and check barycentric v. // fetch third precomputed triangle edge float4 v22 = tex1Dfetch(triWoopTexture, triAddr + 2); float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z; float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z; float v = Oy + t*Dy; if (v >= 0.0f && u + v <= 1.0f) { // We've got a hit! // Record intersection. hitT = t; hitIndex = triAddr; // store triangle index for shading // Closest intersection not required => terminate. if (anyHit) // only true for shadow rays { nodeAddr = EntrypointSentinel; break; } // compute normal vector by taking the cross product of two edge vectors // because of Woop transformation, only one set of vectors works //trinormal = cross(Vec3f(v22.x, v22.y, v22.z), Vec3f(v11.x, v11.y, v11.z)); // works trinormal = cross(Vec3f(v11.x, v11.y, v11.z), Vec3f(v22.x, v22.y, v22.z)); } } } } // end triangle intersection // Another leaf was postponed => process it as well. leafAddr = nodeAddr; if (nodeAddr < 0) // nodeAddr is an actual leaf when < 0 { nodeAddr = *(int*)stackPtr; // pop stack stackPtr -= 4; // decrement with 4 bytes to get the next int (stackPtr is char*) } } // end leaf/triangle intersection loop } // end traversal loop (AABB and triangle intersection) // Remap intersected triangle index, and store the result. 
if (hitIndex != -1){ hitIndex = tex1Dfetch(triIndicesTexture, hitIndex); // remapping tri indices delayed until this point for performance reasons // (slow texture memory lookup in de triIndicesTexture) because multiple triangles per node can potentially be hit } hitTriIdx = hitIndex; hitdistance = hitT; } // union struct required for mapping pixel colours to OpenGL buffer union Colour // 4 bytes = 4 chars = 1 float { float c; uchar4 components; }; __device__ Vec3f renderKernel(hiprandState_t* randstate, const float4* HDRmap, const float4* gpuNodes, const float4* gpuTriWoops, const float4* gpuDebugTris, const int* gpuTriIndices, Vec3f& rayorig, Vec3f& raydir, unsigned int leafcount, unsigned int tricount) { Vec3f mask = Vec3f(1.0f, 1.0f, 1.0f); // colour mask Vec3f accucolor = Vec3f(0.0f, 0.0f, 0.0f); // accumulated colour Vec3f direct = Vec3f(0, 0, 0); for (int bounces = 0; bounces < 4; bounces++){ // iteration up to 4 bounces (instead of recursion in CPU code) int hitSphereIdx = -1; int hitTriIdx = -1; int bestTriIdx = -1; int geomtype = -1; float hitSphereDist = 1e20; float hitDistance = 1e20; float scene_t = 1e20; Vec3f objcol = Vec3f(0, 0, 0); Vec3f emit = Vec3f(0, 0, 0); Vec3f hitpoint; // intersection point Vec3f n; // normal Vec3f nl; // oriented normal Vec3f nextdir; // ray direction of next path segment Vec3f trinormal = Vec3f(0, 0, 0); Refl_t refltype; float ray_tmin = 0.00001f; // set to 0.01f when using refractive material float ray_tmax = 1e20; // intersect all triangles in the scene stored in BVH int debugbingo = 0; intersectBVHandTriangles(make_float4(rayorig.x, rayorig.y, rayorig.z, ray_tmin), make_float4(raydir.x, raydir.y, raydir.z, ray_tmax), bestTriIdx, hitDistance, debugbingo, trinormal, leafcount, tricount, false); //DEBUGintersectBVHandTriangles(make_float4(rayorig.x, rayorig.y, rayorig.z, ray_tmin), make_float4(raydir.x, raydir.y, raydir.z, ray_tmax), //gpuNodes, gpuTriWoops, gpuDebugTris, gpuTriIndices, bestTriIdx, hitDistance, debugbingo, trinormal, leafcount, tricount, false); // intersect all spheres in the scene // float3 required for sphere intersection (to avoid "dynamic allocation not allowed" error) float3 rayorig_flt3 = make_float3(rayorig.x, rayorig.y, rayorig.z); float3 raydir_flt3 = make_float3(raydir.x, raydir.y, raydir.z); float numspheres = sizeof(spheres) / sizeof(Sphere); for (int i = int(numspheres); i--;) // for all spheres in scene // keep track of distance from origin to closest intersection point if ((hitSphereDist = spheres[i].intersect(Ray(rayorig_flt3, raydir_flt3))) && hitSphereDist < scene_t && hitSphereDist > 0.01f){ scene_t = hitSphereDist; hitSphereIdx = i; geomtype = 1; } if (hitDistance < scene_t && hitDistance > ray_tmin) // triangle hit { scene_t = hitDistance; hitTriIdx = bestTriIdx; geomtype = 2; } // sky gradient colour //float t = 0.5f * (raydir.y + 1.2f); //Vec3f skycolor = Vec3f(1.0f, 1.0f, 1.0f) * (1.0f - t) + Vec3f(0.9f, 0.3f, 0.0f) * t; #ifdef HDR // HDR if (scene_t > 1e19) { // if ray misses scene, return sky // HDR environment map code based on Syntopia "Path tracing 3D fractals" // http://blog.hvidtfeldts.net/index.php/2015/01/path-tracing-3d-fractals/ // https://github.com/Syntopia/Fragmentarium/blob/master/Fragmentarium-Source/Examples/Include/IBL-Pathtracer.frag // GLSL code: // vec3 equirectangularMap(sampler2D sampler, vec3 dir) { // dir = normalize(dir); // vec2 longlat = vec2(atan(dir.y, dir.x) + RotateMap, acos(dir.z)); // return texture2D(sampler, longlat / vec2(2.0*PI, PI)).xyz; } // Convert (normalized) dir 
to spherical coordinates. float longlatX = atan2f(raydir.x, raydir.z); // Y is up, swap x for y and z for x longlatX = longlatX < 0.f ? longlatX + TWO_PI : longlatX; // wrap around full circle if negative float longlatY = acosf(raydir.y); // add RotateMap at some point, see Fragmentarium // map theta and phi to u and v texturecoordinates in [0,1] x [0,1] range float offsetY = 0.5f; float u = longlatX / TWO_PI; // +offsetY; float v = longlatY / M_PI ; // map u, v to integer coordinates int u2 = (int)(u * HDRwidth); //% HDRwidth; int v2 = (int)(v * HDRheight); // % HDRheight; // compute the texel index in the HDR map int HDRtexelidx = u2 + v2 * HDRwidth; //float4 HDRcol = HDRmap[HDRtexelidx]; float4 HDRcol = tex1Dfetch(HDRtexture, HDRtexelidx); // fetch from texture Vec3f HDRcol2 = Vec3f(HDRcol.x, HDRcol.y, HDRcol.z); emit = HDRcol2 * 2.0f; accucolor += (mask * emit); return accucolor; } #endif // end of HDR // SPHERES: if (geomtype == 1){ Sphere &hitsphere = spheres[hitSphereIdx]; // hit object with closest intersection hitpoint = rayorig + raydir * scene_t; // intersection point on object n = Vec3f(hitpoint.x - hitsphere.pos.x, hitpoint.y - hitsphere.pos.y, hitpoint.z - hitsphere.pos.z); // normal n.normalize(); nl = dot(n, raydir) < 0 ? n : n * -1; // correctly oriented normal objcol = Vec3f(hitsphere.col.x, hitsphere.col.y, hitsphere.col.z); // object colour emit = Vec3f(hitsphere.emi.x, hitsphere.emi.y, hitsphere.emi.z); // object emission refltype = hitsphere.refl; accucolor += (mask * emit); } // TRIANGLES: if (geomtype == 2){ //pBestTri = &pTriangles[triangle_id]; hitpoint = rayorig + raydir * scene_t; // intersection point // float4 normal = tex1Dfetch(triNormalsTexture, pBestTriIdx); n = trinormal; n.normalize(); nl = dot(n, raydir) < 0 ? n : n * -1; // correctly oriented normal //Vec3f colour = hitTriIdx->_colorf; Vec3f colour = Vec3f(0.9f, 0.3f, 0.0f); // hardcoded triangle colour .9f, 0.3f, 0.0f refltype = COAT; // objectmaterial objcol = colour; emit = Vec3f(0.0, 0.0, 0); // object emission accucolor += (mask * emit); } // basic material system, all parameters are hard-coded (such as phong exponent, index of refraction) // diffuse material, based on smallpt by Kevin Beason if (refltype == DIFF){ // pick two random numbers float phi = 2 * M_PI * hiprand_uniform(randstate); float r2 = hiprand_uniform(randstate); float r2s = sqrtf(r2); // compute orthonormal coordinate frame uvw with hitpoint as origin Vec3f w = nl; w.normalize(); Vec3f u = cross((fabs(w.x) > .1 ? Vec3f(0, 1, 0) : Vec3f(1, 0, 0)), w); u.normalize(); Vec3f v = cross(w, u); // compute cosine weighted random ray direction on hemisphere nextdir = u*cosf(phi)*r2s + v*sinf(phi)*r2s + w*sqrtf(1 - r2); nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // scene size dependent // multiply mask with colour of object mask *= objcol; } // end diffuse material // Phong metal material from "Realistic Ray Tracing", P. 
Shirley if (refltype == METAL){ // compute random perturbation of ideal reflection vector // the higher the phong exponent, the closer the perturbed vector is to the ideal reflection direction float phi = 2 * M_PI * hiprand_uniform(randstate); float r2 = hiprand_uniform(randstate); float phongexponent = 30; float cosTheta = powf(1 - r2, 1.0f / (phongexponent + 1)); float sinTheta = sqrtf(1 - cosTheta * cosTheta); // create orthonormal basis uvw around reflection vector with hitpoint as origin // w is ray direction for ideal reflection Vec3f w = raydir - n * 2.0f * dot(n, raydir); w.normalize(); Vec3f u = cross((fabs(w.x) > .1 ? Vec3f(0, 1, 0) : Vec3f(1, 0, 0)), w); u.normalize(); Vec3f v = cross(w, u); // v is already normalised because w and u are normalised // compute cosine weighted random ray direction on hemisphere nextdir = u * cosf(phi) * sinTheta + v * sinf(phi) * sinTheta + w * cosTheta; nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.0001f; // scene size dependent // multiply mask with colour of object mask *= objcol; } // ideal specular reflection (mirror) if (refltype == SPEC){ // compute relfected ray direction according to Snell's law nextdir = raydir - n * dot(n, raydir) * 2.0f; nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // multiply mask with colour of object mask *= objcol; } // COAT material based on https://github.com/peterkutz/GPUPathTracer // randomly select diffuse or specular reflection // looks okay-ish but inaccurate (no Fresnel calculation yet) if (refltype == COAT){ float rouletteRandomFloat = hiprand_uniform(randstate); float threshold = 0.05f; Vec3f specularColor = Vec3f(1, 1, 1); // hard-coded bool reflectFromSurface = (rouletteRandomFloat < threshold); //computeFresnel(make_Vec3f(n.x, n.y, n.z), incident, incidentIOR, transmittedIOR, reflectionDirection, transmissionDirection).reflectionCoefficient); if (reflectFromSurface) { // calculate perfectly specular reflection // Ray reflected from the surface. Trace a ray in the reflection direction. // TODO: Use Russian roulette instead of simple multipliers! // (Selecting between diffuse sample and no sample (absorption) in this case.) mask *= specularColor; nextdir = raydir - n * 2.0f * dot(n, raydir); nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // scene size dependent } else { // calculate perfectly diffuse reflection float r1 = 2 * M_PI * hiprand_uniform(randstate); float r2 = hiprand_uniform(randstate); float r2s = sqrtf(r2); // compute orthonormal coordinate frame uvw with hitpoint as origin Vec3f w = nl; w.normalize(); Vec3f u = cross((fabs(w.x) > .1 ? Vec3f(0, 1, 0) : Vec3f(1, 0, 0)), w); u.normalize(); Vec3f v = cross(w, u); // compute cosine weighted random ray direction on hemisphere nextdir = u*cosf(r1)*r2s + v*sinf(r1)*r2s + w*sqrtf(1 - r2); nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // // scene size dependent // multiply mask with colour of object mask *= objcol; } } // end COAT // perfectly refractive material (glass, water) // set ray_tmin to 0.01 when using refractive material if (refltype == REFR){ bool into = dot(n, nl) > 0; // is ray entering or leaving refractive material? float nc = 1.0f; // Index of Refraction air float nt = 1.4f; // Index of Refraction glass/water float nnt = into ? 
nc / nt : nt / nc; // IOR ratio of refractive materials float ddn = dot(raydir, nl); float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn); if (cos2t < 0.0f) // total internal reflection { nextdir = raydir - n * 2.0f * dot(n, raydir); nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // scene size dependent } else // cos2t > 0 { // compute direction of transmission ray Vec3f tdir = raydir * nnt; tdir -= n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t))); tdir.normalize(); float R0 = (nt - nc)*(nt - nc) / (nt + nc)*(nt + nc); float c = 1.f - (into ? -ddn : dot(tdir, n)); float Re = R0 + (1.f - R0) * c * c * c * c * c; float Tr = 1 - Re; // Transmission float P = .25f + .5f * Re; float RP = Re / P; float TP = Tr / (1.f - P); // randomly choose reflection or transmission ray if (hiprand_uniform(randstate) < 0.2) // reflection ray { mask *= RP; nextdir = raydir - n * 2.0f * dot(n, raydir); nextdir.normalize(); hitpoint += nl * 0.001f; // scene size dependent } else // transmission ray { mask *= TP; nextdir = tdir; nextdir.normalize(); hitpoint += nl * 0.001f; // epsilon must be small to avoid artefacts } } } // set up origin and direction of next path segment rayorig = hitpoint; raydir = nextdir; } // end bounces for loop return accucolor; } __global__ void PathTracingKernel(Vec3f* output, Vec3f* accumbuffer, const float4* HDRmap, const float4* gpuNodes, const float4* gpuTriWoops, const float4* gpuDebugTris, const int* gpuTriIndices, unsigned int framenumber, unsigned int hashedframenumber, unsigned int leafcount, unsigned int tricount, const Camera* cudaRendercam) { // assign a CUDA thread to every pixel by using the threadIndex unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // global threadId, see richiesams blogspot int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; //int pixelx = threadId % scrwidth; // pixel x-coordinate on screen //int pixely = threadId / scrwidth; // pixel y-coordintate on screen // create random number generator and initialise with hashed frame number, see RichieSams blogspot hiprandState_t randState; // state of the random number generator, to prevent repetition hiprand_init(hashedframenumber + threadId, 0, 0, &randState); Vec3f finalcol; // final pixel colour finalcol = Vec3f(0.0f, 0.0f, 0.0f); // reset colour to zero for every pixel //Vec3f rendercampos = Vec3f(0, 0.2, 4.6f); Vec3f rendercampos = Vec3f(cudaRendercam->position.x, cudaRendercam->position.y, cudaRendercam->position.z); int i = (scrheight - y - 1) * scrwidth + x; // pixel index in buffer int pixelx = x; // pixel x-coordinate on screen int pixely = scrheight - y - 1; // pixel y-coordintate on screen Vec3f camdir = Vec3f(0, -0.042612, -1); camdir.normalize(); Vec3f cx = Vec3f(scrwidth * .5135f / scrheight, 0.0f, 0.0f); // ray direction offset along X-axis Vec3f cy = (cross(cx, camdir)).normalize() * .5135f; // ray dir offset along Y-axis, .5135 is FOV angle for (int s = 0; s < samps; s++) { // compute primary ray direction // use camera view of current frame (transformed on CPU side) to create local orthonormal basis Vec3f rendercamview = Vec3f(cudaRendercam->view.x, cudaRendercam->view.y, cudaRendercam->view.z); rendercamview.normalize(); // view is already supposed to be normalized, but normalize it explicitly just in case. 
Vec3f rendercamup = Vec3f(cudaRendercam->up.x, cudaRendercam->up.y, cudaRendercam->up.z); rendercamup.normalize(); Vec3f horizontalAxis = cross(rendercamview, rendercamup); horizontalAxis.normalize(); // Important to normalize! Vec3f verticalAxis = cross(horizontalAxis, rendercamview); verticalAxis.normalize(); // verticalAxis is normalized by default, but normalize it explicitly just for good measure. Vec3f middle = rendercampos + rendercamview; Vec3f horizontal = horizontalAxis * tanf(cudaRendercam->fov.x * 0.5 * (M_PI / 180)); // Treating FOV as the full FOV, not half, so multiplied by 0.5 Vec3f vertical = verticalAxis * tanf(-cudaRendercam->fov.y * 0.5 * (M_PI / 180)); // Treating FOV as the full FOV, not half, so multiplied by 0.5 // anti-aliasing // calculate center of current pixel and add random number in X and Y dimension // based on https://github.com/peterkutz/GPUPathTracer float jitterValueX = hiprand_uniform(&randState) - 0.5; float jitterValueY = hiprand_uniform(&randState) - 0.5; float sx = (jitterValueX + pixelx) / (cudaRendercam->resolution.x - 1); float sy = (jitterValueY + pixely) / (cudaRendercam->resolution.y - 1); // compute pixel on screen Vec3f pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1)); Vec3f pointOnImagePlane = rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * cudaRendercam->focalDistance); // Important for depth of field! // calculation of depth of field / camera aperture // based on https://github.com/peterkutz/GPUPathTracer Vec3f aperturePoint = Vec3f(0, 0, 0); if (cudaRendercam->apertureRadius > 0.00001) { // the small number is an epsilon value. // generate random numbers for sampling a point on the aperture float random1 = hiprand_uniform(&randState); float random2 = hiprand_uniform(&randState); // randomly pick a point on the circular aperture float angle = TWO_PI * random1; float distance = cudaRendercam->apertureRadius * sqrtf(random2); float apertureX = cos(angle) * distance; float apertureY = sin(angle) * distance; aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY); } else { // zero aperture aperturePoint = rendercampos; } // calculate ray direction of next ray in path Vec3f apertureToImagePlane = pointOnImagePlane - aperturePoint; apertureToImagePlane.normalize(); // ray direction needs to be normalised // ray direction Vec3f rayInWorldSpace = apertureToImagePlane; rayInWorldSpace.normalize(); // ray origin Vec3f originInWorldSpace = aperturePoint; finalcol += renderKernel(&randState, HDRmap, gpuNodes, gpuTriWoops, gpuDebugTris, gpuTriIndices, originInWorldSpace, rayInWorldSpace, leafcount, tricount) * (1.0f / samps); } // add pixel colour to accumulation buffer (accumulates all samples) accumbuffer[i] += finalcol; // averaged colour: divide colour by the number of calculated frames so far Vec3f tempcol = accumbuffer[i] / framenumber; Colour fcolour; Vec3f colour = Vec3f(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f)); // convert from 96-bit to 24-bit colour + perform gamma correction fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / 2.2f) * 255), (unsigned char)(powf(colour.y, 1 / 2.2f) * 255), (unsigned char)(powf(colour.z, 1 / 2.2f) * 255), 1); // store pixel coordinates and pixelcolour in OpenGL readable outputbuffer output[i] = Vec3f(x, y, fcolour.c); } bool firstTime = true; // the gateway to CUDA, called from C++ (in void disp() in main.cpp) void cudaRender(const float4* 
nodes, const float4* triWoops, const float4* debugTris, const int* triInds, Vec3f* outputbuf, Vec3f* accumbuf, const float4* HDRmap, const unsigned int framenumber, const unsigned int hashedframenumber, const unsigned int nodeSize, const unsigned int leafnodecnt, const unsigned int tricnt, const Camera* cudaRenderCam){ if (firstTime) { // if this is the first time cudarender() is called, // bind the scene data to CUDA textures! firstTime = false; hipChannelFormatDesc channel0desc = hipCreateChannelDesc<int>(); hipBindTexture(NULL, &triIndicesTexture, triInds, &channel0desc, (tricnt * 3 + leafnodecnt) * sizeof(int)); // is tricnt wel juist?? hipChannelFormatDesc channel1desc = hipCreateChannelDesc<float4>(); hipBindTexture(NULL, &triWoopTexture, triWoops, &channel1desc, (tricnt * 3 + leafnodecnt) * sizeof(float4)); hipChannelFormatDesc channel3desc = hipCreateChannelDesc<float4>(); hipBindTexture(NULL, &bvhNodesTexture, nodes, &channel3desc, nodeSize * sizeof(float4)); HDRtexture.filterMode = hipFilterModeLinear; hipChannelFormatDesc channel4desc = hipCreateChannelDesc<float4>(); hipBindTexture(NULL, &HDRtexture, HDRmap, &channel4desc, HDRwidth * HDRheight * sizeof(float4)); // 2k map: printf("CudaWoopTriangles texture initialised, tri count: %d\n", tricnt); } dim3 block(16, 16, 1); // dim3 CUDA specific syntax, block and grid are required to schedule CUDA threads over streaming multiprocessors dim3 grid(scrwidth / block.x, scrheight / block.y, 1); // Configure grid and block sizes: int threadsPerBlock = 256; // Compute the number of blocks required, performing a ceiling operation to make sure there are enough: int fullBlocksPerGrid = ((scrwidth * scrheight) + threadsPerBlock - 1) / threadsPerBlock; // <<<fullBlocksPerGrid, threadsPerBlock>>> PathTracingKernel << <grid, block >> >(outputbuf, accumbuf, HDRmap, nodes, triWoops, debugTris, triInds, framenumber, hashedframenumber, leafnodecnt, tricnt, cudaRenderCam); // texdata, texoffsets }
d247ca8e0d2285695331919131198aaaeeacf1d4.cu
// BVH traversal kernels based on "Understanding the #include <cuda.h> #include <math_functions.h> #include <vector_types.h> #include <vector_functions.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> #include "CudaRenderKernel.h" #include "stdio.h" #include <curand.h> #include <curand_kernel.h> #include "cutil_math.h" // required for float3 #define STACK_SIZE 64 // Size of the traversal stack in local memory. #define M_PI 3.1415926535897932384626422832795028841971f #define TWO_PI 6.2831853071795864769252867665590057683943f #define DYNAMIC_FETCH_THRESHOLD 20 // If fewer than this active, fetch new rays #define samps 1 #define F32_MIN (1.175494351e-38f) #define F32_MAX (3.402823466e+38f) #define HDRwidth 3200 #define HDRheight 1600 #define HDR #define EntrypointSentinel 0x76543210 #define MaxBlockHeight 6 enum Refl_t { DIFF, METAL, SPEC, REFR, COAT }; // material types // CUDA textures containing scene data texture<float4, 1, cudaReadModeElementType> bvhNodesTexture; texture<float4, 1, cudaReadModeElementType> triWoopTexture; texture<float4, 1, cudaReadModeElementType> triNormalsTexture; texture<int, 1, cudaReadModeElementType> triIndicesTexture; texture<float4, 1, cudaReadModeElementType> HDRtexture; __device__ inline Vec3f absmax3f(const Vec3f& v1, const Vec3f& v2){ return Vec3f(v1.x*v1.x > v2.x*v2.x ? v1.x : v2.x, v1.y*v1.y > v2.y*v2.y ? v1.y : v2.y, v1.z*v1.z > v2.z*v2.z ? v1.z : v2.z); } struct Ray { float3 orig; // ray origin float3 dir; // ray direction __device__ Ray(float3 o_, float3 d_) : orig(o_), dir(d_) {} }; struct Sphere { float rad; // radius float3 pos, emi, col; // position, emission, color Refl_t refl; // reflection type (DIFFuse, SPECular, REFRactive) __device__ float intersect(const Ray &r) const { // returns distance, 0 if nohit // ray/sphere intersection float3 op = pos - r.orig; float t, epsilon = 0.01f; float b = dot(op, r.dir); float disc = b*b - dot(op, op) + rad*rad; // discriminant of quadratic formula if (disc<0) return 0; else disc = sqrtf(disc); return (t = b - disc) > epsilon ? t : ((t = b + disc) > epsilon ? 
t : 0.0f); } }; __constant__ Sphere spheres[] = { // sun //{ 10000, { 50.0f, 40.8f, -1060 }, { 0.3, 0.3, 0.3 }, { 0.175f, 0.175f, 0.25f }, DIFF }, // sky 0.003, 0.003, 0.003 //{ 4.5, { 0.0f, 12.5, 0 }, { 6, 4, 1 }, { .6f, .6f, 0.6f }, DIFF }, /// lightsource { 10000.02, { 50.0f, -10001.35, 0 }, { 0.0, 0.0, 0 }, { 0.3f, 0.3f, 0.3f }, DIFF }, // ground 300/-301.0 //{ 10000, { 50.0f, -10000.1, 0 }, { 0, 0, 0 }, { 0.3f, 0.3f, 0.3f }, DIFF }, // double shell to prevent light leaking //{ 110000, { 50.0f, -110048.5, 0 }, { 3.6, 2.0, 0.2 }, { 0.f, 0.f, 0.f }, DIFF }, // horizon brightener //{ 0.5, { 30.0f, 180.5, 42 }, { 0, 0, 0 }, { .6f, .6f, 0.6f }, DIFF }, // small sphere 1 //{ 0.8, { 2.0f, 0.f, 0 }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.8f }, SPEC }, // small sphere 2 //{ 0.8, { -3.0f, 0.f, 0 }, { 0.0, 0.0, 0.0 }, { 0.0f, 0.0f, 0.2f }, COAT }, // small sphere 2 { 2.5, { -6.0f, 0.5f, 0.0f }, { 0.0, 0.0, 0.0 }, { 0.9f, 0.9f, 0.9f }, SPEC }, // small sphere 2 //{ 0.6, { -10.0f, -2.f, 1.0f }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.8f }, DIFF }, // small sphere 2 //{ 0.8, { -1.0f, -0.7f, 4.0f }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.8f }, REFR }, // small sphere 2 //{ 9.4, { 9.0f, 0.f, -9.0f }, { 0.0, 0.0, 0.0 }, { 0.8f, 0.8f, 0.f }, DIFF }, // small sphere 2 //{ 22, { 105.0f, 22, 24 }, { 0, 0, 0 }, { 0.9f, 0.9f, 0.9f }, DIFF }, // small sphere 3 }; // RAY BOX INTERSECTION ROUTINES // Experimentally determined best mix of float/int/video minmax instructions for Kepler. // float c0min = spanBeginKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); // Tesla does max4(min, min, min, tmin) // float c0max = spanEndKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); // Tesla does min4(max, max, max, tmax) // Perform min/max operations in hardware // Using Kepler's video instructions, see http://docs.nvidia.com/cuda/parallel-thread-execution/#axzz3jbhbcTZf // : "=r"(v) overwrites v and puts it in a register // see https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html __device__ __inline__ int min_min(int a, int b, int c) { int v; asm("vmin.s32.s32.s32.min %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ int min_max(int a, int b, int c) { int v; asm("vmin.s32.s32.s32.max %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ int max_min(int a, int b, int c) { int v; asm("vmax.s32.s32.s32.min %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ int max_max(int a, int b, int c) { int v; asm("vmax.s32.s32.s32.max %0, %1, %2, %3;" : "=r"(v) : "r"(a), "r"(b), "r"(c)); return v; } __device__ __inline__ float fmin_fmin(float a, float b, float c) { return __int_as_float(min_min(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float fmin_fmax(float a, float b, float c) { return __int_as_float(min_max(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float fmax_fmin(float a, float b, float c) { return __int_as_float(max_min(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float fmax_fmax(float a, float b, float c) { return __int_as_float(max_max(__float_as_int(a), __float_as_int(b), __float_as_int(c))); } __device__ __inline__ float spanBeginKepler(float a0, float a1, float b0, float b1, float c0, float c1, float d){ return fmax_fmax(fminf(a0, a1), fminf(b0, b1), fmin_fmax(c0, c1, d)); } __device__ __inline__ float spanEndKepler(float a0, float a1, float b0, float b1, float c0, float c1, float d) { return 
fmin_fmin(fmaxf(a0, a1), fmaxf(b0, b1), fmax_fmin(c0, c1, d)); } // standard ray box intersection routines (for debugging purposes only) // based on Intersect::RayBox() in original Aila/Laine code __device__ __inline__ float spanBeginKepler2(float lo_x, float hi_x, float lo_y, float hi_y, float lo_z, float hi_z, float d){ Vec3f t0 = Vec3f(lo_x, lo_y, lo_z); Vec3f t1 = Vec3f(hi_x, hi_y, hi_z); Vec3f realmin = min3f(t0, t1); float raybox_tmin = realmin.max(); // maxmin //return Vec2f(tmin, tmax); return raybox_tmin; } __device__ __inline__ float spanEndKepler2(float lo_x, float hi_x, float lo_y, float hi_y, float lo_z, float hi_z, float d){ Vec3f t0 = Vec3f(lo_x, lo_y, lo_z); Vec3f t1 = Vec3f(hi_x, hi_y, hi_z); Vec3f realmax = max3f(t0, t1); float raybox_tmax = realmax.min(); /// minmax //return Vec2f(tmin, tmax); return raybox_tmax; } __device__ __inline__ void swap2(int& a, int& b){ int temp = a; a = b; b = temp;} // standard ray triangle intersection routines (for debugging purposes only) // based on Intersect::RayTriangle() in original Aila/Laine code __device__ Vec3f intersectRayTriangle(const Vec3f& v0, const Vec3f& v1, const Vec3f& v2, const Vec4f& rayorig, const Vec4f& raydir){ const Vec3f rayorig3f = Vec3f(rayorig.x, rayorig.y, rayorig.z); const Vec3f raydir3f = Vec3f(raydir.x, raydir.y, raydir.z); const float EPSILON = 0.00001f; // works better const Vec3f miss(F32_MAX, F32_MAX, F32_MAX); float raytmin = rayorig.w; float raytmax = raydir.w; Vec3f edge1 = v1 - v0; Vec3f edge2 = v2 - v0; Vec3f tvec = rayorig3f - v0; Vec3f pvec = cross(raydir3f, edge2); float det = dot(edge1, pvec); float invdet = 1.0f / det; float u = dot(tvec, pvec) * invdet; Vec3f qvec = cross(tvec, edge1); float v = dot(raydir3f, qvec) * invdet; if (det > EPSILON) { if (u < 0.0f || u > 1.0f) return miss; // 1.0 want = det * 1/det if (v < 0.0f || (u + v) > 1.0f) return miss; // if u and v are within these bounds, continue and go to float t = dot(... } else if (det < -EPSILON) { if (u > 0.0f || u < 1.0f) return miss; if (v > 0.0f || (u + v) < 1.0f) return miss; // else continue } else // if det is not larger (more positive) than EPSILON or not smaller (more negative) than -EPSILON, there is a "miss" return miss; float t = dot(edge2, qvec) * invdet; if (t > raytmin && t < raytmax) return Vec3f(u, v, t); // otherwise (t < raytmin or t > raytmax) miss return miss; } // modified intersection routine (uses regular instead of woopified triangles) for debugging purposes __device__ void DEBUGintersectBVHandTriangles(const float4 rayorig, const float4 raydir, const float4* gpuNodes, const float4* gpuTriWoops, const float4* gpuDebugTris, const int* gpuTriIndices, int& hitTriIdx, float& hitdistance, int& debugbingo, Vec3f& trinormal, int leafcount, int tricount, bool needClosestHit){ int traversalStack[STACK_SIZE]; float origx, origy, origz; // Ray origin. float dirx, diry, dirz; // Ray direction. float tmin; // t-value from which the ray starts. Usually 0. 
float idirx, idiry, idirz; // 1 / dir float oodx, oody, oodz; // orig / dir char* stackPtr; int leafAddr; int nodeAddr; int hitIndex; float hitT; int threadId1; threadId1 = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y)); origx = rayorig.x; origy = rayorig.y; origz = rayorig.z; dirx = raydir.x; diry = raydir.y; dirz = raydir.z; tmin = rayorig.w; // ooeps is very small number, used instead of raydir xyz component when that component is near zero float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number idirx = 1.0f / (fabsf(raydir.x) > ooeps ? raydir.x : copysignf(ooeps, raydir.x)); // inverse ray direction idiry = 1.0f / (fabsf(raydir.y) > ooeps ? raydir.y : copysignf(ooeps, raydir.y)); // inverse ray direction idirz = 1.0f / (fabsf(raydir.z) > ooeps ? raydir.z : copysignf(ooeps, raydir.z)); // inverse ray direction oodx = origx * idirx; // ray origin / ray direction oody = origy * idiry; // ray origin / ray direction oodz = origz * idirz; // ray origin / ray direction traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 0x76543210 is 1985229328 in decimal stackPtr = (char*)&traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel leafAddr = 0; // No postponed leaf. nodeAddr = 0; // Start from the root. hitIndex = -1; // No triangle intersected so far. hitT = raydir.w; while (nodeAddr != EntrypointSentinel) // EntrypointSentinel = 0x76543210 { // Traverse internal nodes until all SIMD lanes have found a leaf. bool searchingLeaf = true; // flag required to increase efficiency of threads in warp while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel) { float4* ptr = (float4*)((char*)gpuNodes + nodeAddr); float4 n0xy = ptr[0]; // childnode 0, xy-bounds (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) float4 n1xy = ptr[1]; // childnode 1. 
xy-bounds (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y) float4 nz = ptr[2]; // childnodes 0 and 1, z-bounds(c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) // ptr[3] contains indices to 2 childnodes in case of innernode, see below // (childindex = size of array during building, see CudaBVH.cpp) // compute ray intersections with BVH node bounding box float c0lox = n0xy.x * idirx - oodx; // n0xy.x = c0.lo.x, child 0 minbound x float c0hix = n0xy.y * idirx - oodx; // n0xy.y = c0.hi.x, child 0 maxbound x float c0loy = n0xy.z * idiry - oody; // n0xy.z = c0.lo.y, child 0 minbound y float c0hiy = n0xy.w * idiry - oody; // n0xy.w = c0.hi.y, child 0 maxbound y float c0loz = nz.x * idirz - oodz; // nz.x = c0.lo.z, child 0 minbound z float c0hiz = nz.y * idirz - oodz; // nz.y = c0.hi.z, child 0 maxbound z float c1loz = nz.z * idirz - oodz; // nz.z = c1.lo.z, child 1 minbound z float c1hiz = nz.w * idirz - oodz; // nz.w = c1.hi.z, child 1 maxbound z float c0min = spanBeginKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); // Tesla does max4(min, min, min, tmin) float c0max = spanEndKepler2(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); // Tesla does min4(max, max, max, tmax) float c1lox = n1xy.x * idirx - oodx; // n1xy.x = c1.lo.x, child 1 minbound x float c1hix = n1xy.y * idirx - oodx; // n1xy.y = c1.hi.x, child 1 maxbound x float c1loy = n1xy.z * idiry - oody; // n1xy.z = c1.lo.y, child 1 minbound y float c1hiy = n1xy.w * idiry - oody; // n1xy.w = c1.hi.y, child 1 maxbound y float c1min = spanBeginKepler2(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin); float c1max = spanEndKepler2(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT); float ray_tmax = 1e20; bool traverseChild0 = (c0min <= c0max) && (c0min >= tmin) && (c0min <= ray_tmax); bool traverseChild1 = (c1min <= c1max) && (c1min >= tmin) && (c1min <= ray_tmax); if (!traverseChild0 && !traverseChild1) { nodeAddr = *(int*)stackPtr; // fetch next node by popping stack stackPtr -= 4; // popping decrements stack by 4 bytes (because stackPtr is a pointer to char) } // Otherwise => fetch child pointers. else // one or both children intersected { int2 cnodes = *(int2*)&ptr[3]; // set nodeAddr equal to intersected childnode (first childnode when both children are intersected) nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y; // Both children were intersected => push the farther one on the stack. if (traverseChild0 && traverseChild1) // store closest child in nodeAddr, swap if necessary { if (c1min < c0min) swap2(nodeAddr, cnodes.y); stackPtr += 4; // pushing increments stack by 4 bytes (stackPtr is a pointer to char) *(int*)stackPtr = cnodes.y; // push furthest node on the stack } } // First leaf => postpone and continue traversal. // leafnodes have a negative index to distinguish them from inner nodes // if nodeAddr less than 0 -> nodeAddr is a leaf if (nodeAddr < 0 && leafAddr >= 0) // if leafAddr >= 0 -> no leaf found yet (first leaf) { searchingLeaf = false; // required for warp efficiency leafAddr = nodeAddr; nodeAddr = *(int*)stackPtr; // pops next node from stack stackPtr -= 4; // decrement by 4 bytes (stackPtr is a pointer to char) } // All SIMD lanes have found a leaf => process them. // NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;". // tried everything with CUDA 4.2 but always got several redundant instructions. 
// if (!searchingLeaf){ break; } // if (!__any(searchingLeaf)) break; // "__any" keyword: if none of the threads is searching a leaf, in other words // if all threads in the warp found a leafnode, then break from while loop and go to triangle intersection // if(!__any(leafAddr >= 0)) /// als leafAddr in PTX code >= 0, dan is het geen echt leafNode // break; unsigned int mask; // mask replaces searchingLeaf in PTX code asm("{\n" " .reg .pred p; \n" "setp.ge.s32 p, %1, 0; \n" "vote.ballot.b32 %0,p; \n" "}" : "=r"(mask) : "r"(leafAddr)); if (!mask) break; } /////////////////////////////////////// /// LEAF NODE / TRIANGLE INTERSECTION /////////////////////////////////////// while (leafAddr < 0) // if leafAddr is negative, it points to an actual leafnode (when positive or 0 it's an innernode { // leafAddr is stored as negative number, see cidx[i] = ~triWoopData.getSize(); in CudaBVH.cpp for (int triAddr = ~leafAddr;; triAddr += 3) { // no defined upper limit for loop, continues until leaf terminator code 0x80000000 is encountered // Read first 16 bytes of the triangle. // fetch first triangle vertex float4 v0f = gpuDebugTris[triAddr + 0]; // End marker 0x80000000 (= negative zero) => all triangles in leaf processed. --> terminate if (__float_as_int(v0f.x) == 0x80000000) break; float4 v1f = gpuDebugTris[triAddr + 1]; float4 v2f = gpuDebugTris[triAddr + 2]; const Vec3f v0 = Vec3f(v0f.x, v0f.y, v0f.z); const Vec3f v1 = Vec3f(v1f.x, v1f.y, v1f.z); const Vec3f v2 = Vec3f(v2f.x, v2f.y, v2f.z); // convert float4 to Vec4f Vec4f rayorigvec4f = Vec4f(rayorig.x, rayorig.y, rayorig.z, rayorig.w); Vec4f raydirvec4f = Vec4f(raydir.x, raydir.y, raydir.z, raydir.w); Vec3f bary = intersectRayTriangle(v0, v1, v2, rayorigvec4f, raydirvec4f); float t = bary.z; // hit distance along ray if (t > tmin && t < hitT) // if there is a miss, t will be larger than hitT (ray.tmax) { hitIndex = triAddr; hitT = t; /// keeps track of closest hitpoint trinormal = cross(v0 - v1, v0 - v2); if (!needClosestHit){ // shadow rays only require "any" hit with scene geometry, not the closest one nodeAddr = EntrypointSentinel; break; } } } // triangle // Another leaf was postponed => process it as well. leafAddr = nodeAddr; if (nodeAddr < 0) { nodeAddr = *(int*)stackPtr; // pop stack stackPtr -= 4; // decrement with 4 bytes to get the next int (stackPtr is char*) } } // end leaf/triangle intersection loop } // end of node traversal loop // Remap intersected triangle index, and store the result. if (hitIndex != -1){ // remapping tri indices delayed until this point for performance reasons // (slow global memory lookup in de gpuTriIndices array) because multiple triangles per node can potentially be hit hitIndex = gpuTriIndices[hitIndex]; } hitTriIdx = hitIndex; hitdistance = hitT; } __device__ void intersectBVHandTriangles(const float4 rayorig, const float4 raydir, int& hitTriIdx, float& hitdistance, int& debugbingo, Vec3f& trinormal, int leafcount, int tricount, bool anyHit) { // assign a CUDA thread to every pixel by using the threadIndex // global threadId, see richiesams blogspot int thread_index = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; /////////////////////////////////////////// //// KEPLER KERNEL /////////////////////////////////////////// // BVH layout Compact2 for Kepler int traversalStack[STACK_SIZE]; // Live state during traversal, stored in registers. int rayidx; // not used, can be removed float origx, origy, origz; // Ray origin. 
float dirx, diry, dirz; // Ray direction. float tmin; // t-value from which the ray starts. Usually 0. float idirx, idiry, idirz; // 1 / ray direction float oodx, oody, oodz; // ray origin / ray direction char* stackPtr; // Current position in traversal stack. int leafAddr; // If negative, then first postponed leaf, non-negative if no leaf (innernode). int nodeAddr; int hitIndex; // Triangle index of the closest intersection, -1 if none. float hitT; // t-value of the closest intersection. int threadId1; // ipv rayidx // Initialize (stores local variables in registers) { // Pick ray index. threadId1 = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * (blockIdx.x + gridDim.x * blockIdx.y)); // Fetch ray. // required when tracing ray batches // float4 o = rays[rayidx * 2 + 0]; // float4 d = rays[rayidx * 2 + 1]; //__shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer. origx = rayorig.x; origy = rayorig.y; origz = rayorig.z; dirx = raydir.x; diry = raydir.y; dirz = raydir.z; tmin = rayorig.w; // ooeps is very small number, used instead of raydir xyz component when that component is near zero float ooeps = exp2f(-80.0f); // Avoid div by zero, returns 1/2^80, an extremely small number idirx = 1.0f / (fabsf(raydir.x) > ooeps ? raydir.x : copysignf(ooeps, raydir.x)); // inverse ray direction idiry = 1.0f / (fabsf(raydir.y) > ooeps ? raydir.y : copysignf(ooeps, raydir.y)); // inverse ray direction idirz = 1.0f / (fabsf(raydir.z) > ooeps ? raydir.z : copysignf(ooeps, raydir.z)); // inverse ray direction oodx = origx * idirx; // ray origin / ray direction oody = origy * idiry; // ray origin / ray direction oodz = origz * idirz; // ray origin / ray direction // Setup traversal + initialisation traversalStack[0] = EntrypointSentinel; // Bottom-most entry. 0x76543210 (1985229328 in decimal) stackPtr = (char*)&traversalStack[0]; // point stackPtr to bottom of traversal stack = EntryPointSentinel leafAddr = 0; // No postponed leaf. nodeAddr = 0; // Start from the root. hitIndex = -1; // No triangle intersected so far. hitT = raydir.w; // tmax } // Traversal loop. while (nodeAddr != EntrypointSentinel) { // Traverse internal nodes until all SIMD lanes have found a leaf. bool searchingLeaf = true; // required for warp efficiency while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel) { // Fetch AABBs of the two child nodes. // nodeAddr is an offset in number of bytes (char) in gpuNodes array float4 n0xy = tex1Dfetch(bvhNodesTexture, nodeAddr); // childnode 0, xy-bounds (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) float4 n1xy = tex1Dfetch(bvhNodesTexture, nodeAddr + 1); // childnode 1, xy-bounds (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y) float4 nz = tex1Dfetch(bvhNodesTexture, nodeAddr + 2); // childnode 0 and 1, z-bounds (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) float4 tmp = tex1Dfetch(bvhNodesTexture, nodeAddr + 3); // contains indices to 2 childnodes in case of innernode, see below int2 cnodes = *(int2*)&tmp; // cast first two floats to int // (childindex = size of array during building, see CudaBVH.cpp) // compute ray intersections with BVH node bounding box /// RAY BOX INTERSECTION // Intersect the ray against the child nodes. 
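			// Slab test: since oodx = origx * idirx, the expression (n0xy.x * idirx - oodx)
			// equals (c0.lo.x - origx) / dirx, i.e. the ray parameter at which the ray crosses
			// that bounding plane. spanBeginKepler takes the largest per-axis entry value
			// (clamped below by tmin) and spanEndKepler the smallest per-axis exit value
			// (clamped above by hitT); a child box is hit when the resulting span is non-empty.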
float c0lox = n0xy.x * idirx - oodx; // n0xy.x = c0.lo.x, child 0 minbound x float c0hix = n0xy.y * idirx - oodx; // n0xy.y = c0.hi.x, child 0 maxbound x float c0loy = n0xy.z * idiry - oody; // n0xy.z = c0.lo.y, child 0 minbound y float c0hiy = n0xy.w * idiry - oody; // n0xy.w = c0.hi.y, child 0 maxbound y float c0loz = nz.x * idirz - oodz; // nz.x = c0.lo.z, child 0 minbound z float c0hiz = nz.y * idirz - oodz; // nz.y = c0.hi.z, child 0 maxbound z float c1loz = nz.z * idirz - oodz; // nz.z = c1.lo.z, child 1 minbound z float c1hiz = nz.w * idirz - oodz; // nz.w = c1.hi.z, child 1 maxbound z float c0min = spanBeginKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); // Tesla does max4(min, min, min, tmin) float c0max = spanEndKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); // Tesla does min4(max, max, max, tmax) float c1lox = n1xy.x * idirx - oodx; // n1xy.x = c1.lo.x, child 1 minbound x float c1hix = n1xy.y * idirx - oodx; // n1xy.y = c1.hi.x, child 1 maxbound x float c1loy = n1xy.z * idiry - oody; // n1xy.z = c1.lo.y, child 1 minbound y float c1hiy = n1xy.w * idiry - oody; // n1xy.w = c1.hi.y, child 1 maxbound y float c1min = spanBeginKepler(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin); float c1max = spanEndKepler(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT); // ray box intersection boundary tests: float ray_tmax = 1e20; bool traverseChild0 = (c0min <= c0max); // && (c0min >= tmin) && (c0min <= ray_tmax); bool traverseChild1 = (c1min <= c1max); // && (c1min >= tmin) && (c1min <= ray_tmax); // Neither child was intersected => pop stack. if (!traverseChild0 && !traverseChild1) { nodeAddr = *(int*)stackPtr; // fetch next node by popping the stack stackPtr -= 4; // popping decrements stackPtr by 4 bytes (because stackPtr is a pointer to char) } // Otherwise, one or both children intersected => fetch child pointers. else { // set nodeAddr equal to intersected childnode index (or first childnode when both children are intersected) nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y; // Both children were intersected => push the farther one on the stack. if (traverseChild0 && traverseChild1) // store closest child in nodeAddr, swap if necessary { if (c1min < c0min) swap2(nodeAddr, cnodes.y); stackPtr += 4; // pushing increments stack by 4 bytes (stackPtr is a pointer to char) *(int*)stackPtr = cnodes.y; // push furthest node on the stack } } // First leaf => postpone and continue traversal. // leafnodes have a negative index to distinguish them from inner nodes // if nodeAddr less than 0 -> nodeAddr is a leaf if (nodeAddr < 0 && leafAddr >= 0) { searchingLeaf = false; // required for warp efficiency leafAddr = nodeAddr; nodeAddr = *(int*)stackPtr; // pops next node from stack stackPtr -= 4; // decrements stackptr by 4 bytes (because stackPtr is a pointer to char) } // All SIMD lanes have found a leaf => process them. // to increase efficiency, check if all the threads in a warp have found a leaf before proceeding to the // ray/triangle intersection routine // this bit of code requires PTX (CUDA assembly) code to work properly // if (!__any(searchingLeaf)) -> "__any" keyword: if none of the threads is searching a leaf, in other words // if all threads in the warp found a leafnode, then break from while loop and go to triangle intersection //if(!__any(leafAddr >= 0)) // break; // if (!__any(searchingLeaf)) // break; /// break from while loop and go to code below, processing leaf nodes // NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;". 
// tried everything with CUDA 4.2 but always got several redundant instructions. unsigned int mask; // replaces searchingLeaf asm("{\n" " .reg .pred p; \n" "setp.ge.s32 p, %1, 0; \n" "vote.ballot.b32 %0,p; \n" "}" : "=r"(mask) : "r"(leafAddr)); if (!mask) break; } /////////////////////////////////////////// /// TRIANGLE INTERSECTION ////////////////////////////////////// // Process postponed leaf nodes. while (leafAddr < 0) /// if leafAddr is negative, it points to an actual leafnode (when positive or 0 it's an innernode) { // Intersect the ray against each triangle using Sven Woop's algorithm. // Woop ray triangle intersection: Woop triangles are unit triangles. Each ray // must be transformed to "unit triangle space", before testing for intersection for (int triAddr = ~leafAddr;; triAddr += 3) // triAddr is index in triWoop array (and bitwise complement of leafAddr) { // no defined upper limit for loop, continues until leaf terminator code 0x80000000 is encountered // Read first 16 bytes of the triangle. // fetch first precomputed triangle edge float4 v00 = tex1Dfetch(triWoopTexture, triAddr); // End marker 0x80000000 (negative zero) => all triangles in leaf processed --> terminate if (__float_as_int(v00.x) == 0x80000000) break; // Compute and check intersection t-value (hit distance along ray). float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z; // Origin z float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z); // inverse Direction z float t = Oz * invDz; if (t > tmin && t < hitT) { // Compute and check barycentric u. // fetch second precomputed triangle edge float4 v11 = tex1Dfetch(triWoopTexture, triAddr + 1); float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z; // Origin.x float Dx = dirx * v11.x + diry * v11.y + dirz * v11.z; // Direction.x float u = Ox + t * Dx; /// parametric equation of a ray (intersection point) if (u >= 0.0f && u <= 1.0f) { // Compute and check barycentric v. // fetch third precomputed triangle edge float4 v22 = tex1Dfetch(triWoopTexture, triAddr + 2); float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z; float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z; float v = Oy + t*Dy; if (v >= 0.0f && u + v <= 1.0f) { // We've got a hit! // Record intersection. hitT = t; hitIndex = triAddr; // store triangle index for shading // Closest intersection not required => terminate. if (anyHit) // only true for shadow rays { nodeAddr = EntrypointSentinel; break; } // compute normal vector by taking the cross product of two edge vectors // because of Woop transformation, only one set of vectors works //trinormal = cross(Vec3f(v22.x, v22.y, v22.z), Vec3f(v11.x, v11.y, v11.z)); // works trinormal = cross(Vec3f(v11.x, v11.y, v11.z), Vec3f(v22.x, v22.y, v22.z)); } } } } // end triangle intersection // Another leaf was postponed => process it as well. leafAddr = nodeAddr; if (nodeAddr < 0) // nodeAddr is an actual leaf when < 0 { nodeAddr = *(int*)stackPtr; // pop stack stackPtr -= 4; // decrement with 4 bytes to get the next int (stackPtr is char*) } } // end leaf/triangle intersection loop } // end traversal loop (AABB and triangle intersection) // Remap intersected triangle index, and store the result. 
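	// hitIndex currently refers to a slot in the Woop triangle array, which stores
	// triangles in BVH leaf order; triIndicesTexture maps that slot back to the
	// original mesh triangle index. The lookup is deferred to this point so it is
	// done at most once per ray instead of once per tested triangle.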
if (hitIndex != -1){ hitIndex = tex1Dfetch(triIndicesTexture, hitIndex); // remapping tri indices delayed until this point for performance reasons // (slow texture memory lookup in de triIndicesTexture) because multiple triangles per node can potentially be hit } hitTriIdx = hitIndex; hitdistance = hitT; } // union struct required for mapping pixel colours to OpenGL buffer union Colour // 4 bytes = 4 chars = 1 float { float c; uchar4 components; }; __device__ Vec3f renderKernel(curandState* randstate, const float4* HDRmap, const float4* gpuNodes, const float4* gpuTriWoops, const float4* gpuDebugTris, const int* gpuTriIndices, Vec3f& rayorig, Vec3f& raydir, unsigned int leafcount, unsigned int tricount) { Vec3f mask = Vec3f(1.0f, 1.0f, 1.0f); // colour mask Vec3f accucolor = Vec3f(0.0f, 0.0f, 0.0f); // accumulated colour Vec3f direct = Vec3f(0, 0, 0); for (int bounces = 0; bounces < 4; bounces++){ // iteration up to 4 bounces (instead of recursion in CPU code) int hitSphereIdx = -1; int hitTriIdx = -1; int bestTriIdx = -1; int geomtype = -1; float hitSphereDist = 1e20; float hitDistance = 1e20; float scene_t = 1e20; Vec3f objcol = Vec3f(0, 0, 0); Vec3f emit = Vec3f(0, 0, 0); Vec3f hitpoint; // intersection point Vec3f n; // normal Vec3f nl; // oriented normal Vec3f nextdir; // ray direction of next path segment Vec3f trinormal = Vec3f(0, 0, 0); Refl_t refltype; float ray_tmin = 0.00001f; // set to 0.01f when using refractive material float ray_tmax = 1e20; // intersect all triangles in the scene stored in BVH int debugbingo = 0; intersectBVHandTriangles(make_float4(rayorig.x, rayorig.y, rayorig.z, ray_tmin), make_float4(raydir.x, raydir.y, raydir.z, ray_tmax), bestTriIdx, hitDistance, debugbingo, trinormal, leafcount, tricount, false); //DEBUGintersectBVHandTriangles(make_float4(rayorig.x, rayorig.y, rayorig.z, ray_tmin), make_float4(raydir.x, raydir.y, raydir.z, ray_tmax), //gpuNodes, gpuTriWoops, gpuDebugTris, gpuTriIndices, bestTriIdx, hitDistance, debugbingo, trinormal, leafcount, tricount, false); // intersect all spheres in the scene // float3 required for sphere intersection (to avoid "dynamic allocation not allowed" error) float3 rayorig_flt3 = make_float3(rayorig.x, rayorig.y, rayorig.z); float3 raydir_flt3 = make_float3(raydir.x, raydir.y, raydir.z); float numspheres = sizeof(spheres) / sizeof(Sphere); for (int i = int(numspheres); i--;) // for all spheres in scene // keep track of distance from origin to closest intersection point if ((hitSphereDist = spheres[i].intersect(Ray(rayorig_flt3, raydir_flt3))) && hitSphereDist < scene_t && hitSphereDist > 0.01f){ scene_t = hitSphereDist; hitSphereIdx = i; geomtype = 1; } if (hitDistance < scene_t && hitDistance > ray_tmin) // triangle hit { scene_t = hitDistance; hitTriIdx = bestTriIdx; geomtype = 2; } // sky gradient colour //float t = 0.5f * (raydir.y + 1.2f); //Vec3f skycolor = Vec3f(1.0f, 1.0f, 1.0f) * (1.0f - t) + Vec3f(0.9f, 0.3f, 0.0f) * t; #ifdef HDR // HDR if (scene_t > 1e19) { // if ray misses scene, return sky // HDR environment map code based on Syntopia "Path tracing 3D fractals" // http://blog.hvidtfeldts.net/index.php/2015/01/path-tracing-3d-fractals/ // https://github.com/Syntopia/Fragmentarium/blob/master/Fragmentarium-Source/Examples/Include/IBL-Pathtracer.frag // GLSL code: // vec3 equirectangularMap(sampler2D sampler, vec3 dir) { // dir = normalize(dir); // vec2 longlat = vec2(atan(dir.y, dir.x) + RotateMap, acos(dir.z)); // return texture2D(sampler, longlat / vec2(2.0*PI, PI)).xyz; } // Convert (normalized) dir to 
spherical coordinates. float longlatX = atan2f(raydir.x, raydir.z); // Y is up, swap x for y and z for x longlatX = longlatX < 0.f ? longlatX + TWO_PI : longlatX; // wrap around full circle if negative float longlatY = acosf(raydir.y); // add RotateMap at some point, see Fragmentarium // map theta and phi to u and v texturecoordinates in [0,1] x [0,1] range float offsetY = 0.5f; float u = longlatX / TWO_PI; // +offsetY; float v = longlatY / M_PI ; // map u, v to integer coordinates int u2 = (int)(u * HDRwidth); //% HDRwidth; int v2 = (int)(v * HDRheight); // % HDRheight; // compute the texel index in the HDR map int HDRtexelidx = u2 + v2 * HDRwidth; //float4 HDRcol = HDRmap[HDRtexelidx]; float4 HDRcol = tex1Dfetch(HDRtexture, HDRtexelidx); // fetch from texture Vec3f HDRcol2 = Vec3f(HDRcol.x, HDRcol.y, HDRcol.z); emit = HDRcol2 * 2.0f; accucolor += (mask * emit); return accucolor; } #endif // end of HDR // SPHERES: if (geomtype == 1){ Sphere &hitsphere = spheres[hitSphereIdx]; // hit object with closest intersection hitpoint = rayorig + raydir * scene_t; // intersection point on object n = Vec3f(hitpoint.x - hitsphere.pos.x, hitpoint.y - hitsphere.pos.y, hitpoint.z - hitsphere.pos.z); // normal n.normalize(); nl = dot(n, raydir) < 0 ? n : n * -1; // correctly oriented normal objcol = Vec3f(hitsphere.col.x, hitsphere.col.y, hitsphere.col.z); // object colour emit = Vec3f(hitsphere.emi.x, hitsphere.emi.y, hitsphere.emi.z); // object emission refltype = hitsphere.refl; accucolor += (mask * emit); } // TRIANGLES: if (geomtype == 2){ //pBestTri = &pTriangles[triangle_id]; hitpoint = rayorig + raydir * scene_t; // intersection point // float4 normal = tex1Dfetch(triNormalsTexture, pBestTriIdx); n = trinormal; n.normalize(); nl = dot(n, raydir) < 0 ? n : n * -1; // correctly oriented normal //Vec3f colour = hitTriIdx->_colorf; Vec3f colour = Vec3f(0.9f, 0.3f, 0.0f); // hardcoded triangle colour .9f, 0.3f, 0.0f refltype = COAT; // objectmaterial objcol = colour; emit = Vec3f(0.0, 0.0, 0); // object emission accucolor += (mask * emit); } // basic material system, all parameters are hard-coded (such as phong exponent, index of refraction) // diffuse material, based on smallpt by Kevin Beason if (refltype == DIFF){ // pick two random numbers float phi = 2 * M_PI * curand_uniform(randstate); float r2 = curand_uniform(randstate); float r2s = sqrtf(r2); // compute orthonormal coordinate frame uvw with hitpoint as origin Vec3f w = nl; w.normalize(); Vec3f u = cross((fabs(w.x) > .1 ? Vec3f(0, 1, 0) : Vec3f(1, 0, 0)), w); u.normalize(); Vec3f v = cross(w, u); // compute cosine weighted random ray direction on hemisphere nextdir = u*cosf(phi)*r2s + v*sinf(phi)*r2s + w*sqrtf(1 - r2); nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // scene size dependent // multiply mask with colour of object mask *= objcol; } // end diffuse material // Phong metal material from "Realistic Ray Tracing", P. 
Shirley if (refltype == METAL){ // compute random perturbation of ideal reflection vector // the higher the phong exponent, the closer the perturbed vector is to the ideal reflection direction float phi = 2 * M_PI * curand_uniform(randstate); float r2 = curand_uniform(randstate); float phongexponent = 30; float cosTheta = powf(1 - r2, 1.0f / (phongexponent + 1)); float sinTheta = sqrtf(1 - cosTheta * cosTheta); // create orthonormal basis uvw around reflection vector with hitpoint as origin // w is ray direction for ideal reflection Vec3f w = raydir - n * 2.0f * dot(n, raydir); w.normalize(); Vec3f u = cross((fabs(w.x) > .1 ? Vec3f(0, 1, 0) : Vec3f(1, 0, 0)), w); u.normalize(); Vec3f v = cross(w, u); // v is already normalised because w and u are normalised // compute cosine weighted random ray direction on hemisphere nextdir = u * cosf(phi) * sinTheta + v * sinf(phi) * sinTheta + w * cosTheta; nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.0001f; // scene size dependent // multiply mask with colour of object mask *= objcol; } // ideal specular reflection (mirror) if (refltype == SPEC){ // compute relfected ray direction according to Snell's law nextdir = raydir - n * dot(n, raydir) * 2.0f; nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // multiply mask with colour of object mask *= objcol; } // COAT material based on https://github.com/peterkutz/GPUPathTracer // randomly select diffuse or specular reflection // looks okay-ish but inaccurate (no Fresnel calculation yet) if (refltype == COAT){ float rouletteRandomFloat = curand_uniform(randstate); float threshold = 0.05f; Vec3f specularColor = Vec3f(1, 1, 1); // hard-coded bool reflectFromSurface = (rouletteRandomFloat < threshold); //computeFresnel(make_Vec3f(n.x, n.y, n.z), incident, incidentIOR, transmittedIOR, reflectionDirection, transmissionDirection).reflectionCoefficient); if (reflectFromSurface) { // calculate perfectly specular reflection // Ray reflected from the surface. Trace a ray in the reflection direction. // TODO: Use Russian roulette instead of simple multipliers! // (Selecting between diffuse sample and no sample (absorption) in this case.) mask *= specularColor; nextdir = raydir - n * 2.0f * dot(n, raydir); nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // scene size dependent } else { // calculate perfectly diffuse reflection float r1 = 2 * M_PI * curand_uniform(randstate); float r2 = curand_uniform(randstate); float r2s = sqrtf(r2); // compute orthonormal coordinate frame uvw with hitpoint as origin Vec3f w = nl; w.normalize(); Vec3f u = cross((fabs(w.x) > .1 ? Vec3f(0, 1, 0) : Vec3f(1, 0, 0)), w); u.normalize(); Vec3f v = cross(w, u); // compute cosine weighted random ray direction on hemisphere nextdir = u*cosf(r1)*r2s + v*sinf(r1)*r2s + w*sqrtf(1 - r2); nextdir.normalize(); // offset origin next path segment to prevent self intersection hitpoint += nl * 0.001f; // // scene size dependent // multiply mask with colour of object mask *= objcol; } } // end COAT // perfectly refractive material (glass, water) // set ray_tmin to 0.01 when using refractive material if (refltype == REFR){ bool into = dot(n, nl) > 0; // is ray entering or leaving refractive material? float nc = 1.0f; // Index of Refraction air float nt = 1.4f; // Index of Refraction glass/water float nnt = into ? 
nc / nt : nt / nc; // IOR ratio of refractive materials

			float ddn = dot(raydir, nl);
			float cos2t = 1.0f - nnt*nnt * (1.f - ddn*ddn);

			if (cos2t < 0.0f) // total internal reflection
			{
				nextdir = raydir - n * 2.0f * dot(n, raydir);
				nextdir.normalize();

				// offset origin next path segment to prevent self intersection
				hitpoint += nl * 0.001f; // scene size dependent
			}
			else // cos2t > 0
			{
				// compute direction of transmission ray
				Vec3f tdir = raydir * nnt;
				tdir -= n * ((into ? 1 : -1) * (ddn*nnt + sqrtf(cos2t)));
				tdir.normalize();

				float R0 = ((nt - nc)*(nt - nc)) / ((nt + nc)*(nt + nc)); // Schlick reflectance at normal incidence
				float c = 1.f - (into ? -ddn : dot(tdir, n));
				float Re = R0 + (1.f - R0) * c * c * c * c * c;
				float Tr = 1 - Re; // Transmission
				float P = .25f + .5f * Re;
				float RP = Re / P;
				float TP = Tr / (1.f - P);

				// randomly choose reflection or transmission ray (Russian roulette with probability P)
				if (curand_uniform(randstate) < P) // reflection ray
				{
					mask *= RP;
					nextdir = raydir - n * 2.0f * dot(n, raydir);
					nextdir.normalize();

					hitpoint += nl * 0.001f; // scene size dependent
				}
				else // transmission ray
				{
					mask *= TP;
					nextdir = tdir;
					nextdir.normalize();

					hitpoint += nl * 0.001f; // epsilon must be small to avoid artefacts
				}
			}
		}

		// set up origin and direction of next path segment
		rayorig = hitpoint;
		raydir = nextdir;
	} // end bounces for loop

	return accucolor;
}

__global__ void PathTracingKernel(Vec3f* output, Vec3f* accumbuffer, const float4* HDRmap, const float4* gpuNodes, const float4* gpuTriWoops,
	const float4* gpuDebugTris, const int* gpuTriIndices, unsigned int framenumber, unsigned int hashedframenumber, unsigned int leafcount,
	unsigned int tricount, const Camera* cudaRendercam)
{
	// assign a CUDA thread to every pixel by using the threadIndex
	unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

	// global threadId, see richiesams blogspot
	int threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
	//int pixelx = threadId % scrwidth; // pixel x-coordinate on screen
	//int pixely = threadId / scrwidth; // pixel y-coordinate on screen

	// create random number generator and initialise with hashed frame number, see RichieSams blogspot
	curandState randState; // state of the random number generator, to prevent repetition
	curand_init(hashedframenumber + threadId, 0, 0, &randState);

	Vec3f finalcol; // final pixel colour
	finalcol = Vec3f(0.0f, 0.0f, 0.0f); // reset colour to zero for every pixel
	//Vec3f rendercampos = Vec3f(0, 0.2, 4.6f);
	Vec3f rendercampos = Vec3f(cudaRendercam->position.x, cudaRendercam->position.y, cudaRendercam->position.z);

	int i = (scrheight - y - 1) * scrwidth + x; // pixel index in buffer
	int pixelx = x; // pixel x-coordinate on screen
	int pixely = scrheight - y - 1; // pixel y-coordinate on screen

	Vec3f camdir = Vec3f(0, -0.042612, -1); camdir.normalize();
	Vec3f cx = Vec3f(scrwidth * .5135f / scrheight, 0.0f, 0.0f); // ray direction offset along X-axis
	Vec3f cy = (cross(cx, camdir)).normalize() * .5135f; // ray dir offset along Y-axis, .5135 is FOV angle

	for (int s = 0; s < samps; s++) {

		// compute primary ray direction
		// use camera view of current frame (transformed on CPU side) to create local orthonormal basis
		Vec3f rendercamview = Vec3f(cudaRendercam->view.x, cudaRendercam->view.y, cudaRendercam->view.z); rendercamview.normalize(); // view is already supposed to be normalized, but normalize it explicitly just in case.
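		// rendercamview (forward) and rendercamup below are turned into an orthonormal
		// camera basis via two cross products; horizontal/vertical are then scaled by
		// tan(fov/2) so they span the image plane placed one unit in front of the eye.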
Vec3f rendercamup = Vec3f(cudaRendercam->up.x, cudaRendercam->up.y, cudaRendercam->up.z); rendercamup.normalize(); Vec3f horizontalAxis = cross(rendercamview, rendercamup); horizontalAxis.normalize(); // Important to normalize! Vec3f verticalAxis = cross(horizontalAxis, rendercamview); verticalAxis.normalize(); // verticalAxis is normalized by default, but normalize it explicitly just for good measure. Vec3f middle = rendercampos + rendercamview; Vec3f horizontal = horizontalAxis * tanf(cudaRendercam->fov.x * 0.5 * (M_PI / 180)); // Treating FOV as the full FOV, not half, so multiplied by 0.5 Vec3f vertical = verticalAxis * tanf(-cudaRendercam->fov.y * 0.5 * (M_PI / 180)); // Treating FOV as the full FOV, not half, so multiplied by 0.5 // anti-aliasing // calculate center of current pixel and add random number in X and Y dimension // based on https://github.com/peterkutz/GPUPathTracer float jitterValueX = curand_uniform(&randState) - 0.5; float jitterValueY = curand_uniform(&randState) - 0.5; float sx = (jitterValueX + pixelx) / (cudaRendercam->resolution.x - 1); float sy = (jitterValueY + pixely) / (cudaRendercam->resolution.y - 1); // compute pixel on screen Vec3f pointOnPlaneOneUnitAwayFromEye = middle + (horizontal * ((2 * sx) - 1)) + (vertical * ((2 * sy) - 1)); Vec3f pointOnImagePlane = rendercampos + ((pointOnPlaneOneUnitAwayFromEye - rendercampos) * cudaRendercam->focalDistance); // Important for depth of field! // calculation of depth of field / camera aperture // based on https://github.com/peterkutz/GPUPathTracer Vec3f aperturePoint = Vec3f(0, 0, 0); if (cudaRendercam->apertureRadius > 0.00001) { // the small number is an epsilon value. // generate random numbers for sampling a point on the aperture float random1 = curand_uniform(&randState); float random2 = curand_uniform(&randState); // randomly pick a point on the circular aperture float angle = TWO_PI * random1; float distance = cudaRendercam->apertureRadius * sqrtf(random2); float apertureX = cos(angle) * distance; float apertureY = sin(angle) * distance; aperturePoint = rendercampos + (horizontalAxis * apertureX) + (verticalAxis * apertureY); } else { // zero aperture aperturePoint = rendercampos; } // calculate ray direction of next ray in path Vec3f apertureToImagePlane = pointOnImagePlane - aperturePoint; apertureToImagePlane.normalize(); // ray direction needs to be normalised // ray direction Vec3f rayInWorldSpace = apertureToImagePlane; rayInWorldSpace.normalize(); // ray origin Vec3f originInWorldSpace = aperturePoint; finalcol += renderKernel(&randState, HDRmap, gpuNodes, gpuTriWoops, gpuDebugTris, gpuTriIndices, originInWorldSpace, rayInWorldSpace, leafcount, tricount) * (1.0f / samps); } // add pixel colour to accumulation buffer (accumulates all samples) accumbuffer[i] += finalcol; // averaged colour: divide colour by the number of calculated frames so far Vec3f tempcol = accumbuffer[i] / framenumber; Colour fcolour; Vec3f colour = Vec3f(clamp(tempcol.x, 0.0f, 1.0f), clamp(tempcol.y, 0.0f, 1.0f), clamp(tempcol.z, 0.0f, 1.0f)); // convert from 96-bit to 24-bit colour + perform gamma correction fcolour.components = make_uchar4((unsigned char)(powf(colour.x, 1 / 2.2f) * 255), (unsigned char)(powf(colour.y, 1 / 2.2f) * 255), (unsigned char)(powf(colour.z, 1 / 2.2f) * 255), 1); // store pixel coordinates and pixelcolour in OpenGL readable outputbuffer output[i] = Vec3f(x, y, fcolour.c); } bool firstTime = true; // the gateway to CUDA, called from C++ (in void disp() in main.cpp) void cudaRender(const float4* nodes, 
const float4* triWoops, const float4* debugTris, const int* triInds, Vec3f* outputbuf, Vec3f* accumbuf, const float4* HDRmap, const unsigned int framenumber, const unsigned int hashedframenumber, const unsigned int nodeSize, const unsigned int leafnodecnt, const unsigned int tricnt, const Camera* cudaRenderCam){ if (firstTime) { // if this is the first time cudarender() is called, // bind the scene data to CUDA textures! firstTime = false; cudaChannelFormatDesc channel0desc = cudaCreateChannelDesc<int>(); cudaBindTexture(NULL, &triIndicesTexture, triInds, &channel0desc, (tricnt * 3 + leafnodecnt) * sizeof(int)); // is tricnt wel juist?? cudaChannelFormatDesc channel1desc = cudaCreateChannelDesc<float4>(); cudaBindTexture(NULL, &triWoopTexture, triWoops, &channel1desc, (tricnt * 3 + leafnodecnt) * sizeof(float4)); cudaChannelFormatDesc channel3desc = cudaCreateChannelDesc<float4>(); cudaBindTexture(NULL, &bvhNodesTexture, nodes, &channel3desc, nodeSize * sizeof(float4)); HDRtexture.filterMode = cudaFilterModeLinear; cudaChannelFormatDesc channel4desc = cudaCreateChannelDesc<float4>(); cudaBindTexture(NULL, &HDRtexture, HDRmap, &channel4desc, HDRwidth * HDRheight * sizeof(float4)); // 2k map: printf("CudaWoopTriangles texture initialised, tri count: %d\n", tricnt); } dim3 block(16, 16, 1); // dim3 CUDA specific syntax, block and grid are required to schedule CUDA threads over streaming multiprocessors dim3 grid(scrwidth / block.x, scrheight / block.y, 1); // Configure grid and block sizes: int threadsPerBlock = 256; // Compute the number of blocks required, performing a ceiling operation to make sure there are enough: int fullBlocksPerGrid = ((scrwidth * scrheight) + threadsPerBlock - 1) / threadsPerBlock; // <<<fullBlocksPerGrid, threadsPerBlock>>> PathTracingKernel << <grid, block >> >(outputbuf, accumbuf, HDRmap, nodes, triWoops, debugTris, triInds, framenumber, hashedframenumber, leafnodecnt, tricnt, cudaRenderCam); // texdata, texoffsets }
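// ---------------------------------------------------------------------------------
// Minimal sketch (not from the original source above): both traversal loops break out
// of the node-traversal phase with inline PTX (setp.ge.s32 + vote.ballot.b32) because,
// per the comments, the author could not get a clean "__any" out of CUDA 4.2. On
// CUDA 9.0 and later the same warp vote can be expressed with the __ballot_sync
// intrinsic; the helper below is illustrative only and assumes the whole warp executes
// the traversal loop together, as it does in these kernels.
__device__ __forceinline__ bool anyLaneStillSearching(int leafAddr)
{
	// one bit per active lane whose postponed leaf address is still non-negative
	unsigned int ballot = __ballot_sync(__activemask(), leafAddr >= 0);
	return ballot != 0u; // false => every lane has found a leaf, leave the inner while loop
}
// usage inside the inner traversal loop:  if (!anyLaneStillSearching(leafAddr)) break;
// ---------------------------------------------------------------------------------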
37d97acb6259dc4a51432435ed21abd472d2955a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h> __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x64_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 900 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x64_rf_sm80` is for sm80-sm90, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x128_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 900 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x128_rf_sm80` is for sm80-sm90, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_32x128_gmem_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 900 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_32x128_gmem_sm80` is for sm80-sm90, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
37d97acb6259dc4a51432435ed21abd472d2955a.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h> __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x64_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 900 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x64_rf_sm80` is for sm80-sm90, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_64x128_rf_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 900 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_64x128_rf_sm80` is for sm80-sm90, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_bf16_aligned_32x128_gmem_sm80(typename AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 900 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::bfloat16_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_bf16_aligned_32x128_gmem_sm80` is for sm80-sm90, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
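// ---------------------------------------------------------------------------------
// Minimal, hedged sketch (not part of kernel_forward.h) of the pattern the generated
// kernels above rely on: __launch_bounds__(maxThreadsPerBlock, minBlocksPerSm) lets the
// compiler cap register usage so the requested occupancy is actually achievable, and
// the __CUDA_ARCH__ guards compile the heavy templated body only for the intended
// architecture range, falling back to a runtime error message elsewhere. The names
// DummyParams and guarded_kernel are illustrative only.
#include <cstdio>

struct DummyParams { int n; float* out; };

__global__ void __launch_bounds__(128 /* kNumThreads */, 2 /* kMinBlocksPerSm */)
guarded_kernel(DummyParams p)
{
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 900
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < p.n) p.out[i] = 0.0f; // the real kernel body would run here
	return;
#endif
#endif
	printf("guarded_kernel is for sm80-sm90, but was built for sm%d\n",
	       int(__CUDA_ARCH__ + 0) / 10);
#endif
}
// ---------------------------------------------------------------------------------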
318c917c6ad20e04d05e37c145ff5855901cc203.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "exec_time.h" #define DATASET_SIZE 1024 #define THREADS_PER_BLOCK 256 // Kernel function to add the elements of two arrays __global__ void add(int n, float *d_x, float *d_y) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { d_y[index] = d_x[index] + d_y[index]; } } int main_func(int argc, char **argv) { float *h_x, *h_y; float *d_x, *d_y; hipError_t hipError_t; int i; struct timeval start, stop; // Disable buffering entirely setbuf(stdout, NULL); // Allocating arrays on host printf("Allocating arrays h_x and h_y on host..."); gettimeofday(&start, NULL); h_x = (float*)malloc(DATASET_SIZE*sizeof(float)); h_y = (float*)malloc(DATASET_SIZE*sizeof(float)); // check malloc memory allocation if (h_x == NULL || h_y == NULL) { printf("Error: malloc unable to allocate memory on host."); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Allocating array on device printf("Allocating array d_x and d_y on device..."); gettimeofday(&start, NULL); hipError_t = hipMalloc(&d_x, DATASET_SIZE*sizeof(float)); // check hipMalloc memory allocation if (hipError_t != hipSuccess) { printf("hipMalloc d_x returned error %s (code %d)\n", hipGetErrorString(hipError_t), hipError_t); return 1; } hipError_t = hipMalloc(&d_y, DATASET_SIZE*sizeof(float)); // check hipMalloc memory allocation if (hipError_t != hipSuccess) { printf("hipMalloc d_y returned error %s (code %d)\n", hipGetErrorString(hipError_t), hipError_t); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Initialize host memory printf("Initializing array h_x and h_y on host..."); gettimeofday(&start, NULL); for (i =0; i < DATASET_SIZE; ++i) { h_x[i] = 1.0f; h_y[i] = 2.0f; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from host to device printf("Copying arrays from host to device..."); gettimeofday(&start, NULL); hipError_t = hipMemcpy(d_x, h_x, DATASET_SIZE*sizeof(float), hipMemcpyHostToDevice); if (hipError_t != hipSuccess) { printf("hipMemcpy (h_x -> d_x) returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } hipError_t = hipMemcpy(d_y, h_y, DATASET_SIZE*sizeof(float), hipMemcpyHostToDevice); if (hipError_t != hipSuccess) { printf("hipMemcpy (h_x -> d_x) returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Run kernel on elements on the GPU printf("Running kernel on elemnts of d_x and d_y..."); gettimeofday(&start, NULL); int blockSize = THREADS_PER_BLOCK; int numBlocks = (DATASET_SIZE + blockSize - 1) / blockSize; hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, DATASET_SIZE, d_x, d_y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from device to host printf("Copying array from device (d_y) to host (h_y)..."); gettimeofday(&start, NULL); hipError_t = hipMemcpy(h_y, d_y, DATASET_SIZE*sizeof(float), hipMemcpyDeviceToHost); if (hipError_t != hipSuccess) { printf("hipMemcpy (d_y -> h_y) returned error %s (code %d), line(%d)\n", hipGetErrorString(hipError_t), hipError_t, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", 
timedifference_msec(start, stop));

    // Check for errors (all values should be 3.0f)
    printf("Checking for processing errors...");
    gettimeofday(&start, NULL);

    float maxError = 0.0f;
    float diffError = 0.0f;
    for (i = 0; i < DATASET_SIZE; i++) {
        maxError = (maxError > (diffError=fabs(h_y[i]-3.0f)))? maxError : diffError;
        //printf("%d -> %f\n", i, h_y[i]);
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    printf("Max error: %f\n", maxError);

    // Free memory
    printf("Freeing memory...");
    gettimeofday(&start, NULL);
    hipFree(d_x);
    hipFree(d_y);
    free(h_x);
    free(h_y);
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));

    return 0;
}
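// ---------------------------------------------------------------------------------
// Minimal sketch (not defined anywhere in the file above): the repeated
// "call the API, compare against hipSuccess, print and return 1" blocks in main_func
// can be folded into one checking macro. The name HIP_CHECK is illustrative, and the
// macro is only usable inside a function returning int, such as main_func.
#define HIP_CHECK(call)                                                        \
    do {                                                                       \
        hipError_t err_ = (call);                                              \
        if (err_ != hipSuccess) {                                              \
            printf("%s returned error %s (code %d), line(%d)\n",               \
                   #call, hipGetErrorString(err_), (int)err_, __LINE__);       \
            return 1;                                                          \
        }                                                                      \
    } while (0)
// usage: HIP_CHECK(hipMalloc(&d_x, DATASET_SIZE * sizeof(float)));
//        HIP_CHECK(hipMemcpy(d_x, h_x, DATASET_SIZE * sizeof(float), hipMemcpyHostToDevice));
// ---------------------------------------------------------------------------------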
318c917c6ad20e04d05e37c145ff5855901cc203.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "exec_time.h" #define DATASET_SIZE 1024 #define THREADS_PER_BLOCK 256 // Kernel function to add the elements of two arrays __global__ void add(int n, float *d_x, float *d_y) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < n) { d_y[index] = d_x[index] + d_y[index]; } } int main_func(int argc, char **argv) { float *h_x, *h_y; float *d_x, *d_y; cudaError_t cudaError; int i; struct timeval start, stop; // Disable buffering entirely setbuf(stdout, NULL); // Allocating arrays on host printf("Allocating arrays h_x and h_y on host..."); gettimeofday(&start, NULL); h_x = (float*)malloc(DATASET_SIZE*sizeof(float)); h_y = (float*)malloc(DATASET_SIZE*sizeof(float)); // check malloc memory allocation if (h_x == NULL || h_y == NULL) { printf("Error: malloc unable to allocate memory on host."); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Allocating array on device printf("Allocating array d_x and d_y on device..."); gettimeofday(&start, NULL); cudaError = cudaMalloc(&d_x, DATASET_SIZE*sizeof(float)); // check cudaMalloc memory allocation if (cudaError != cudaSuccess) { printf("cudaMalloc d_x returned error %s (code %d)\n", cudaGetErrorString(cudaError), cudaError); return 1; } cudaError = cudaMalloc(&d_y, DATASET_SIZE*sizeof(float)); // check cudaMalloc memory allocation if (cudaError != cudaSuccess) { printf("cudaMalloc d_y returned error %s (code %d)\n", cudaGetErrorString(cudaError), cudaError); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Initialize host memory printf("Initializing array h_x and h_y on host..."); gettimeofday(&start, NULL); for (i =0; i < DATASET_SIZE; ++i) { h_x[i] = 1.0f; h_y[i] = 2.0f; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from host to device printf("Copying arrays from host to device..."); gettimeofday(&start, NULL); cudaError = cudaMemcpy(d_x, h_x, DATASET_SIZE*sizeof(float), cudaMemcpyHostToDevice); if (cudaError != cudaSuccess) { printf("cudaMemcpy (h_x -> d_x) returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } cudaError = cudaMemcpy(d_y, h_y, DATASET_SIZE*sizeof(float), cudaMemcpyHostToDevice); if (cudaError != cudaSuccess) { printf("cudaMemcpy (h_x -> d_x) returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Run kernel on elements on the GPU printf("Running kernel on elemnts of d_x and d_y..."); gettimeofday(&start, NULL); int blockSize = THREADS_PER_BLOCK; int numBlocks = (DATASET_SIZE + blockSize - 1) / blockSize; add<<<numBlocks, blockSize>>>(DATASET_SIZE, d_x, d_y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Copy array from device to host printf("Copying array from device (d_y) to host (h_y)..."); gettimeofday(&start, NULL); cudaError = cudaMemcpy(h_y, d_y, DATASET_SIZE*sizeof(float), cudaMemcpyDeviceToHost); if (cudaError != cudaSuccess) { printf("cudaMemcpy (d_y -> h_y) returned error %s (code %d), line(%d)\n", cudaGetErrorString(cudaError), cudaError, __LINE__); return 1; } gettimeofday(&stop, NULL); printf("%f ms\n", timedifference_msec(start, stop)); // Check for errors (all values should be 3.0f) printf("Checking for 
processing errors...");
    gettimeofday(&start, NULL);

    float maxError = 0.0f;
    float diffError = 0.0f;
    for (i = 0; i < DATASET_SIZE; i++) {
        maxError = (maxError > (diffError=fabs(h_y[i]-3.0f)))? maxError : diffError;
        //printf("%d -> %f\n", i, h_y[i]);
    }
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));
    printf("Max error: %f\n", maxError);

    // Free memory
    printf("Freeing memory...");
    gettimeofday(&start, NULL);
    cudaFree(d_x);
    cudaFree(d_y);
    free(h_x);
    free(h_y);
    gettimeofday(&stop, NULL);
    printf("%f ms\n", timedifference_msec(start, stop));

    return 0;
}
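// ---------------------------------------------------------------------------------
// Minimal sketch (not part of the file above): main_func checks every cudaMalloc and
// cudaMemcpy, but not the kernel launch itself. The illustrative helper below shows
// how the launch of the add kernel could be validated: cudaGetLastError catches bad
// launch configurations, and the synchronize reports errors raised during execution.
static int checkedAddLaunch(int n, float* d_x, float* d_y, int numBlocks, int blockSize)
{
    add<<<numBlocks, blockSize>>>(n, d_x, d_y);
    cudaError_t launchErr = cudaGetLastError();        // configuration / launch errors
    if (launchErr != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(launchErr));
        return 1;
    }
    cudaError_t syncErr = cudaDeviceSynchronize();     // errors raised while the kernel ran
    if (syncErr != cudaSuccess) {
        printf("kernel execution failed: %s\n", cudaGetErrorString(syncErr));
        return 1;
    }
    return 0;
}
// ---------------------------------------------------------------------------------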
9dff727720c56f42e13cf3ac47314a52a1ecb800.hip
// !!! This is a file automatically generated by hipify!!! // *********************************************************************** // // Rundemanen: CUDA C++ parallel program for community detection // Md Naim ([email protected]), Fredrik Manne ([email protected]) // University of Bergen // // *********************************************************************** // // Copyright (2016) University of Bergen // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
// // ************************************************************************ #include"communityGPU.h" #include"hostconstants.h" #include"thrust/reduce.h" #include"thrust/count.h" #include"fstream" #include <thrust/gather.h> void Community::compute_next_graph(hipStream_t *streams, int nrStreams, hipEvent_t &start, hipEvent_t &stop) { //std::cout << "\nCompute_next_graph() \n"; int new_nb_comm = g_next.nb_nodes; bool hostPrint = false; int sc; sc = 0; //std::cin>>sc; hostPrint = (sc > 1); //hipEvent_t start, stop; //hipEventCreate(&start); //hipEventCreate(&stop); //Save a copy of "pos_ptr_of_new_comm" thrust::device_vector<int> super_node_ptrs(pos_ptr_of_new_comm); /* if (hostPrint) { print_vector(super_node_ptrs, "Super Node Ptrs: "); print_vector(n2c_new, " n2c_new; before group_nodes_based_on_new_CID"); } */ //Place nodes of same community together comm_nodes.resize(g.nb_nodes); hipEventRecord(start, 0); int load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / PHY_WRP_SZ); int nr_of_block = (community_size + load_per_blk - 1) / load_per_blk; group_nodes_based_on_new_CID << < nr_of_block, NR_THREAD_PER_BLOCK>>> (thrust::raw_pointer_cast(comm_nodes.data()), thrust::raw_pointer_cast(pos_ptr_of_new_comm.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(n2c.data()), g.nb_nodes); report_time(start, stop, "group_nodes_based_on_new_CID"); this->pos_ptr_of_new_comm.clear(); ////////////////////////////////////////////////////////////////////////////// thrust::device_vector<int> degree_per_node(super_node_ptrs.size()); thrust::transform(thrust::device, super_node_ptrs.begin() + 1, super_node_ptrs.end(), super_node_ptrs.begin(), degree_per_node.begin(), thrust::minus<int>()); int largestCommSize = *thrust::max_element(degree_per_node.begin(), degree_per_node.end()); //std::cout << "-----------------------------------largestCommSize: " << largestCommSize << std::endl; ///////////////////////////////////////////////////////////////////// //print_vector(comm_nodes, " comm_nodes : "); /* if(0){ thrust::host_vector<int> allnodes = comm_nodes; //unsigned int lastone = 777; for (unsigned int i = 0; i < g.nb_nodes; i++) { if(lastone != n2c[allnodes[i]]){ std::cout<<std::endl; } lastone = n2c[allnodes[i]]; if( n2c_new[n2c[allnodes[i]]] ==63 ) std::cout << allnodes[i] << ":" << n2c[ allnodes[i]] <<":" << n2c_new[n2c[allnodes[i]]] <<" "; } } */ /* if (0) { std::cout << std::endl << "Node:Community " << std::endl; for (int i = 0; i < g.nb_nodes; i++) { if( (i >=320 && i <=329) || (i >= 2945 && i <= 2949) ) std::cout << i << "(" << n2c_new[n2c[i]] << ") "; } std::cout << std::endl; } if (hostPrint) { print_vector(super_node_ptrs, "Super Node Ptrs: "); } */ // construct next Graph g_next.indices.resize(new_nb_comm + 1); //-------Estimate the size of neighborhood of each new community------// thrust::device_vector<int> estimatedSizeOfNeighborhoods(new_nb_comm + 1, -1); unsigned int wrpSz = PHY_WRP_SZ; //1; int nr_block_needed = (new_nb_comm + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz); hipEventRecord(start, 0); computeBoundOfNeighoodSize << < nr_block_needed, NR_THREAD_PER_BLOCK>>>( thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), wrpSz); report_time(start, stop, "estimate_size_of_neighborhoods"); /* if (hostPrint) { print_vector(estimatedSizeOfNeighborhoods, "estimatedSizeOfNeighborhoods: "); } 
*/ // /* if (0) { thrust::host_vector<int> commNodesHost = comm_nodes; thrust::host_vector<int> superPtrsHost = super_node_ptrs; for (int i = 0; i < superPtrsHost.size() - 1; i++) { if(i==63){ thrust::sort(commNodesHost.begin() + superPtrsHost[i], commNodesHost.begin() + superPtrsHost[i + 1]); std::cout << "RNR "; for (int j = superPtrsHost[i]; j < superPtrsHost[i + 1]; j++) { std::cout << commNodesHost[j] << " "; } std::cout << std::endl; } } commNodesHost.clear(); superPtrsHost.clear(); } */ hipEventRecord(start, 0); unsigned int bucketSizePerWarp = WARP_TABLE_SIZE_1; IsGreaterThanLimit<int, int> filterForBlkGMem(SHARED_TABLE_SIZE); IsInRange<int, int> filterForBlkSMem(WARP_TABLE_SIZE_1 + 1, SHARED_TABLE_SIZE); IsInRange<int, int> filterForWrp(0, WARP_TABLE_SIZE_1); //------------Filter communities to be processed by block based on upper bound---------// //Count First int nrCforBlkGbMem = thrust::count_if(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), filterForBlkGMem); int nrCforBlkShMem = thrust::count_if(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), filterForBlkSMem); int nrCforWrp = thrust::count_if(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), filterForWrp); // std::cout << "#Community Processed By Warp = " << nrCforWrp << std::endl; //Lets copy all community ids in g_next.links g_next.links.resize(new_nb_comm, 0); thrust::sequence(g_next.links.begin(), g_next.links.end(), 0); //Use g_next.indices to copy community ids with decreasing sizes of neighborhood g_next.indices.resize(new_nb_comm, -1); //Community ids with larger UpperBound on SoN first thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), estimatedSizeOfNeighborhoods.begin(), g_next.indices.begin(), filterForBlkGMem); //^^copied first "nrCforBlkGbMem" thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), estimatedSizeOfNeighborhoods.begin(), g_next.indices.begin() + nrCforBlkGbMem, filterForBlkSMem); //^^copied next "nrCforBlkShMem" //Then community ids with smaller UpperBound on SoN thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), estimatedSizeOfNeighborhoods.begin(), g_next.indices.begin() + nrCforBlkGbMem + nrCforBlkShMem, filterForWrp); /* std::cout << "new_nb_comm = " << new_nb_comm << std::endl; std::cout << "nrCforBlkGbMem = " << nrCforBlkGbMem << std::endl; std::cout << "nrCforBlkShMem = " << nrCforBlkShMem << std::endl; std::cout << "nrCforWrp = " << nrCforWrp << std::endl; */ assert((nrCforBlkGbMem + nrCforBlkShMem + nrCforWrp) == new_nb_comm); /* if (0) { thrust::host_vector<int> esSizes = estimatedSizeOfNeighborhoods; thrust::host_vector<int> bigCommunites = g_next.indices; for (int k = 0; k < thrust::min<int>(5, bigCommunites.size()); k++) { std::cout << bigCommunites[k] << ":::" << esSizes[bigCommunites[k]] << std::endl; } esSizes.clear(); bigCommunites.clear(); }*/ // Now, use g_next.links to copy sizes of neighborhood according to order given by g_next.indices g_next.links.resize(g_next.indices.size(), 0); thrust::gather(thrust::device, g_next.indices.begin(), g_next.indices.end(), estimatedSizeOfNeighborhoods.begin(), g_next.links.begin()); report_time(start, stop, "FilterGather"); // std::cout<<"Gathered\n"; // std::cin>>sc; /* if (0) { thrust::host_vector<unsigned int> gnlinks = g_next.links; thrust::host_vector<int> bigCommunites = g_next.indices; for (int i = 0; i < thrust::min(5, nrCforBlkGbMem); i++) { 
std::cout << bigCommunites[i] << "*" << gnlinks[i] << std::endl; } gnlinks.clear(); bigCommunites.clear(); } */ sc = 0; //std::cin>>sc; //Sort according to size of neighborhood ; only first nrCforBlkGbMem if ((nrCforBlkGbMem + nrCforBlkShMem) > 0) { int sortLen = nrCforBlkGbMem + nrCforBlkShMem; //std::cout<<"Sorting "<<sortLen <<" entries"<<std::endl; thrust::sort_by_key(g_next.links.begin(), g_next.links.begin() + sortLen, g_next.indices.begin(), thrust::greater<unsigned int>()); } /* if (1) { thrust::host_vector<int> esSizes = estimatedSizeOfNeighborhoods; thrust::host_vector<int> bigCommunites = g_next.indices; for (int k = 0; k < thrust::min<int>(bigCommunites.size(), 8); k++) { std::cout << bigCommunites[k] << "::" << esSizes[bigCommunites[k]] << std::endl; } esSizes.clear(); bigCommunites.clear(); } */ sc = 0; //std::cin>>sc; int nrBlockForLargeNhoods = 150; nrBlockForLargeNhoods = thrust::min(thrust::max(nrCforBlkGbMem, nrCforBlkShMem), nrBlockForLargeNhoods); thrust::device_vector<int> hashTablePtrs(nrBlockForLargeNhoods + 1, 0); ////////////////////////////////////////////////// //void preComputePrimes(int *primes,int nrPrimes, int* thresholds, int nrBigBlock, int *selectedPrimes, int WARP_SIZE); wrpSz = PHY_WRP_SZ; ; nr_block_needed = (nrBlockForLargeNhoods + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz); if (nrBlockForLargeNhoods > 0) preComputePrimes << < nr_block_needed, NR_THREAD_PER_BLOCK >>> (thrust::raw_pointer_cast(devPrimes.data()), nb_prime, thrust::raw_pointer_cast(g_next.links.data()), nrBlockForLargeNhoods, thrust::raw_pointer_cast(hashTablePtrs.data()) + 1, wrpSz); /* if (1) { thrust::host_vector<int> esSizes = hashTablePtrs; thrust::host_vector<unsigned int> thresholds= g_next.links; int spaceReq=0; for (int k = 0; k < nrBlockForLargeNhoods; k++) { std::cout << thresholds[k]<<" nearest Prime -> " <<esSizes[k+1]<<std::endl; if(thresholds[k]> esSizes[k+1]) std::cout<<"PROBLEM in HOST, call to prime computation"<<std::endl; spaceReq += esSizes[k+1]; } std::cout<< "Total Space requried: "<< spaceReq <<std::endl; esSizes.clear(); } */ thrust::inclusive_scan(hashTablePtrs.begin(), hashTablePtrs.begin() + (nrBlockForLargeNhoods + 1), hashTablePtrs.begin(), thrust::plus<int>()); /////////////////////////////////////////////////// //g_next.links contains sizes of big neighborhoods /*---------------> thrust::inclusive_scan(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods, hashTablePtrs.begin() + 1, thrust::plus<int>()); */ /********************/ /* if (1) { thrust::host_vector<int> esSizes = hashTablePtrs; for (int k = 0; k < nrBlockForLargeNhoods; k++) { std::cout << esSizes[k]<<" "; } std::cout<<std::endl; esSizes.clear(); } */ //------->thrust::transform( hashTablePtrs.begin(), hashTablePtrs.end(), hashTablePtrs.begin(), hashTablePtrs.begin(),thrust::plus<int>()); /* if (1) { thrust::host_vector<int> esSizes = hashTablePtrs; for (int k = 0; k < nrBlockForLargeNhoods/8; k++) { std::cout <<k<<":"<< esSizes[k]<<" "; } std::cout<<std::endl; esSizes.clear(); } */ //thrust::transform( hashTablePtrs.begin(), hashTablePtrs.end(), hashTablePtrs.begin(), hashTablePtrs.begin(),thrust::plus<int>()); /* if (0) { thrust::host_vector<int> esSizes = hashTablePtrs; for (int k = 0; k < nrBlockForLargeNhoods; k++) { std::cout << esSizes[k]<<" "; } std::cout<<std::endl; esSizes.clear(); } */ thrust::device_vector<HashItem> globalHashTable(hashTablePtrs.back()); /*********************/ // thrust::device_vector<HashItem> globalHashTable(3 * 
hashTablePtrs.back()); int szHTmem = thrust::reduce(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods, (int) 0); //std::cout << globalHashTable.size() << ":" << 2 * szHTmem << std::endl; //-------Prefix sum on estimate the size of neighborhoods to determine global positions for new communities-----// hipEventRecord(start, 0); thrust::exclusive_scan(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), estimatedSizeOfNeighborhoods.begin(), (int) 0, thrust::plus<int>()); report_time(start, stop, "thrust::exclusive_scan"); int upperBoundonTotalSize = estimatedSizeOfNeighborhoods.back(); /* if (hostPrint) { print_vector(estimatedSizeOfNeighborhoods, "estimatedSizeOfNeighborhoods: "); } std::cout << "Before big allocation; UpperBoundOnTotalSize = " << upperBoundonTotalSize << std::endl; */ //--------------Allocate memory for new links and weights-------------// thrust::device_vector<unsigned int> member_count_per_new_comm(new_nb_comm + 1, 0); // exact count thrust::device_vector<unsigned int> new_nighbor_lists(upperBoundonTotalSize); thrust::device_vector<float> new_weight_lists(upperBoundonTotalSize); //std::cout << "nrBlockForLargeNhoods: " << nrBlockForLargeNhoods << std::endl; /* if (hostPrint) { print_vector(member_count_per_new_comm, "Exact #neighbor per community:"); } */ wrpSz = PHY_WRP_SZ; hipEventRecord(start, 0); if (nrCforBlkGbMem > 0) findNewNeighodByBlock << < nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK/*, 0, streams[0]*/>>> (thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(new_weight_lists.data()), thrust::raw_pointer_cast(new_nighbor_lists.data()), thrust::raw_pointer_cast(member_count_per_new_comm.data()), // puts zero in position zero for prefix sum thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(g.links.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(n2c.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), g.type, bucketSizePerWarp, thrust::raw_pointer_cast(g_next.indices.data()), //int* candidateComms nrCforBlkGbMem, //int nrCandidateComms thrust::raw_pointer_cast(globalHashTable.data()), thrust::raw_pointer_cast(hashTablePtrs.data()), thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz); report_time(start, stop, "findNewNeighodByBlock(GlobalMemory)"); /* int sum_MC = thrust::reduce(member_count_per_new_comm.begin(), member_count_per_new_comm.end(), (int) 0); std::cout << "sum_MC(blkGlb): " << sum_MC << std::endl; */ sc = 0; //std::cin>>sc; hipEventRecord(start, 0); if (nrCforBlkShMem > 0) findNewNeighodByBlock << < nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK/*, 0, streams[1]*/>>> (thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(new_weight_lists.data()), thrust::raw_pointer_cast(new_nighbor_lists.data()), thrust::raw_pointer_cast(member_count_per_new_comm.data()), // puts zero in position zero for prefix sum thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(g.links.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(n2c.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), g.type, bucketSizePerWarp, thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGbMem, //int* candidateComms nrCforBlkShMem, //int nrCandidateComms 
thrust::raw_pointer_cast(globalHashTable.data()), thrust::raw_pointer_cast(hashTablePtrs.data()), thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz); report_time(start, stop, "findNewNeighodByBlock(SharedMemory)"); /* sum_MC = thrust::reduce(member_count_per_new_comm.begin(), member_count_per_new_comm.end(), (int) 0); std::cout << "sum_MC(blkShrd): " << sum_MC << std::endl; */ //------------Compute neighborhood of new communities-----------------// sc = 0; //std::cin>>sc; //if( new_nb_comm!=12525 ){ sc = 0; //std::cin>>sc; //std::cout << "Pre: nr_block_needed:" << nr_block_needed << std::endl; wrpSz = PHY_WRP_SZ; nr_block_needed = (nrCforWrp + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz); //std::cout << "Post: nr_block_needed:" << nr_block_needed << std::endl; nr_block_needed = thrust::min(nr_block_needed, 1920); unsigned int sharedMemSzPerBlock = WARP_TABLE_SIZE_1 * sizeof (HashItem) * NR_THREAD_PER_BLOCK / wrpSz; hipEventRecord(start, 0); if (nrCforWrp) determineNewNeighborhood << < nr_block_needed, NR_THREAD_PER_BLOCK, sharedMemSzPerBlock/*, streams[2]*/>>> (thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(new_weight_lists.data()), thrust::raw_pointer_cast(new_nighbor_lists.data()), thrust::raw_pointer_cast(member_count_per_new_comm.data()), // puts zero in position zero for prefix sum thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(g.links.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(n2c.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), g.type, WARP_TABLE_SIZE_1, thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGbMem + nrCforBlkShMem, nrCforWrp, wrpSz); report_time(start, stop, "determine_neighbors_of_new_comms"); //print_vector(member_count_per_new_comm, "Neighbor Counts( per new community): ", "MC"); hashTablePtrs.clear(); globalHashTable.clear(); estimatedSizeOfNeighborhoods.clear(); n2c.clear(); n2c_new.clear(); comm_nodes.clear(); /**************** OKAY ?****************/ g.indices.clear(); g.links.clear(); g.weights.clear(); /********************************/ std::cout << "#New Community: " << new_nb_comm << std::endl; /* sum_MC = thrust::reduce(member_count_per_new_comm.begin(), member_count_per_new_comm.end(), (int) 0); std::cout << "sum_MC(warp): " << sum_MC << std::endl; if (hostPrint) { std::cout << "#New Community: " << new_nb_comm << std::endl; print_vector(new_weight_lists, "New Weights: "); print_vector(new_nighbor_lists, "New Neighbors: "); print_vector(super_node_ptrs, "Super Node Ptrs: "); } */ //---------Put data accordingly to new graph-------------------// g_next.type = WEIGHTED; g_next.indices.resize(member_count_per_new_comm.size(), 0); thrust::inclusive_scan(thrust::device, member_count_per_new_comm.begin(), member_count_per_new_comm.end(), g_next.indices.begin(), thrust::plus<int>()); member_count_per_new_comm.clear(); int nr_edges_in_new_graph = g_next.indices.back(); g_next.nb_links = (unsigned int) nr_edges_in_new_graph; //std::cout << "#E(New Graph): " << nr_edges_in_new_graph << std::endl; /*if (0) { std::cout << std::endl << "g_next.nb_links: " << g_next.nb_links << std::endl; std::cout << g_next.links.size() << ":" << g_next.weights.size() << std::endl; } */ //Filter out unused spaces from global memory and copy to g_next g_next.links.resize(g_next.nb_links); g_next.weights.resize(g_next.nb_links); sc = 0; 
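    // The scratch buffers new_nighbor_lists / new_weight_lists were sized from the
    // upper bound on neighborhood sizes, so they contain unused slots. The
    // count_if / copy_if calls below compact them: a slot counts as valid when its
    // neighbor id is < new_nb_comm (IsLessLimit) and, in the same position, its
    // weight is non-negative (Is_Non_Negative). The kernels above appear to mark
    // unused slots with out-of-range ids and negative weights, so the surviving
    // entries should fill exactly the g_next.nb_links slots just resized above.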
//std::cin>>sc; int nrElmntToCpy = thrust::count_if(thrust::device, new_nighbor_lists.begin(), new_nighbor_lists.end(), IsLessLimit<unsigned int, unsigned int>((unsigned int) new_nb_comm)); //std::cout << "NNS: " << new_nighbor_lists.size() << " , #E2C = " << nrElmntToCpy << std::endl; /* if (hostPrint) { std::cout << "-----------------------WE NE--------------" << std::endl; print_vector(new_weight_lists, "WE:"); print_vector(new_nighbor_lists, "NE:"); } */ thrust::copy_if(thrust::device, new_nighbor_lists.begin(), new_nighbor_lists.end(), g_next.links.begin(), IsLessLimit<unsigned int, unsigned int>((unsigned int) new_nb_comm)); new_nighbor_lists.clear(); thrust::copy_if(thrust::device, new_weight_lists.begin(), new_weight_lists.end(), g_next.weights.begin(), Is_Non_Negative<float, float>()); new_weight_lists.clear(); /* std::cin>>sc; if (hostPrint) { std::cout << std::endl << "#Node: " << g_next.nb_nodes << std::endl; std::cout << std::endl << "#Edge: " << g_next.nb_links << std::endl; print_vector(g_next.indices, "Copied Indices: "); print_vector(g_next.links, "Copied NE: "); print_vector(g_next.weights, "Copied WE: "); } */ /*if (0) { thrust::host_vector<unsigned int> gnlinks = g_next.links; thrust::host_vector<int> gnIndices = g_next.indices; thrust::host_vector<float> gnWeights = g_next.weights; std::ofstream ofs; ofs.open ("n2c.txt", std::ofstream::out | std::ofstream::app); for (unsigned int i = 0; i < new_nb_comm; i++) { unsigned int startNbr = gnIndices[i]; unsigned int endNbr = gnIndices[i + 1]; //thrust::sort(gnlinks.begin() + startNbr, gnlinks.begin() + endNbr); thrust::sort_by_key(gnlinks.begin() + startNbr, gnlinks.begin() + endNbr, gnWeights.begin() + startNbr); //std::cout << (i+1) <<"[" << (endNbr -startNbr) << "]"<< ":"; ofs<<i<<": "; for (unsigned int j = startNbr; j < endNbr; j++) { //std::cout << " " << (gnlinks[j]+1)<<"("<<gnWeights[j]<<")"; ofs<< (gnlinks[j])<<"("<<gnWeights[j]<<")"<<" "; } ofs<<"\n"; //std::cout << std::endl; } ofs.close(); }*/ //hipEventDestroy(start); //hipEventDestroy(stop); }
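// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical names and thresholds, not taken from the
// files in this pair): the bucketing pattern used in compute_next_graph()
// above -- classify items by an estimated size with thrust::count_if and the
// stencil overload of thrust::copy_if, gather the sizes into the new id order,
// then sort only the "large" prefix in descending order with
// thrust::sort_by_key. Only standard Thrust calls are used.
// ---------------------------------------------------------------------------
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <climits>

struct InSizeRange {
    int lo, hi; // inclusive bounds on the estimated size
    __host__ __device__ bool operator()(int s) const { return s >= lo && s <= hi; }
};

// Reorders item ids so that items with size > threshold come first, sorted by
// decreasing size; smaller items follow in their original relative order.
inline void bucket_items_by_size(thrust::device_vector<int> const& est_size,
                                 int threshold,
                                 thrust::device_vector<int>& ids_out,
                                 thrust::device_vector<int>& sizes_out)
{
    int const n = static_cast<int>(est_size.size());
    InSizeRange const large{threshold + 1, INT_MAX};
    InSizeRange const small{0, threshold};

    thrust::device_vector<int> ids(n);
    thrust::sequence(ids.begin(), ids.end()); // 0, 1, ..., n-1

    int const n_large = thrust::count_if(est_size.begin(), est_size.end(), large);

    // Stencil-based copy_if: keep id i when est_size[i] satisfies the predicate.
    ids_out.resize(n);
    thrust::copy_if(ids.begin(), ids.end(), est_size.begin(), ids_out.begin(), large);
    thrust::copy_if(ids.begin(), ids.end(), est_size.begin(),
                    ids_out.begin() + n_large, small);

    // Gather sizes in the new id order and sort only the "large" prefix.
    sizes_out.resize(n);
    thrust::gather(ids_out.begin(), ids_out.end(), est_size.begin(), sizes_out.begin());
    thrust::sort_by_key(sizes_out.begin(), sizes_out.begin() + n_large,
                        ids_out.begin(), thrust::greater<int>());
}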
9dff727720c56f42e13cf3ac47314a52a1ecb800.cu
// *********************************************************************** // // Rundemanen: CUDA C++ parallel program for community detection // Md Naim ([email protected]), Fredrik Manne ([email protected]) // University of Bergen // // *********************************************************************** // // Copyright (2016) University of Bergen // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE // COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
// // ************************************************************************ #include"communityGPU.h" #include"hostconstants.h" #include"thrust/reduce.h" #include"thrust/count.h" #include"fstream" #include <thrust/gather.h> void Community::compute_next_graph(cudaStream_t *streams, int nrStreams, cudaEvent_t &start, cudaEvent_t &stop) { //std::cout << "\nCompute_next_graph() \n"; int new_nb_comm = g_next.nb_nodes; bool hostPrint = false; int sc; sc = 0; //std::cin>>sc; hostPrint = (sc > 1); //cudaEvent_t start, stop; //cudaEventCreate(&start); //cudaEventCreate(&stop); //Save a copy of "pos_ptr_of_new_comm" thrust::device_vector<int> super_node_ptrs(pos_ptr_of_new_comm); /* if (hostPrint) { print_vector(super_node_ptrs, "Super Node Ptrs: "); print_vector(n2c_new, " n2c_new; before group_nodes_based_on_new_CID"); } */ //Place nodes of same community together comm_nodes.resize(g.nb_nodes); cudaEventRecord(start, 0); int load_per_blk = CHUNK_PER_WARP * (NR_THREAD_PER_BLOCK / PHY_WRP_SZ); int nr_of_block = (community_size + load_per_blk - 1) / load_per_blk; group_nodes_based_on_new_CID << < nr_of_block, NR_THREAD_PER_BLOCK>>> (thrust::raw_pointer_cast(comm_nodes.data()), thrust::raw_pointer_cast(pos_ptr_of_new_comm.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(n2c.data()), g.nb_nodes); report_time(start, stop, "group_nodes_based_on_new_CID"); this->pos_ptr_of_new_comm.clear(); ////////////////////////////////////////////////////////////////////////////// thrust::device_vector<int> degree_per_node(super_node_ptrs.size()); thrust::transform(thrust::device, super_node_ptrs.begin() + 1, super_node_ptrs.end(), super_node_ptrs.begin(), degree_per_node.begin(), thrust::minus<int>()); int largestCommSize = *thrust::max_element(degree_per_node.begin(), degree_per_node.end()); //std::cout << "-----------------------------------largestCommSize: " << largestCommSize << std::endl; ///////////////////////////////////////////////////////////////////// //print_vector(comm_nodes, " comm_nodes : "); /* if(0){ thrust::host_vector<int> allnodes = comm_nodes; //unsigned int lastone = 777; for (unsigned int i = 0; i < g.nb_nodes; i++) { if(lastone != n2c[allnodes[i]]){ std::cout<<std::endl; } lastone = n2c[allnodes[i]]; if( n2c_new[n2c[allnodes[i]]] ==63 ) std::cout << allnodes[i] << ":" << n2c[ allnodes[i]] <<":" << n2c_new[n2c[allnodes[i]]] <<" "; } } */ /* if (0) { std::cout << std::endl << "Node:Community " << std::endl; for (int i = 0; i < g.nb_nodes; i++) { if( (i >=320 && i <=329) || (i >= 2945 && i <= 2949) ) std::cout << i << "(" << n2c_new[n2c[i]] << ") "; } std::cout << std::endl; } if (hostPrint) { print_vector(super_node_ptrs, "Super Node Ptrs: "); } */ // construct next Graph g_next.indices.resize(new_nb_comm + 1); //-------Estimate the size of neighborhood of each new community------// thrust::device_vector<int> estimatedSizeOfNeighborhoods(new_nb_comm + 1, -1); unsigned int wrpSz = PHY_WRP_SZ; //1; int nr_block_needed = (new_nb_comm + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz); cudaEventRecord(start, 0); computeBoundOfNeighoodSize << < nr_block_needed, NR_THREAD_PER_BLOCK>>>( thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), wrpSz); report_time(start, stop, "estimate_size_of_neighborhoods"); /* if (hostPrint) { print_vector(estimatedSizeOfNeighborhoods, "estimatedSizeOfNeighborhoods: 
"); } */ // /* if (0) { thrust::host_vector<int> commNodesHost = comm_nodes; thrust::host_vector<int> superPtrsHost = super_node_ptrs; for (int i = 0; i < superPtrsHost.size() - 1; i++) { if(i==63){ thrust::sort(commNodesHost.begin() + superPtrsHost[i], commNodesHost.begin() + superPtrsHost[i + 1]); std::cout << "RNR "; for (int j = superPtrsHost[i]; j < superPtrsHost[i + 1]; j++) { std::cout << commNodesHost[j] << " "; } std::cout << std::endl; } } commNodesHost.clear(); superPtrsHost.clear(); } */ cudaEventRecord(start, 0); unsigned int bucketSizePerWarp = WARP_TABLE_SIZE_1; IsGreaterThanLimit<int, int> filterForBlkGMem(SHARED_TABLE_SIZE); IsInRange<int, int> filterForBlkSMem(WARP_TABLE_SIZE_1 + 1, SHARED_TABLE_SIZE); IsInRange<int, int> filterForWrp(0, WARP_TABLE_SIZE_1); //------------Filter communities to be processed by block based on upper bound---------// //Count First int nrCforBlkGbMem = thrust::count_if(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), filterForBlkGMem); int nrCforBlkShMem = thrust::count_if(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), filterForBlkSMem); int nrCforWrp = thrust::count_if(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), filterForWrp); // std::cout << "#Community Processed By Warp = " << nrCforWrp << std::endl; //Lets copy all community ids in g_next.links g_next.links.resize(new_nb_comm, 0); thrust::sequence(g_next.links.begin(), g_next.links.end(), 0); //Use g_next.indices to copy community ids with decreasing sizes of neighborhood g_next.indices.resize(new_nb_comm, -1); //Community ids with larger UpperBound on SoN first thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), estimatedSizeOfNeighborhoods.begin(), g_next.indices.begin(), filterForBlkGMem); //^^copied first "nrCforBlkGbMem" thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), estimatedSizeOfNeighborhoods.begin(), g_next.indices.begin() + nrCforBlkGbMem, filterForBlkSMem); //^^copied next "nrCforBlkShMem" //Then community ids with smaller UpperBound on SoN thrust::copy_if(thrust::device, g_next.links.begin(), g_next.links.end(), estimatedSizeOfNeighborhoods.begin(), g_next.indices.begin() + nrCforBlkGbMem + nrCforBlkShMem, filterForWrp); /* std::cout << "new_nb_comm = " << new_nb_comm << std::endl; std::cout << "nrCforBlkGbMem = " << nrCforBlkGbMem << std::endl; std::cout << "nrCforBlkShMem = " << nrCforBlkShMem << std::endl; std::cout << "nrCforWrp = " << nrCforWrp << std::endl; */ assert((nrCforBlkGbMem + nrCforBlkShMem + nrCforWrp) == new_nb_comm); /* if (0) { thrust::host_vector<int> esSizes = estimatedSizeOfNeighborhoods; thrust::host_vector<int> bigCommunites = g_next.indices; for (int k = 0; k < thrust::min<int>(5, bigCommunites.size()); k++) { std::cout << bigCommunites[k] << ":::" << esSizes[bigCommunites[k]] << std::endl; } esSizes.clear(); bigCommunites.clear(); }*/ // Now, use g_next.links to copy sizes of neighborhood according to order given by g_next.indices g_next.links.resize(g_next.indices.size(), 0); thrust::gather(thrust::device, g_next.indices.begin(), g_next.indices.end(), estimatedSizeOfNeighborhoods.begin(), g_next.links.begin()); report_time(start, stop, "FilterGather"); // std::cout<<"Gathered\n"; // std::cin>>sc; /* if (0) { thrust::host_vector<unsigned int> gnlinks = g_next.links; thrust::host_vector<int> bigCommunites = g_next.indices; for (int i = 0; i < thrust::min(5, nrCforBlkGbMem); i++) 
{ std::cout << bigCommunites[i] << "*" << gnlinks[i] << std::endl; } gnlinks.clear(); bigCommunites.clear(); } */ sc = 0; //std::cin>>sc; //Sort according to size of neighborhood ; only first nrCforBlkGbMem if ((nrCforBlkGbMem + nrCforBlkShMem) > 0) { int sortLen = nrCforBlkGbMem + nrCforBlkShMem; //std::cout<<"Sorting "<<sortLen <<" entries"<<std::endl; thrust::sort_by_key(g_next.links.begin(), g_next.links.begin() + sortLen, g_next.indices.begin(), thrust::greater<unsigned int>()); } /* if (1) { thrust::host_vector<int> esSizes = estimatedSizeOfNeighborhoods; thrust::host_vector<int> bigCommunites = g_next.indices; for (int k = 0; k < thrust::min<int>(bigCommunites.size(), 8); k++) { std::cout << bigCommunites[k] << "::" << esSizes[bigCommunites[k]] << std::endl; } esSizes.clear(); bigCommunites.clear(); } */ sc = 0; //std::cin>>sc; int nrBlockForLargeNhoods = 150; nrBlockForLargeNhoods = thrust::min(thrust::max(nrCforBlkGbMem, nrCforBlkShMem), nrBlockForLargeNhoods); thrust::device_vector<int> hashTablePtrs(nrBlockForLargeNhoods + 1, 0); ////////////////////////////////////////////////// //void preComputePrimes(int *primes,int nrPrimes, int* thresholds, int nrBigBlock, int *selectedPrimes, int WARP_SIZE); wrpSz = PHY_WRP_SZ; ; nr_block_needed = (nrBlockForLargeNhoods + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz); if (nrBlockForLargeNhoods > 0) preComputePrimes << < nr_block_needed, NR_THREAD_PER_BLOCK >>> (thrust::raw_pointer_cast(devPrimes.data()), nb_prime, thrust::raw_pointer_cast(g_next.links.data()), nrBlockForLargeNhoods, thrust::raw_pointer_cast(hashTablePtrs.data()) + 1, wrpSz); /* if (1) { thrust::host_vector<int> esSizes = hashTablePtrs; thrust::host_vector<unsigned int> thresholds= g_next.links; int spaceReq=0; for (int k = 0; k < nrBlockForLargeNhoods; k++) { std::cout << thresholds[k]<<" nearest Prime -> " <<esSizes[k+1]<<std::endl; if(thresholds[k]> esSizes[k+1]) std::cout<<"PROBLEM in HOST, call to prime computation"<<std::endl; spaceReq += esSizes[k+1]; } std::cout<< "Total Space requried: "<< spaceReq <<std::endl; esSizes.clear(); } */ thrust::inclusive_scan(hashTablePtrs.begin(), hashTablePtrs.begin() + (nrBlockForLargeNhoods + 1), hashTablePtrs.begin(), thrust::plus<int>()); /////////////////////////////////////////////////// //g_next.links contains sizes of big neighborhoods /*---------------> thrust::inclusive_scan(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods, hashTablePtrs.begin() + 1, thrust::plus<int>()); */ /********************/ /* if (1) { thrust::host_vector<int> esSizes = hashTablePtrs; for (int k = 0; k < nrBlockForLargeNhoods; k++) { std::cout << esSizes[k]<<" "; } std::cout<<std::endl; esSizes.clear(); } */ //------->thrust::transform( hashTablePtrs.begin(), hashTablePtrs.end(), hashTablePtrs.begin(), hashTablePtrs.begin(),thrust::plus<int>()); /* if (1) { thrust::host_vector<int> esSizes = hashTablePtrs; for (int k = 0; k < nrBlockForLargeNhoods/8; k++) { std::cout <<k<<":"<< esSizes[k]<<" "; } std::cout<<std::endl; esSizes.clear(); } */ //thrust::transform( hashTablePtrs.begin(), hashTablePtrs.end(), hashTablePtrs.begin(), hashTablePtrs.begin(),thrust::plus<int>()); /* if (0) { thrust::host_vector<int> esSizes = hashTablePtrs; for (int k = 0; k < nrBlockForLargeNhoods; k++) { std::cout << esSizes[k]<<" "; } std::cout<<std::endl; esSizes.clear(); } */ thrust::device_vector<HashItem> globalHashTable(hashTablePtrs.back()); /*********************/ // thrust::device_vector<HashItem> globalHashTable(3 * 
hashTablePtrs.back()); int szHTmem = thrust::reduce(g_next.links.begin(), g_next.links.begin() + nrBlockForLargeNhoods, (int) 0); //std::cout << globalHashTable.size() << ":" << 2 * szHTmem << std::endl; //-------Prefix sum on estimate the size of neighborhoods to determine global positions for new communities-----// cudaEventRecord(start, 0); thrust::exclusive_scan(thrust::device, estimatedSizeOfNeighborhoods.begin(), estimatedSizeOfNeighborhoods.end(), estimatedSizeOfNeighborhoods.begin(), (int) 0, thrust::plus<int>()); report_time(start, stop, "thrust::exclusive_scan"); int upperBoundonTotalSize = estimatedSizeOfNeighborhoods.back(); /* if (hostPrint) { print_vector(estimatedSizeOfNeighborhoods, "estimatedSizeOfNeighborhoods: "); } std::cout << "Before big allocation; UpperBoundOnTotalSize = " << upperBoundonTotalSize << std::endl; */ //--------------Allocate memory for new links and weights-------------// thrust::device_vector<unsigned int> member_count_per_new_comm(new_nb_comm + 1, 0); // exact count thrust::device_vector<unsigned int> new_nighbor_lists(upperBoundonTotalSize); thrust::device_vector<float> new_weight_lists(upperBoundonTotalSize); //std::cout << "nrBlockForLargeNhoods: " << nrBlockForLargeNhoods << std::endl; /* if (hostPrint) { print_vector(member_count_per_new_comm, "Exact #neighbor per community:"); } */ wrpSz = PHY_WRP_SZ; cudaEventRecord(start, 0); if (nrCforBlkGbMem > 0) findNewNeighodByBlock << < nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK/*, 0, streams[0]*/>>> (thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(new_weight_lists.data()), thrust::raw_pointer_cast(new_nighbor_lists.data()), thrust::raw_pointer_cast(member_count_per_new_comm.data()), // puts zero in position zero for prefix sum thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(g.links.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(n2c.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), g.type, bucketSizePerWarp, thrust::raw_pointer_cast(g_next.indices.data()), //int* candidateComms nrCforBlkGbMem, //int nrCandidateComms thrust::raw_pointer_cast(globalHashTable.data()), thrust::raw_pointer_cast(hashTablePtrs.data()), thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz); report_time(start, stop, "findNewNeighodByBlock(GlobalMemory)"); /* int sum_MC = thrust::reduce(member_count_per_new_comm.begin(), member_count_per_new_comm.end(), (int) 0); std::cout << "sum_MC(blkGlb): " << sum_MC << std::endl; */ sc = 0; //std::cin>>sc; cudaEventRecord(start, 0); if (nrCforBlkShMem > 0) findNewNeighodByBlock << < nrBlockForLargeNhoods, NR_THREAD_PER_BLOCK/*, 0, streams[1]*/>>> (thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(new_weight_lists.data()), thrust::raw_pointer_cast(new_nighbor_lists.data()), thrust::raw_pointer_cast(member_count_per_new_comm.data()), // puts zero in position zero for prefix sum thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(g.links.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(n2c.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), g.type, bucketSizePerWarp, thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGbMem, //int* candidateComms nrCforBlkShMem, //int nrCandidateComms 
thrust::raw_pointer_cast(globalHashTable.data()), thrust::raw_pointer_cast(hashTablePtrs.data()), thrust::raw_pointer_cast(devPrimes.data()), nb_prime, wrpSz); report_time(start, stop, "findNewNeighodByBlock(SharedMemory)"); /* sum_MC = thrust::reduce(member_count_per_new_comm.begin(), member_count_per_new_comm.end(), (int) 0); std::cout << "sum_MC(blkShrd): " << sum_MC << std::endl; */ //------------Compute neighborhood of new communities-----------------// sc = 0; //std::cin>>sc; //if( new_nb_comm!=12525 ){ sc = 0; //std::cin>>sc; //std::cout << "Pre: nr_block_needed:" << nr_block_needed << std::endl; wrpSz = PHY_WRP_SZ; nr_block_needed = (nrCforWrp + (NR_THREAD_PER_BLOCK / wrpSz) - 1) / (NR_THREAD_PER_BLOCK / wrpSz); //std::cout << "Post: nr_block_needed:" << nr_block_needed << std::endl; nr_block_needed = thrust::min(nr_block_needed, 1920); unsigned int sharedMemSzPerBlock = WARP_TABLE_SIZE_1 * sizeof (HashItem) * NR_THREAD_PER_BLOCK / wrpSz; cudaEventRecord(start, 0); if (nrCforWrp) determineNewNeighborhood << < nr_block_needed, NR_THREAD_PER_BLOCK, sharedMemSzPerBlock/*, streams[2]*/>>> (thrust::raw_pointer_cast(super_node_ptrs.data()), thrust::raw_pointer_cast(new_weight_lists.data()), thrust::raw_pointer_cast(new_nighbor_lists.data()), thrust::raw_pointer_cast(member_count_per_new_comm.data()), // puts zero in position zero for prefix sum thrust::raw_pointer_cast(g.indices.data()), thrust::raw_pointer_cast(g.weights.data()), thrust::raw_pointer_cast(g.links.data()), thrust::raw_pointer_cast(comm_nodes.data()), new_nb_comm, thrust::raw_pointer_cast(n2c.data()), thrust::raw_pointer_cast(n2c_new.data()), thrust::raw_pointer_cast(estimatedSizeOfNeighborhoods.data()), g.type, WARP_TABLE_SIZE_1, thrust::raw_pointer_cast(g_next.indices.data()) + nrCforBlkGbMem + nrCforBlkShMem, nrCforWrp, wrpSz); report_time(start, stop, "determine_neighbors_of_new_comms"); //print_vector(member_count_per_new_comm, "Neighbor Counts( per new community): ", "MC"); hashTablePtrs.clear(); globalHashTable.clear(); estimatedSizeOfNeighborhoods.clear(); n2c.clear(); n2c_new.clear(); comm_nodes.clear(); /**************** OKAY ?****************/ g.indices.clear(); g.links.clear(); g.weights.clear(); /********************************/ std::cout << "#New Community: " << new_nb_comm << std::endl; /* sum_MC = thrust::reduce(member_count_per_new_comm.begin(), member_count_per_new_comm.end(), (int) 0); std::cout << "sum_MC(warp): " << sum_MC << std::endl; if (hostPrint) { std::cout << "#New Community: " << new_nb_comm << std::endl; print_vector(new_weight_lists, "New Weights: "); print_vector(new_nighbor_lists, "New Neighbors: "); print_vector(super_node_ptrs, "Super Node Ptrs: "); } */ //---------Put data accordingly to new graph-------------------// g_next.type = WEIGHTED; g_next.indices.resize(member_count_per_new_comm.size(), 0); thrust::inclusive_scan(thrust::device, member_count_per_new_comm.begin(), member_count_per_new_comm.end(), g_next.indices.begin(), thrust::plus<int>()); member_count_per_new_comm.clear(); int nr_edges_in_new_graph = g_next.indices.back(); g_next.nb_links = (unsigned int) nr_edges_in_new_graph; //std::cout << "#E(New Graph): " << nr_edges_in_new_graph << std::endl; /*if (0) { std::cout << std::endl << "g_next.nb_links: " << g_next.nb_links << std::endl; std::cout << g_next.links.size() << ":" << g_next.weights.size() << std::endl; } */ //Filter out unused spaces from global memory and copy to g_next g_next.links.resize(g_next.nb_links); g_next.weights.resize(g_next.nb_links); sc = 0; 
//std::cin>>sc; int nrElmntToCpy = thrust::count_if(thrust::device, new_nighbor_lists.begin(), new_nighbor_lists.end(), IsLessLimit<unsigned int, unsigned int>((unsigned int) new_nb_comm)); //std::cout << "NNS: " << new_nighbor_lists.size() << " , #E2C = " << nrElmntToCpy << std::endl; /* if (hostPrint) { std::cout << "-----------------------WE NE--------------" << std::endl; print_vector(new_weight_lists, "WE:"); print_vector(new_nighbor_lists, "NE:"); } */ thrust::copy_if(thrust::device, new_nighbor_lists.begin(), new_nighbor_lists.end(), g_next.links.begin(), IsLessLimit<unsigned int, unsigned int>((unsigned int) new_nb_comm)); new_nighbor_lists.clear(); thrust::copy_if(thrust::device, new_weight_lists.begin(), new_weight_lists.end(), g_next.weights.begin(), Is_Non_Negative<float, float>()); new_weight_lists.clear(); /* std::cin>>sc; if (hostPrint) { std::cout << std::endl << "#Node: " << g_next.nb_nodes << std::endl; std::cout << std::endl << "#Edge: " << g_next.nb_links << std::endl; print_vector(g_next.indices, "Copied Indices: "); print_vector(g_next.links, "Copied NE: "); print_vector(g_next.weights, "Copied WE: "); } */ /*if (0) { thrust::host_vector<unsigned int> gnlinks = g_next.links; thrust::host_vector<int> gnIndices = g_next.indices; thrust::host_vector<float> gnWeights = g_next.weights; std::ofstream ofs; ofs.open ("n2c.txt", std::ofstream::out | std::ofstream::app); for (unsigned int i = 0; i < new_nb_comm; i++) { unsigned int startNbr = gnIndices[i]; unsigned int endNbr = gnIndices[i + 1]; //thrust::sort(gnlinks.begin() + startNbr, gnlinks.begin() + endNbr); thrust::sort_by_key(gnlinks.begin() + startNbr, gnlinks.begin() + endNbr, gnWeights.begin() + startNbr); //std::cout << (i+1) <<"[" << (endNbr -startNbr) << "]"<< ":"; ofs<<i<<": "; for (unsigned int j = startNbr; j < endNbr; j++) { //std::cout << " " << (gnlinks[j]+1)<<"("<<gnWeights[j]<<")"; ofs<< (gnlinks[j])<<"("<<gnWeights[j]<<")"<<" "; } ofs<<"\n"; //std::cout << std::endl; } ofs.close(); }*/ //cudaEventDestroy(start); //cudaEventDestroy(stop); }
d73853eb33d2b152d56619f3ba61e0a4236b3292.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "generate_input.hpp" #include "random_distribution_factory.cuh" #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/filling.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/types.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_int_distribution.h> #include <thrust/random/uniform_real_distribution.h> #include <thrust/scan.h> #include <thrust/tabulate.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <random> #include <utility> #include <vector> /** * @brief Mersenne Twister pseudo-random engine. */ auto deterministic_engine(unsigned seed) { return thrust::minstd_rand{seed}; } /** * Computes the mean value for a distribution of given type and value bounds. */ template <typename T> T get_distribution_mean(distribution_params<T> const& dist) { switch (dist.id) { case distribution_id::NORMAL: case distribution_id::UNIFORM: return (dist.lower_bound / 2.) + (dist.upper_bound / 2.); case distribution_id::GEOMETRIC: { auto const range_size = dist.lower_bound < dist.upper_bound ? dist.upper_bound - dist.lower_bound : dist.lower_bound - dist.upper_bound; auto const p = geometric_dist_p(range_size); if (dist.lower_bound < dist.upper_bound) return dist.lower_bound + (1. / p); else return dist.lower_bound - (1. / p); } default: CUDF_FAIL("Unsupported distribution type."); } } /** * @brief Computes the average element size in a column, given the data profile. * * Random distribution parameters like average string length and maximum list nesting level affect * the element size of non-fixed-width columns. For lists and structs, `avg_element_size` is called * recursively to determine the size of nested columns. 
*/ size_t avg_element_size(data_profile const& profile, cudf::data_type dtype); // Utilities to determine the mean size of an element, given the data profile template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_width<T>())> size_t non_fixed_width_size(data_profile const& profile) { CUDF_FAIL("Should not be called, use `size_of` for this type instead"); } template <typename T, CUDF_ENABLE_IF(!cudf::is_fixed_width<T>())> size_t non_fixed_width_size(data_profile const& profile) { CUDF_FAIL("not implemented!"); } template <> size_t non_fixed_width_size<cudf::string_view>(data_profile const& profile) { auto const dist = profile.get_distribution_params<cudf::string_view>().length_params; return get_distribution_mean(dist); } template <> size_t non_fixed_width_size<cudf::list_view>(data_profile const& profile) { auto const dist_params = profile.get_distribution_params<cudf::list_view>(); auto const single_level_mean = get_distribution_mean(dist_params.length_params); auto const element_size = avg_element_size(profile, cudf::data_type{dist_params.element_type}); return element_size * pow(single_level_mean, dist_params.max_depth); } template <> size_t non_fixed_width_size<cudf::struct_view>(data_profile const& profile) { auto const dist_params = profile.get_distribution_params<cudf::struct_view>(); return std::accumulate(dist_params.leaf_types.cbegin(), dist_params.leaf_types.cend(), 0ul, [&](auto& sum, auto type_id) { return sum + avg_element_size(profile, cudf::data_type{type_id}); }); } struct non_fixed_width_size_fn { template <typename T> size_t operator()(data_profile const& profile) { return non_fixed_width_size<T>(profile); } }; size_t avg_element_size(data_profile const& profile, cudf::data_type dtype) { if (cudf::is_fixed_width(dtype)) { return cudf::size_of(dtype); } return cudf::type_dispatcher(dtype, non_fixed_width_size_fn{}, profile); } /** * @brief bool generator with given probability [0.0 - 1.0] of returning true. */ struct bool_generator { thrust::minstd_rand engine; thrust::uniform_real_distribution<float> dist; double probability_true; bool_generator(thrust::minstd_rand engine, double probability_true) : engine(engine), dist{0, 1}, probability_true{probability_true} { } bool_generator(unsigned seed, double probability_true) : engine(seed), dist{0, 1}, probability_true{probability_true} { } __device__ bool operator()(size_t n) { engine.discard(n); return dist(engine) < probability_true; } }; /** * @brief Functor that computes a random column element with the given data profile. * * The implementation is SFINAEd for different type groups. Currently only used for fixed-width * types. 
*/ template <typename T, typename Enable = void> struct random_value_fn; /** * @brief Creates an random timestamp/duration value */ template <typename T> struct random_value_fn<T, std::enable_if_t<cudf::is_chrono<T>()>> { distribution_fn<int64_t> seconds_gen; distribution_fn<int64_t> nanoseconds_gen; random_value_fn(distribution_params<T> params) { using cuda::std::chrono::duration_cast; std::pair<cudf::duration_s, cudf::duration_s> const range_s = { duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.lower_bound}), duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.upper_bound})}; if (range_s.first != range_s.second) { seconds_gen = make_distribution<int64_t>(params.id, range_s.first.count(), range_s.second.count()); nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM, 0l, 1000000000l); } else { // Don't need a random seconds generator for sub-second intervals seconds_gen = [range_s](thrust::minstd_rand&, size_t size) { rmm::device_uvector<int64_t> result(size, cudf::get_default_stream()); thrust::fill(thrust::device, result.begin(), result.end(), range_s.second.count()); return result; }; std::pair<cudf::duration_ns, cudf::duration_ns> const range_ns = { duration_cast<cudf::duration_ns>(typename T::duration{params.lower_bound}), duration_cast<cudf::duration_ns>(typename T::duration{params.upper_bound})}; nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM, ::min(range_ns.first.count(), 0l), ::max(range_ns.second.count(), 0l)); } } rmm::device_uvector<T> operator()(thrust::minstd_rand& engine, unsigned size) { auto const sec = seconds_gen(engine, size); auto const ns = nanoseconds_gen(engine, size); rmm::device_uvector<T> result(size, cudf::get_default_stream()); thrust::transform( thrust::device, sec.begin(), sec.end(), ns.begin(), result.begin(), [] __device__(int64_t sec_value, int64_t nanoseconds_value) { auto const timestamp_ns = cudf::duration_s{sec_value} + cudf::duration_ns{nanoseconds_value}; // Return value in the type's precision return T(cuda::std::chrono::duration_cast<typename T::duration>(timestamp_ns)); }); return result; } }; /** * @brief Creates an random fixed_point value. */ template <typename T> struct random_value_fn<T, std::enable_if_t<cudf::is_fixed_point<T>()>> { using DeviceType = cudf::device_storage_type_t<T>; DeviceType const lower_bound; DeviceType const upper_bound; distribution_fn<DeviceType> dist; std::optional<numeric::scale_type> scale; random_value_fn(distribution_params<DeviceType> const& desc) : lower_bound{desc.lower_bound}, upper_bound{desc.upper_bound}, dist{make_distribution<DeviceType>(desc.id, desc.lower_bound, desc.upper_bound)} { } [[nodiscard]] numeric::scale_type get_scale(thrust::minstd_rand& engine) { if (not scale.has_value()) { constexpr int max_scale = std::numeric_limits<DeviceType>::digits10; std::uniform_int_distribution<int> scale_dist{-max_scale, max_scale}; std::mt19937 engine_scale(engine()); scale = numeric::scale_type{scale_dist(engine_scale)}; } return scale.value_or(numeric::scale_type{0}); } rmm::device_uvector<DeviceType> operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; /** * @brief Creates an random numeric value with the given distribution. 
*/ template <typename T> struct random_value_fn<T, std::enable_if_t<!std::is_same_v<T, bool> && cudf::is_numeric<T>()>> { T const lower_bound; T const upper_bound; distribution_fn<T> dist; random_value_fn(distribution_params<T> const& desc) : lower_bound{desc.lower_bound}, upper_bound{desc.upper_bound}, dist{make_distribution<T>(desc.id, desc.lower_bound, desc.upper_bound)} { } auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; /** * @brief Creates an boolean value with given probability of returning `true`. */ template <typename T> struct random_value_fn<T, typename std::enable_if_t<std::is_same_v<T, bool>>> { // Bernoulli distribution distribution_fn<bool> dist; random_value_fn(distribution_params<bool> const& desc) : dist{[valid_prob = desc.probability_true](thrust::minstd_rand& engine, size_t size) -> rmm::device_uvector<bool> { rmm::device_uvector<bool> result(size, cudf::get_default_stream()); thrust::tabulate( thrust::device, result.begin(), result.end(), bool_generator(engine, valid_prob)); return result; }} { } auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; auto create_run_length_dist(cudf::size_type avg_run_len) { // Distribution with low probability of generating 0-1 even with a low `avg_run_len` value static constexpr float alpha = 4.f; return std::gamma_distribution<float>{alpha, avg_run_len / alpha}; } /** * @brief Generate indices within range [0 , cardinality) repeating with average run length * `avg_run_len` * * @param avg_run_len Average run length of the generated indices * @param cardinality Number of unique values in the output vector * @param num_rows Number of indices to generate * @param engine Random engine * @return Generated indices of type `cudf::size_type` */ rmm::device_uvector<cudf::size_type> sample_indices_with_run_length(cudf::size_type avg_run_len, cudf::size_type cardinality, cudf::size_type num_rows, thrust::minstd_rand& engine) { auto sample_dist = random_value_fn<cudf::size_type>{ distribution_params<cudf::size_type>{distribution_id::UNIFORM, 0, cardinality - 1}}; if (avg_run_len > 1) { auto avglen_dist = random_value_fn<int>{distribution_params<int>{distribution_id::UNIFORM, 1, 2 * avg_run_len}}; auto const approx_run_len = num_rows / avg_run_len + 1; auto run_lens = avglen_dist(engine, approx_run_len); thrust::inclusive_scan( thrust::device, run_lens.begin(), run_lens.end(), run_lens.begin(), std::plus<int>{}); auto const samples_indices = sample_dist(engine, approx_run_len + 1); // This is gather. auto avg_repeated_sample_indices_iterator = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [rb = run_lens.begin(), re = run_lens.end(), samples_indices = samples_indices.begin()] __device__(cudf::size_type i) { auto sample_idx = thrust::upper_bound(thrust::seq, rb, re, i) - rb; return samples_indices[sample_idx]; }); rmm::device_uvector<cudf::size_type> repeated_sample_indices(num_rows, cudf::get_default_stream()); thrust::copy(thrust::device, avg_repeated_sample_indices_iterator, avg_repeated_sample_indices_iterator + num_rows, repeated_sample_indices.begin()); return repeated_sample_indices; } else { // generate n samples. return sample_dist(engine, num_rows); } } /** * @brief Creates a column with random content of type @ref T. 
* * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @tparam T Data type of the output column * @return Column filled with random data */ template <typename T> std::unique_ptr<cudf::column> create_random_column(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { // Bernoulli distribution auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. - profile.get_null_probability().value_or(0)}); auto value_dist = random_value_fn<T>{profile.get_distribution_params<T>()}; using DeviceType = cudf::device_storage_type_t<T>; cudf::data_type const dtype = [&]() { if constexpr (cudf::is_fixed_point<T>()) return cudf::data_type{cudf::type_to_id<T>(), value_dist.get_scale(engine)}; else return cudf::data_type{cudf::type_to_id<T>()}; }(); // Distribution for picking elements from the array of samples auto const avg_run_len = profile.get_avg_run_length(); rmm::device_uvector<DeviceType> data(0, cudf::get_default_stream()); rmm::device_uvector<bool> null_mask(0, cudf::get_default_stream()); if (profile.get_cardinality() == 0 and avg_run_len == 1) { data = value_dist(engine, num_rows); null_mask = valid_dist(engine, num_rows); } else { auto const cardinality = [profile_cardinality = profile.get_cardinality(), num_rows] { return (profile_cardinality == 0 or profile_cardinality > num_rows) ? num_rows : profile_cardinality; }(); rmm::device_uvector<bool> samples_null_mask = valid_dist(engine, cardinality); rmm::device_uvector<DeviceType> samples = value_dist(engine, cardinality); // generate n samples and gather. auto const sample_indices = sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine); data = rmm::device_uvector<DeviceType>(num_rows, cudf::get_default_stream()); null_mask = rmm::device_uvector<bool>(num_rows, cudf::get_default_stream()); thrust::gather( thrust::device, sample_indices.begin(), sample_indices.end(), samples.begin(), data.begin()); thrust::gather(thrust::device, sample_indices.begin(), sample_indices.end(), samples_null_mask.begin(), null_mask.begin()); } auto [result_bitmask, null_count] = cudf::detail::valid_if(null_mask.begin(), null_mask.end(), thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); return std::make_unique<cudf::column>( dtype, num_rows, data.release(), profile.get_null_probability().has_value() ? std::move(result_bitmask) : rmm::device_buffer{}, profile.get_null_probability().has_value() ? null_count : 0); } struct valid_or_zero { template <typename T> __device__ T operator()(thrust::tuple<T, bool> len_valid) const { return thrust::get<1>(len_valid) ? thrust::get<0>(len_valid) : T{0}; } }; struct string_generator { char* chars; thrust::minstd_rand engine; thrust::uniform_int_distribution<unsigned char> char_dist; string_generator(char* c, thrust::minstd_rand& engine) : chars(c), engine(engine), char_dist(32, 137) // ~90% ASCII, ~10% UTF-8. // ~80% not-space, ~20% space. // range 32-127 is ASCII; 127-136 will be multi-byte UTF-8 { } __device__ void operator()(thrust::tuple<cudf::size_type, cudf::size_type> str_begin_end) { auto begin = thrust::get<0>(str_begin_end); auto end = thrust::get<1>(str_begin_end); engine.discard(begin); for (auto i = begin; i < end; ++i) { auto ch = char_dist(engine); if (i == end - 1 && ch >= '\x7F') ch = ' '; // last element ASCII only. 
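      // A value of ch in [0x7F, 0x89] is widened below into a two-byte UTF-8
      // sequence: lead byte 0xC4 followed by the continuation byte ch + 1
      // (0x80..0x8A), i.e. code points U+0100..U+010A; values below 0x7F are
      // emitted as a single ASCII byte.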
if (ch >= '\x7F') // x7F is at the top edge of ASCII chars[i++] = '\xC4'; // these characters are assigned two bytes chars[i] = static_cast<char>(ch + (ch >= '\x7F')); } } }; /** * @brief Create a UTF-8 string column with the average length. * */ std::unique_ptr<cudf::column> create_random_utf8_string_column(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto len_dist = random_value_fn<uint32_t>{profile.get_distribution_params<cudf::string_view>().length_params}; auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. - profile.get_null_probability().value_or(0)}); auto lengths = len_dist(engine, num_rows + 1); auto null_mask = valid_dist(engine, num_rows + 1); thrust::transform_if( thrust::device, lengths.begin(), lengths.end(), null_mask.begin(), lengths.begin(), [] __device__(auto) { return 0; }, thrust::logical_not<bool>{}); auto valid_lengths = thrust::make_transform_iterator( thrust::make_zip_iterator(thrust::make_tuple(lengths.begin(), null_mask.begin())), valid_or_zero{}); rmm::device_uvector<cudf::size_type> offsets(num_rows + 1, cudf::get_default_stream()); thrust::exclusive_scan( thrust::device, valid_lengths, valid_lengths + lengths.size(), offsets.begin()); // offsets are ready. auto chars_length = *thrust::device_pointer_cast(offsets.end() - 1); rmm::device_uvector<char> chars(chars_length, cudf::get_default_stream()); thrust::for_each_n(thrust::device, thrust::make_zip_iterator(offsets.begin(), offsets.begin() + 1), num_rows, string_generator{chars.data(), engine}); auto [result_bitmask, null_count] = cudf::detail::valid_if(null_mask.begin(), null_mask.end() - 1, thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); return cudf::make_strings_column( num_rows, std::move(offsets), std::move(chars), profile.get_null_probability().has_value() ? std::move(result_bitmask) : rmm::device_buffer{}, null_count); } /** * @brief Creates a string column with random content. * * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @return Column filled with random strings */ template <> std::unique_ptr<cudf::column> create_random_column<cudf::string_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const cardinality = ::min(profile.get_cardinality(), num_rows); auto const avg_run_len = profile.get_avg_run_length(); auto sample_strings = create_random_utf8_string_column(profile, engine, cardinality == 0 ? num_rows : cardinality); if (cardinality == 0) { return sample_strings; } auto sample_indices = sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine); auto str_table = cudf::detail::gather(cudf::table_view{{sample_strings->view()}}, sample_indices, cudf::out_of_bounds_policy::DONT_CHECK, cudf::detail::negative_index_policy::NOT_ALLOWED, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); return std::move(str_table->release()[0]); } template <> std::unique_ptr<cudf::column> create_random_column<cudf::dictionary32>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { CUDF_FAIL("not implemented yet"); } /** * @brief Functor to dispatch create_random_column calls. 
*/ struct create_rand_col_fn { public: template <typename T> std::unique_ptr<cudf::column> operator()(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { return create_random_column<T>(profile, engine, num_rows); } }; /** * @brief Calculates the number of direct parents needed to generate a struct column hierarchy with * lowest maximum number of children in any nested column. * * Used to generate an "evenly distributed" struct column hierarchy with the given number of leaf * columns and nesting levels. The column tree is considered evenly distributed if all columns have * nearly the same number of child columns (difference not larger than one). */ int num_direct_parents(int num_lvls, int num_leaf_columns) { // Estimated average number of children in the hierarchy; auto const num_children_avg = ::pow(num_leaf_columns, 1. / num_lvls); // Minimum number of children columns for any column in the hierarchy int const num_children_min = ::floor(num_children_avg); // Maximum number of children columns for any column in the hierarchy int const num_children_max = num_children_min + 1; // Minimum number of columns needed so that their number of children does not exceed the maximum int const min_for_current_nesting = ::ceil((double)num_leaf_columns / num_children_max); // Minimum number of columns needed so that columns at the higher levels have at least the minimum // number of children int const min_for_upper_nesting = ::pow(num_children_min, num_lvls - 1); // Both conditions need to be satisfied return ::max(min_for_current_nesting, min_for_upper_nesting); } template <> std::unique_ptr<cudf::column> create_random_column<cudf::struct_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const dist_params = profile.get_distribution_params<cudf::struct_view>(); // Generate leaf columns std::vector<std::unique_ptr<cudf::column>> children; children.reserve(dist_params.leaf_types.size()); std::transform(dist_params.leaf_types.cbegin(), dist_params.leaf_types.cend(), std::back_inserter(children), [&](auto& type_id) { return cudf::type_dispatcher( cudf::data_type(type_id), create_rand_col_fn{}, profile, engine, num_rows); }); auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. 
- profile.get_null_probability().value_or(0)}); // Generate the column bottom-up for (int lvl = dist_params.max_depth; lvl > 0; --lvl) { // Generating the next level std::vector<std::unique_ptr<cudf::column>> parents; parents.resize(num_direct_parents(lvl, children.size())); auto current_child = children.begin(); for (auto current_parent = parents.begin(); current_parent != parents.end(); ++current_parent) { auto [null_mask, null_count] = [&]() { if (profile.get_null_probability().has_value()) { auto valids = valid_dist(engine, num_rows); return cudf::detail::valid_if(valids.begin(), valids.end(), thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); } return std::pair<rmm::device_buffer, cudf::size_type>{}; }(); // Adopt remaining children as evenly as possible auto const num_to_adopt = cudf::util::div_rounding_up_unsafe( std::distance(current_child, children.end()), std::distance(current_parent, parents.end())); CUDF_EXPECTS(num_to_adopt > 0, "No children columns left to adopt"); std::vector<std::unique_ptr<cudf::column>> children_to_adopt; children_to_adopt.insert(children_to_adopt.end(), std::make_move_iterator(current_child), std::make_move_iterator(current_child + num_to_adopt)); current_child += children_to_adopt.size(); *current_parent = cudf::make_structs_column( num_rows, std::move(children_to_adopt), null_count, std::move(null_mask)); } if (lvl == 1) { CUDF_EXPECTS(parents.size() == 1, "There should be one top-level column"); return std::move(parents.front()); } children = std::move(parents); } CUDF_FAIL("Reached unreachable code in struct column creation"); } template <typename T> struct clamp_down : public thrust::unary_function<T, T> { T max; clamp_down(T max) : max(max) {} __host__ __device__ T operator()(T x) const { return min(x, max); } }; /** * @brief Creates a list column with random content. * * The data profile determines the list length distribution, number of nested level, and the data * type of the bottom level. * * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @return Column filled with random lists */ template <> std::unique_ptr<cudf::column> create_random_column<cudf::list_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const dist_params = profile.get_distribution_params<cudf::list_view>(); auto const single_level_mean = get_distribution_mean(dist_params.length_params); auto const num_elements = num_rows * pow(single_level_mean, dist_params.max_depth); auto leaf_column = cudf::type_dispatcher( cudf::data_type(dist_params.element_type), create_rand_col_fn{}, profile, engine, num_elements); auto len_dist = random_value_fn<uint32_t>{profile.get_distribution_params<cudf::list_view>().length_params}; auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. 
- profile.get_null_probability().value_or(0)}); // Generate the list column bottom-up auto list_column = std::move(leaf_column); for (int lvl = 0; lvl < dist_params.max_depth; ++lvl) { // Generating the next level - offsets point into the current list column auto current_child_column = std::move(list_column); cudf::size_type const num_rows = current_child_column->size() / single_level_mean; auto offsets = len_dist(engine, num_rows + 1); auto valids = valid_dist(engine, num_rows); // to ensure these values <= current_child_column->size() auto output_offsets = thrust::make_transform_output_iterator( offsets.begin(), clamp_down{current_child_column->size()}); thrust::exclusive_scan(thrust::device, offsets.begin(), offsets.end(), output_offsets); thrust::device_pointer_cast(offsets.end())[-1] = current_child_column->size(); // Always include all elements auto offsets_column = std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::INT32}, num_rows + 1, offsets.release(), rmm::device_buffer{}, 0); auto [null_mask, null_count] = cudf::detail::valid_if(valids.begin(), valids.end(), thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); list_column = cudf::make_lists_column( num_rows, std::move(offsets_column), std::move(current_child_column), profile.get_null_probability().has_value() ? null_count : 0, profile.get_null_probability().has_value() ? std::move(null_mask) : rmm::device_buffer{}); } return list_column; // return the top-level column } using columns_vector = std::vector<std::unique_ptr<cudf::column>>; /** * @brief Creates a vector of columns with random content. * * @param profile Parameters for the random generator * @param dtype_ids vector of data type ids, one for each output column * @param engine Pseudo-random engine * @param num_rows Size of the output columns * * @return Column filled with random lists */ columns_vector create_random_columns(data_profile const& profile, std::vector<cudf::type_id> dtype_ids, thrust::minstd_rand engine, cudf::size_type num_rows) { columns_vector output_columns; std::transform( dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) { engine.discard(num_rows); return cudf::type_dispatcher( cudf::data_type(tid), create_rand_col_fn{}, profile, engine, num_rows); }); return output_columns; } /** * @brief Repeats the input data types cyclically order to fill a vector of @ref num_cols * elements. 
*/ std::vector<cudf::type_id> cycle_dtypes(std::vector<cudf::type_id> const& dtype_ids, cudf::size_type num_cols) { if (dtype_ids.size() == static_cast<std::size_t>(num_cols)) { return dtype_ids; } std::vector<cudf::type_id> out_dtypes; out_dtypes.reserve(num_cols); for (cudf::size_type col = 0; col < num_cols; ++col) out_dtypes.push_back(dtype_ids[col % dtype_ids.size()]); return out_dtypes; } std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids, table_size_bytes table_bytes, data_profile const& profile, unsigned seed) { size_t const avg_row_bytes = std::accumulate(dtype_ids.begin(), dtype_ids.end(), 0ul, [&](size_t sum, auto tid) { return sum + avg_element_size(profile, cudf::data_type(tid)); }); cudf::size_type const num_rows = table_bytes.size / avg_row_bytes; return create_random_table(dtype_ids, row_count{num_rows}, profile, seed); } std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids, row_count num_rows, data_profile const& profile, unsigned seed) { auto seed_engine = deterministic_engine(seed); thrust::uniform_int_distribution<unsigned> seed_dist; columns_vector output_columns; std::transform( dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) mutable { return create_random_column(tid, num_rows, profile, seed_dist(seed_engine)); }); return std::make_unique<cudf::table>(std::move(output_columns)); } std::unique_ptr<cudf::column> create_random_column(cudf::type_id dtype_id, row_count num_rows, data_profile const& profile, unsigned seed) { auto engine = deterministic_engine(seed); return cudf::type_dispatcher( cudf::data_type(dtype_id), create_rand_col_fn{}, profile, engine, num_rows.count); } std::unique_ptr<cudf::table> create_sequence_table(std::vector<cudf::type_id> const& dtype_ids, row_count num_rows, std::optional<double> null_probability, unsigned seed) { auto seed_engine = deterministic_engine(seed); thrust::uniform_int_distribution<unsigned> seed_dist; auto columns = std::vector<std::unique_ptr<cudf::column>>(dtype_ids.size()); std::transform(dtype_ids.begin(), dtype_ids.end(), columns.begin(), [&](auto dtype) mutable { auto init = cudf::make_default_constructed_scalar(cudf::data_type{dtype}); auto col = cudf::sequence(num_rows.count, *init); auto [mask, count] = create_random_null_mask(num_rows.count, null_probability, seed_dist(seed_engine)); col->set_null_mask(std::move(mask), count); return col; }); return std::make_unique<cudf::table>(std::move(columns)); } std::pair<rmm::device_buffer, cudf::size_type> create_random_null_mask( cudf::size_type size, std::optional<double> null_probability, unsigned seed) { if (not null_probability.has_value()) { return {rmm::device_buffer{}, 0}; } CUDF_EXPECTS(*null_probability >= 0.0 and *null_probability <= 1.0, "Null probability must be within the range [0.0, 1.0]"); if (*null_probability == 0.0f) { return {cudf::create_null_mask(size, cudf::mask_state::ALL_VALID), 0}; } else if (*null_probability == 1.0) { return {cudf::create_null_mask(size, cudf::mask_state::ALL_NULL), size}; } else { return cudf::detail::valid_if(thrust::make_counting_iterator<cudf::size_type>(0), thrust::make_counting_iterator<cudf::size_type>(size), bool_generator{seed, 1.0 - *null_probability}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); } } std::vector<cudf::type_id> get_type_or_group(int32_t id) { // identity transformation when passing a concrete type_id if (id < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS)) return 
{static_cast<cudf::type_id>(id)}; // if the value is larger that type_id::NUM_TYPE_IDS, it's a group id type_group_id const group_id = static_cast<type_group_id>(id); using trait_fn = bool (*)(cudf::data_type); trait_fn is_integral = [](cudf::data_type type) { return cudf::is_numeric(type) && !cudf::is_floating_point(type); }; trait_fn is_integral_signed = [](cudf::data_type type) { return cudf::is_numeric(type) && !cudf::is_floating_point(type) && !cudf::is_unsigned(type); }; auto fn = [&]() -> trait_fn { switch (group_id) { case type_group_id::FLOATING_POINT: return cudf::is_floating_point; case type_group_id::INTEGRAL: return is_integral; case type_group_id::INTEGRAL_SIGNED: return is_integral_signed; case type_group_id::NUMERIC: return cudf::is_numeric; case type_group_id::TIMESTAMP: return cudf::is_timestamp; case type_group_id::DURATION: return cudf::is_duration; case type_group_id::FIXED_POINT: return cudf::is_fixed_point; case type_group_id::COMPOUND: return cudf::is_compound; case type_group_id::NESTED: return cudf::is_nested; default: CUDF_FAIL("Invalid data type group"); } }(); std::vector<cudf::type_id> types; for (int type_int = 0; type_int < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS); ++type_int) { auto const type = static_cast<cudf::type_id>(type_int); if (type != cudf::type_id::EMPTY && fn(cudf::data_type(type))) types.push_back(type); } return types; } std::vector<cudf::type_id> get_type_or_group(std::vector<int32_t> const& ids) { std::vector<cudf::type_id> all_type_ids; for (auto& id : ids) { auto const type_ids = get_type_or_group(id); all_type_ids.insert(std::end(all_type_ids), std::cbegin(type_ids), std::cend(type_ids)); } return all_type_ids; }
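A short usage sketch (not from the benchmark sources) showing how the dispatch helpers above might be combined by a caller. The generate_input.hpp header name is taken from this translation unit's own includes, and casting type_group_id values to int32_t assumes the group ids are laid out past cudf::type_id::NUM_TYPE_IDS, as the branch in get_type_or_group implies.

#include "generate_input.hpp"

#include <cudf/types.hpp>

#include <rmm/device_buffer.hpp>

#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

// Expand "all integral types plus FLOAT64" into concrete type ids and cycle
// them over eight output columns.
std::vector<cudf::type_id> make_column_types()
{
  auto const base = get_type_or_group({static_cast<int32_t>(type_group_id::INTEGRAL),
                                       static_cast<int32_t>(cudf::type_id::FLOAT64)});
  return cycle_dtypes(base, 8);
}

// Build a bitmask with ~10% nulls and a fixed seed for reproducible runs.
std::pair<rmm::device_buffer, cudf::size_type> make_mask(cudf::size_type num_rows)
{
  return create_random_null_mask(num_rows, std::optional<double>{0.1}, /*seed=*/42);
}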
d73853eb33d2b152d56619f3ba61e0a4236b3292.cu
/* * Copyright (c) 2020-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "generate_input.hpp" #include "random_distribution_factory.cuh" #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/filling.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/types.hpp> #include <cudf/utilities/default_stream.hpp> #include <cudf/utilities/error.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/mr/device/per_device_resource.hpp> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/gather.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_int_distribution.h> #include <thrust/random/uniform_real_distribution.h> #include <thrust/scan.h> #include <thrust/tabulate.h> #include <thrust/transform.h> #include <thrust/tuple.h> #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <random> #include <utility> #include <vector> /** * @brief Mersenne Twister pseudo-random engine. */ auto deterministic_engine(unsigned seed) { return thrust::minstd_rand{seed}; } /** * Computes the mean value for a distribution of given type and value bounds. */ template <typename T> T get_distribution_mean(distribution_params<T> const& dist) { switch (dist.id) { case distribution_id::NORMAL: case distribution_id::UNIFORM: return (dist.lower_bound / 2.) + (dist.upper_bound / 2.); case distribution_id::GEOMETRIC: { auto const range_size = dist.lower_bound < dist.upper_bound ? dist.upper_bound - dist.lower_bound : dist.lower_bound - dist.upper_bound; auto const p = geometric_dist_p(range_size); if (dist.lower_bound < dist.upper_bound) return dist.lower_bound + (1. / p); else return dist.lower_bound - (1. / p); } default: CUDF_FAIL("Unsupported distribution type."); } } /** * @brief Computes the average element size in a column, given the data profile. * * Random distribution parameters like average string length and maximum list nesting level affect * the element size of non-fixed-width columns. For lists and structs, `avg_element_size` is called * recursively to determine the size of nested columns. 
*/ size_t avg_element_size(data_profile const& profile, cudf::data_type dtype); // Utilities to determine the mean size of an element, given the data profile template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_width<T>())> size_t non_fixed_width_size(data_profile const& profile) { CUDF_FAIL("Should not be called, use `size_of` for this type instead"); } template <typename T, CUDF_ENABLE_IF(!cudf::is_fixed_width<T>())> size_t non_fixed_width_size(data_profile const& profile) { CUDF_FAIL("not implemented!"); } template <> size_t non_fixed_width_size<cudf::string_view>(data_profile const& profile) { auto const dist = profile.get_distribution_params<cudf::string_view>().length_params; return get_distribution_mean(dist); } template <> size_t non_fixed_width_size<cudf::list_view>(data_profile const& profile) { auto const dist_params = profile.get_distribution_params<cudf::list_view>(); auto const single_level_mean = get_distribution_mean(dist_params.length_params); auto const element_size = avg_element_size(profile, cudf::data_type{dist_params.element_type}); return element_size * pow(single_level_mean, dist_params.max_depth); } template <> size_t non_fixed_width_size<cudf::struct_view>(data_profile const& profile) { auto const dist_params = profile.get_distribution_params<cudf::struct_view>(); return std::accumulate(dist_params.leaf_types.cbegin(), dist_params.leaf_types.cend(), 0ul, [&](auto& sum, auto type_id) { return sum + avg_element_size(profile, cudf::data_type{type_id}); }); } struct non_fixed_width_size_fn { template <typename T> size_t operator()(data_profile const& profile) { return non_fixed_width_size<T>(profile); } }; size_t avg_element_size(data_profile const& profile, cudf::data_type dtype) { if (cudf::is_fixed_width(dtype)) { return cudf::size_of(dtype); } return cudf::type_dispatcher(dtype, non_fixed_width_size_fn{}, profile); } /** * @brief bool generator with given probability [0.0 - 1.0] of returning true. */ struct bool_generator { thrust::minstd_rand engine; thrust::uniform_real_distribution<float> dist; double probability_true; bool_generator(thrust::minstd_rand engine, double probability_true) : engine(engine), dist{0, 1}, probability_true{probability_true} { } bool_generator(unsigned seed, double probability_true) : engine(seed), dist{0, 1}, probability_true{probability_true} { } __device__ bool operator()(size_t n) { engine.discard(n); return dist(engine) < probability_true; } }; /** * @brief Functor that computes a random column element with the given data profile. * * The implementation is SFINAEd for different type groups. Currently only used for fixed-width * types. 
*/ template <typename T, typename Enable = void> struct random_value_fn; /** * @brief Creates an random timestamp/duration value */ template <typename T> struct random_value_fn<T, std::enable_if_t<cudf::is_chrono<T>()>> { distribution_fn<int64_t> seconds_gen; distribution_fn<int64_t> nanoseconds_gen; random_value_fn(distribution_params<T> params) { using cuda::std::chrono::duration_cast; std::pair<cudf::duration_s, cudf::duration_s> const range_s = { duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.lower_bound}), duration_cast<cuda::std::chrono::seconds>(typename T::duration{params.upper_bound})}; if (range_s.first != range_s.second) { seconds_gen = make_distribution<int64_t>(params.id, range_s.first.count(), range_s.second.count()); nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM, 0l, 1000000000l); } else { // Don't need a random seconds generator for sub-second intervals seconds_gen = [range_s](thrust::minstd_rand&, size_t size) { rmm::device_uvector<int64_t> result(size, cudf::get_default_stream()); thrust::fill(thrust::device, result.begin(), result.end(), range_s.second.count()); return result; }; std::pair<cudf::duration_ns, cudf::duration_ns> const range_ns = { duration_cast<cudf::duration_ns>(typename T::duration{params.lower_bound}), duration_cast<cudf::duration_ns>(typename T::duration{params.upper_bound})}; nanoseconds_gen = make_distribution<int64_t>(distribution_id::UNIFORM, std::min(range_ns.first.count(), 0l), std::max(range_ns.second.count(), 0l)); } } rmm::device_uvector<T> operator()(thrust::minstd_rand& engine, unsigned size) { auto const sec = seconds_gen(engine, size); auto const ns = nanoseconds_gen(engine, size); rmm::device_uvector<T> result(size, cudf::get_default_stream()); thrust::transform( thrust::device, sec.begin(), sec.end(), ns.begin(), result.begin(), [] __device__(int64_t sec_value, int64_t nanoseconds_value) { auto const timestamp_ns = cudf::duration_s{sec_value} + cudf::duration_ns{nanoseconds_value}; // Return value in the type's precision return T(cuda::std::chrono::duration_cast<typename T::duration>(timestamp_ns)); }); return result; } }; /** * @brief Creates an random fixed_point value. */ template <typename T> struct random_value_fn<T, std::enable_if_t<cudf::is_fixed_point<T>()>> { using DeviceType = cudf::device_storage_type_t<T>; DeviceType const lower_bound; DeviceType const upper_bound; distribution_fn<DeviceType> dist; std::optional<numeric::scale_type> scale; random_value_fn(distribution_params<DeviceType> const& desc) : lower_bound{desc.lower_bound}, upper_bound{desc.upper_bound}, dist{make_distribution<DeviceType>(desc.id, desc.lower_bound, desc.upper_bound)} { } [[nodiscard]] numeric::scale_type get_scale(thrust::minstd_rand& engine) { if (not scale.has_value()) { constexpr int max_scale = std::numeric_limits<DeviceType>::digits10; std::uniform_int_distribution<int> scale_dist{-max_scale, max_scale}; std::mt19937 engine_scale(engine()); scale = numeric::scale_type{scale_dist(engine_scale)}; } return scale.value_or(numeric::scale_type{0}); } rmm::device_uvector<DeviceType> operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; /** * @brief Creates an random numeric value with the given distribution. 
*/ template <typename T> struct random_value_fn<T, std::enable_if_t<!std::is_same_v<T, bool> && cudf::is_numeric<T>()>> { T const lower_bound; T const upper_bound; distribution_fn<T> dist; random_value_fn(distribution_params<T> const& desc) : lower_bound{desc.lower_bound}, upper_bound{desc.upper_bound}, dist{make_distribution<T>(desc.id, desc.lower_bound, desc.upper_bound)} { } auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; /** * @brief Creates an boolean value with given probability of returning `true`. */ template <typename T> struct random_value_fn<T, typename std::enable_if_t<std::is_same_v<T, bool>>> { // Bernoulli distribution distribution_fn<bool> dist; random_value_fn(distribution_params<bool> const& desc) : dist{[valid_prob = desc.probability_true](thrust::minstd_rand& engine, size_t size) -> rmm::device_uvector<bool> { rmm::device_uvector<bool> result(size, cudf::get_default_stream()); thrust::tabulate( thrust::device, result.begin(), result.end(), bool_generator(engine, valid_prob)); return result; }} { } auto operator()(thrust::minstd_rand& engine, unsigned size) { return dist(engine, size); } }; auto create_run_length_dist(cudf::size_type avg_run_len) { // Distribution with low probability of generating 0-1 even with a low `avg_run_len` value static constexpr float alpha = 4.f; return std::gamma_distribution<float>{alpha, avg_run_len / alpha}; } /** * @brief Generate indices within range [0 , cardinality) repeating with average run length * `avg_run_len` * * @param avg_run_len Average run length of the generated indices * @param cardinality Number of unique values in the output vector * @param num_rows Number of indices to generate * @param engine Random engine * @return Generated indices of type `cudf::size_type` */ rmm::device_uvector<cudf::size_type> sample_indices_with_run_length(cudf::size_type avg_run_len, cudf::size_type cardinality, cudf::size_type num_rows, thrust::minstd_rand& engine) { auto sample_dist = random_value_fn<cudf::size_type>{ distribution_params<cudf::size_type>{distribution_id::UNIFORM, 0, cardinality - 1}}; if (avg_run_len > 1) { auto avglen_dist = random_value_fn<int>{distribution_params<int>{distribution_id::UNIFORM, 1, 2 * avg_run_len}}; auto const approx_run_len = num_rows / avg_run_len + 1; auto run_lens = avglen_dist(engine, approx_run_len); thrust::inclusive_scan( thrust::device, run_lens.begin(), run_lens.end(), run_lens.begin(), std::plus<int>{}); auto const samples_indices = sample_dist(engine, approx_run_len + 1); // This is gather. auto avg_repeated_sample_indices_iterator = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [rb = run_lens.begin(), re = run_lens.end(), samples_indices = samples_indices.begin()] __device__(cudf::size_type i) { auto sample_idx = thrust::upper_bound(thrust::seq, rb, re, i) - rb; return samples_indices[sample_idx]; }); rmm::device_uvector<cudf::size_type> repeated_sample_indices(num_rows, cudf::get_default_stream()); thrust::copy(thrust::device, avg_repeated_sample_indices_iterator, avg_repeated_sample_indices_iterator + num_rows, repeated_sample_indices.begin()); return repeated_sample_indices; } else { // generate n samples. return sample_dist(engine, num_rows); } } /** * @brief Creates a column with random content of type @ref T. 
* * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @tparam T Data type of the output column * @return Column filled with random data */ template <typename T> std::unique_ptr<cudf::column> create_random_column(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { // Bernoulli distribution auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. - profile.get_null_probability().value_or(0)}); auto value_dist = random_value_fn<T>{profile.get_distribution_params<T>()}; using DeviceType = cudf::device_storage_type_t<T>; cudf::data_type const dtype = [&]() { if constexpr (cudf::is_fixed_point<T>()) return cudf::data_type{cudf::type_to_id<T>(), value_dist.get_scale(engine)}; else return cudf::data_type{cudf::type_to_id<T>()}; }(); // Distribution for picking elements from the array of samples auto const avg_run_len = profile.get_avg_run_length(); rmm::device_uvector<DeviceType> data(0, cudf::get_default_stream()); rmm::device_uvector<bool> null_mask(0, cudf::get_default_stream()); if (profile.get_cardinality() == 0 and avg_run_len == 1) { data = value_dist(engine, num_rows); null_mask = valid_dist(engine, num_rows); } else { auto const cardinality = [profile_cardinality = profile.get_cardinality(), num_rows] { return (profile_cardinality == 0 or profile_cardinality > num_rows) ? num_rows : profile_cardinality; }(); rmm::device_uvector<bool> samples_null_mask = valid_dist(engine, cardinality); rmm::device_uvector<DeviceType> samples = value_dist(engine, cardinality); // generate n samples and gather. auto const sample_indices = sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine); data = rmm::device_uvector<DeviceType>(num_rows, cudf::get_default_stream()); null_mask = rmm::device_uvector<bool>(num_rows, cudf::get_default_stream()); thrust::gather( thrust::device, sample_indices.begin(), sample_indices.end(), samples.begin(), data.begin()); thrust::gather(thrust::device, sample_indices.begin(), sample_indices.end(), samples_null_mask.begin(), null_mask.begin()); } auto [result_bitmask, null_count] = cudf::detail::valid_if(null_mask.begin(), null_mask.end(), thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); return std::make_unique<cudf::column>( dtype, num_rows, data.release(), profile.get_null_probability().has_value() ? std::move(result_bitmask) : rmm::device_buffer{}, profile.get_null_probability().has_value() ? null_count : 0); } struct valid_or_zero { template <typename T> __device__ T operator()(thrust::tuple<T, bool> len_valid) const { return thrust::get<1>(len_valid) ? thrust::get<0>(len_valid) : T{0}; } }; struct string_generator { char* chars; thrust::minstd_rand engine; thrust::uniform_int_distribution<unsigned char> char_dist; string_generator(char* c, thrust::minstd_rand& engine) : chars(c), engine(engine), char_dist(32, 137) // ~90% ASCII, ~10% UTF-8. // ~80% not-space, ~20% space. // range 32-127 is ASCII; 127-136 will be multi-byte UTF-8 { } __device__ void operator()(thrust::tuple<cudf::size_type, cudf::size_type> str_begin_end) { auto begin = thrust::get<0>(str_begin_end); auto end = thrust::get<1>(str_begin_end); engine.discard(begin); for (auto i = begin; i < end; ++i) { auto ch = char_dist(engine); if (i == end - 1 && ch >= '\x7F') ch = ' '; // last element ASCII only. 
if (ch >= '\x7F') // x7F is at the top edge of ASCII chars[i++] = '\xC4'; // these characters are assigned two bytes chars[i] = static_cast<char>(ch + (ch >= '\x7F')); } } }; /** * @brief Create a UTF-8 string column with the average length. * */ std::unique_ptr<cudf::column> create_random_utf8_string_column(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto len_dist = random_value_fn<uint32_t>{profile.get_distribution_params<cudf::string_view>().length_params}; auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. - profile.get_null_probability().value_or(0)}); auto lengths = len_dist(engine, num_rows + 1); auto null_mask = valid_dist(engine, num_rows + 1); thrust::transform_if( thrust::device, lengths.begin(), lengths.end(), null_mask.begin(), lengths.begin(), [] __device__(auto) { return 0; }, thrust::logical_not<bool>{}); auto valid_lengths = thrust::make_transform_iterator( thrust::make_zip_iterator(thrust::make_tuple(lengths.begin(), null_mask.begin())), valid_or_zero{}); rmm::device_uvector<cudf::size_type> offsets(num_rows + 1, cudf::get_default_stream()); thrust::exclusive_scan( thrust::device, valid_lengths, valid_lengths + lengths.size(), offsets.begin()); // offsets are ready. auto chars_length = *thrust::device_pointer_cast(offsets.end() - 1); rmm::device_uvector<char> chars(chars_length, cudf::get_default_stream()); thrust::for_each_n(thrust::device, thrust::make_zip_iterator(offsets.begin(), offsets.begin() + 1), num_rows, string_generator{chars.data(), engine}); auto [result_bitmask, null_count] = cudf::detail::valid_if(null_mask.begin(), null_mask.end() - 1, thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); return cudf::make_strings_column( num_rows, std::move(offsets), std::move(chars), profile.get_null_probability().has_value() ? std::move(result_bitmask) : rmm::device_buffer{}, null_count); } /** * @brief Creates a string column with random content. * * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @return Column filled with random strings */ template <> std::unique_ptr<cudf::column> create_random_column<cudf::string_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const cardinality = std::min(profile.get_cardinality(), num_rows); auto const avg_run_len = profile.get_avg_run_length(); auto sample_strings = create_random_utf8_string_column(profile, engine, cardinality == 0 ? num_rows : cardinality); if (cardinality == 0) { return sample_strings; } auto sample_indices = sample_indices_with_run_length(avg_run_len, cardinality, num_rows, engine); auto str_table = cudf::detail::gather(cudf::table_view{{sample_strings->view()}}, sample_indices, cudf::out_of_bounds_policy::DONT_CHECK, cudf::detail::negative_index_policy::NOT_ALLOWED, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); return std::move(str_table->release()[0]); } template <> std::unique_ptr<cudf::column> create_random_column<cudf::dictionary32>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { CUDF_FAIL("not implemented yet"); } /** * @brief Functor to dispatch create_random_column calls. 
*/ struct create_rand_col_fn { public: template <typename T> std::unique_ptr<cudf::column> operator()(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { return create_random_column<T>(profile, engine, num_rows); } }; /** * @brief Calculates the number of direct parents needed to generate a struct column hierarchy with * lowest maximum number of children in any nested column. * * Used to generate an "evenly distributed" struct column hierarchy with the given number of leaf * columns and nesting levels. The column tree is considered evenly distributed if all columns have * nearly the same number of child columns (difference not larger than one). */ int num_direct_parents(int num_lvls, int num_leaf_columns) { // Estimated average number of children in the hierarchy; auto const num_children_avg = std::pow(num_leaf_columns, 1. / num_lvls); // Minimum number of children columns for any column in the hierarchy int const num_children_min = std::floor(num_children_avg); // Maximum number of children columns for any column in the hierarchy int const num_children_max = num_children_min + 1; // Minimum number of columns needed so that their number of children does not exceed the maximum int const min_for_current_nesting = std::ceil((double)num_leaf_columns / num_children_max); // Minimum number of columns needed so that columns at the higher levels have at least the minimum // number of children int const min_for_upper_nesting = std::pow(num_children_min, num_lvls - 1); // Both conditions need to be satisfied return std::max(min_for_current_nesting, min_for_upper_nesting); } template <> std::unique_ptr<cudf::column> create_random_column<cudf::struct_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const dist_params = profile.get_distribution_params<cudf::struct_view>(); // Generate leaf columns std::vector<std::unique_ptr<cudf::column>> children; children.reserve(dist_params.leaf_types.size()); std::transform(dist_params.leaf_types.cbegin(), dist_params.leaf_types.cend(), std::back_inserter(children), [&](auto& type_id) { return cudf::type_dispatcher( cudf::data_type(type_id), create_rand_col_fn{}, profile, engine, num_rows); }); auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. 
- profile.get_null_probability().value_or(0)}); // Generate the column bottom-up for (int lvl = dist_params.max_depth; lvl > 0; --lvl) { // Generating the next level std::vector<std::unique_ptr<cudf::column>> parents; parents.resize(num_direct_parents(lvl, children.size())); auto current_child = children.begin(); for (auto current_parent = parents.begin(); current_parent != parents.end(); ++current_parent) { auto [null_mask, null_count] = [&]() { if (profile.get_null_probability().has_value()) { auto valids = valid_dist(engine, num_rows); return cudf::detail::valid_if(valids.begin(), valids.end(), thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); } return std::pair<rmm::device_buffer, cudf::size_type>{}; }(); // Adopt remaining children as evenly as possible auto const num_to_adopt = cudf::util::div_rounding_up_unsafe( std::distance(current_child, children.end()), std::distance(current_parent, parents.end())); CUDF_EXPECTS(num_to_adopt > 0, "No children columns left to adopt"); std::vector<std::unique_ptr<cudf::column>> children_to_adopt; children_to_adopt.insert(children_to_adopt.end(), std::make_move_iterator(current_child), std::make_move_iterator(current_child + num_to_adopt)); current_child += children_to_adopt.size(); *current_parent = cudf::make_structs_column( num_rows, std::move(children_to_adopt), null_count, std::move(null_mask)); } if (lvl == 1) { CUDF_EXPECTS(parents.size() == 1, "There should be one top-level column"); return std::move(parents.front()); } children = std::move(parents); } CUDF_FAIL("Reached unreachable code in struct column creation"); } template <typename T> struct clamp_down : public thrust::unary_function<T, T> { T max; clamp_down(T max) : max(max) {} __host__ __device__ T operator()(T x) const { return min(x, max); } }; /** * @brief Creates a list column with random content. * * The data profile determines the list length distribution, number of nested level, and the data * type of the bottom level. * * @param profile Parameters for the random generator * @param engine Pseudo-random engine * @param num_rows Size of the output column * * @return Column filled with random lists */ template <> std::unique_ptr<cudf::column> create_random_column<cudf::list_view>(data_profile const& profile, thrust::minstd_rand& engine, cudf::size_type num_rows) { auto const dist_params = profile.get_distribution_params<cudf::list_view>(); auto const single_level_mean = get_distribution_mean(dist_params.length_params); auto const num_elements = num_rows * pow(single_level_mean, dist_params.max_depth); auto leaf_column = cudf::type_dispatcher( cudf::data_type(dist_params.element_type), create_rand_col_fn{}, profile, engine, num_elements); auto len_dist = random_value_fn<uint32_t>{profile.get_distribution_params<cudf::list_view>().length_params}; auto valid_dist = random_value_fn<bool>( distribution_params<bool>{1. 
- profile.get_null_probability().value_or(0)}); // Generate the list column bottom-up auto list_column = std::move(leaf_column); for (int lvl = 0; lvl < dist_params.max_depth; ++lvl) { // Generating the next level - offsets point into the current list column auto current_child_column = std::move(list_column); cudf::size_type const num_rows = current_child_column->size() / single_level_mean; auto offsets = len_dist(engine, num_rows + 1); auto valids = valid_dist(engine, num_rows); // to ensure these values <= current_child_column->size() auto output_offsets = thrust::make_transform_output_iterator( offsets.begin(), clamp_down{current_child_column->size()}); thrust::exclusive_scan(thrust::device, offsets.begin(), offsets.end(), output_offsets); thrust::device_pointer_cast(offsets.end())[-1] = current_child_column->size(); // Always include all elements auto offsets_column = std::make_unique<cudf::column>(cudf::data_type{cudf::type_id::INT32}, num_rows + 1, offsets.release(), rmm::device_buffer{}, 0); auto [null_mask, null_count] = cudf::detail::valid_if(valids.begin(), valids.end(), thrust::identity<bool>{}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); list_column = cudf::make_lists_column( num_rows, std::move(offsets_column), std::move(current_child_column), profile.get_null_probability().has_value() ? null_count : 0, profile.get_null_probability().has_value() ? std::move(null_mask) : rmm::device_buffer{}); } return list_column; // return the top-level column } using columns_vector = std::vector<std::unique_ptr<cudf::column>>; /** * @brief Creates a vector of columns with random content. * * @param profile Parameters for the random generator * @param dtype_ids vector of data type ids, one for each output column * @param engine Pseudo-random engine * @param num_rows Size of the output columns * * @return Column filled with random lists */ columns_vector create_random_columns(data_profile const& profile, std::vector<cudf::type_id> dtype_ids, thrust::minstd_rand engine, cudf::size_type num_rows) { columns_vector output_columns; std::transform( dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) { engine.discard(num_rows); return cudf::type_dispatcher( cudf::data_type(tid), create_rand_col_fn{}, profile, engine, num_rows); }); return output_columns; } /** * @brief Repeats the input data types cyclically order to fill a vector of @ref num_cols * elements. 
*/ std::vector<cudf::type_id> cycle_dtypes(std::vector<cudf::type_id> const& dtype_ids, cudf::size_type num_cols) { if (dtype_ids.size() == static_cast<std::size_t>(num_cols)) { return dtype_ids; } std::vector<cudf::type_id> out_dtypes; out_dtypes.reserve(num_cols); for (cudf::size_type col = 0; col < num_cols; ++col) out_dtypes.push_back(dtype_ids[col % dtype_ids.size()]); return out_dtypes; } std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids, table_size_bytes table_bytes, data_profile const& profile, unsigned seed) { size_t const avg_row_bytes = std::accumulate(dtype_ids.begin(), dtype_ids.end(), 0ul, [&](size_t sum, auto tid) { return sum + avg_element_size(profile, cudf::data_type(tid)); }); cudf::size_type const num_rows = table_bytes.size / avg_row_bytes; return create_random_table(dtype_ids, row_count{num_rows}, profile, seed); } std::unique_ptr<cudf::table> create_random_table(std::vector<cudf::type_id> const& dtype_ids, row_count num_rows, data_profile const& profile, unsigned seed) { auto seed_engine = deterministic_engine(seed); thrust::uniform_int_distribution<unsigned> seed_dist; columns_vector output_columns; std::transform( dtype_ids.begin(), dtype_ids.end(), std::back_inserter(output_columns), [&](auto tid) mutable { return create_random_column(tid, num_rows, profile, seed_dist(seed_engine)); }); return std::make_unique<cudf::table>(std::move(output_columns)); } std::unique_ptr<cudf::column> create_random_column(cudf::type_id dtype_id, row_count num_rows, data_profile const& profile, unsigned seed) { auto engine = deterministic_engine(seed); return cudf::type_dispatcher( cudf::data_type(dtype_id), create_rand_col_fn{}, profile, engine, num_rows.count); } std::unique_ptr<cudf::table> create_sequence_table(std::vector<cudf::type_id> const& dtype_ids, row_count num_rows, std::optional<double> null_probability, unsigned seed) { auto seed_engine = deterministic_engine(seed); thrust::uniform_int_distribution<unsigned> seed_dist; auto columns = std::vector<std::unique_ptr<cudf::column>>(dtype_ids.size()); std::transform(dtype_ids.begin(), dtype_ids.end(), columns.begin(), [&](auto dtype) mutable { auto init = cudf::make_default_constructed_scalar(cudf::data_type{dtype}); auto col = cudf::sequence(num_rows.count, *init); auto [mask, count] = create_random_null_mask(num_rows.count, null_probability, seed_dist(seed_engine)); col->set_null_mask(std::move(mask), count); return col; }); return std::make_unique<cudf::table>(std::move(columns)); } std::pair<rmm::device_buffer, cudf::size_type> create_random_null_mask( cudf::size_type size, std::optional<double> null_probability, unsigned seed) { if (not null_probability.has_value()) { return {rmm::device_buffer{}, 0}; } CUDF_EXPECTS(*null_probability >= 0.0 and *null_probability <= 1.0, "Null probability must be within the range [0.0, 1.0]"); if (*null_probability == 0.0f) { return {cudf::create_null_mask(size, cudf::mask_state::ALL_VALID), 0}; } else if (*null_probability == 1.0) { return {cudf::create_null_mask(size, cudf::mask_state::ALL_NULL), size}; } else { return cudf::detail::valid_if(thrust::make_counting_iterator<cudf::size_type>(0), thrust::make_counting_iterator<cudf::size_type>(size), bool_generator{seed, 1.0 - *null_probability}, cudf::get_default_stream(), rmm::mr::get_current_device_resource()); } } std::vector<cudf::type_id> get_type_or_group(int32_t id) { // identity transformation when passing a concrete type_id if (id < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS)) return 
{static_cast<cudf::type_id>(id)}; // if the value is larger that type_id::NUM_TYPE_IDS, it's a group id type_group_id const group_id = static_cast<type_group_id>(id); using trait_fn = bool (*)(cudf::data_type); trait_fn is_integral = [](cudf::data_type type) { return cudf::is_numeric(type) && !cudf::is_floating_point(type); }; trait_fn is_integral_signed = [](cudf::data_type type) { return cudf::is_numeric(type) && !cudf::is_floating_point(type) && !cudf::is_unsigned(type); }; auto fn = [&]() -> trait_fn { switch (group_id) { case type_group_id::FLOATING_POINT: return cudf::is_floating_point; case type_group_id::INTEGRAL: return is_integral; case type_group_id::INTEGRAL_SIGNED: return is_integral_signed; case type_group_id::NUMERIC: return cudf::is_numeric; case type_group_id::TIMESTAMP: return cudf::is_timestamp; case type_group_id::DURATION: return cudf::is_duration; case type_group_id::FIXED_POINT: return cudf::is_fixed_point; case type_group_id::COMPOUND: return cudf::is_compound; case type_group_id::NESTED: return cudf::is_nested; default: CUDF_FAIL("Invalid data type group"); } }(); std::vector<cudf::type_id> types; for (int type_int = 0; type_int < static_cast<int32_t>(cudf::type_id::NUM_TYPE_IDS); ++type_int) { auto const type = static_cast<cudf::type_id>(type_int); if (type != cudf::type_id::EMPTY && fn(cudf::data_type(type))) types.push_back(type); } return types; } std::vector<cudf::type_id> get_type_or_group(std::vector<int32_t> const& ids) { std::vector<cudf::type_id> all_type_ids; for (auto& id : ids) { auto const type_ids = get_type_or_group(id); all_type_ids.insert(std::end(all_type_ids), std::cbegin(type_ids), std::cend(type_ids)); } return all_type_ids; }
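The file's top-level entry points (create_random_table, create_sequence_table, create_random_column) are what benchmarks actually call. A hedged usage sketch follows; it is not part of the file above, and it assumes data_profile is default-constructible and that row_count is the simple aggregate its uses above suggest.

#include "generate_input.hpp"

#include <cudf/table/table.hpp>
#include <cudf/types.hpp>

#include <memory>
#include <optional>
#include <vector>

// Random table: three columns, ~1 million rows, fixed seed so repeated
// benchmark runs see identical data.
std::unique_ptr<cudf::table> build_random_input()
{
  std::vector<cudf::type_id> const types{
    cudf::type_id::INT32, cudf::type_id::FLOAT64, cudf::type_id::STRING};
  data_profile const profile{};  // assumed default-constructible; defaults drive the distributions
  return create_random_table(types, row_count{1'000'000}, profile, /*seed=*/0);
}

// Sequence table: two monotonically increasing INT64 columns with 5% nulls.
std::unique_ptr<cudf::table> build_sequence_input()
{
  return create_sequence_table({cudf::type_id::INT64, cudf::type_id::INT64},
                               row_count{100'000},
                               std::optional<double>{0.05},
                               /*seed=*/1);
}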
e7ebdca165ad67c776d989c61ec43dc1507ff022.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file lp_app.cu * * @brief Gunrock label propagation (LP) application */ #include <gunrock/app/app.cuh> #include <gunrock/app/lp/lp_problem.cuh> #include <gunrock/app/lp/lp_enactor.cuh> #include <gunrock/app/lp/lp_test.cuh> namespace gunrock { namespace app { namespace lp { hipError_t UseParameters(util::Parameters &parameters) { hipError_t retval = hipSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<std::string>( "src", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, "0", "<Vertex-ID|random|largestdegree> The source vertices\n" "\tIf random, randomly select non-zero degree vertices;\n" "\tIf largestdegree, select vertices with largest degrees", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "src-seed", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, util::PreDefinedValues<int>::InvalidValue, "seed to generate random sources", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "test", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, -1, "test id for validation", __FILE__, __LINE__)); return retval; } /** * @brief Run LP tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[in] ref_labels Reference labels * @param[in] target Whether to perform the LP * \return hipError_t error message(s), if any */ template <typename GraphT, typename LabelT = typename GraphT::VertexT> hipError_t RunTests(util::Parameters &parameters, GraphT &graph, LabelT **ref_labels = NULL, util::Location target = util::DEVICE) { hipError_t retval = hipSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); // parse configurations from parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs"); int num_srcs = srcs.size(); util::Info info("LP", parameters, graph); // initialize Info structure // Allocate host-side array (for both reference and GPU-computed results) LabelT *h_labels = new LabelT[graph.nodes]; // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; GUARD_CU(problem.Init(graph, target)); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); // perform LP VertexT src; for (int run_num = 0; run_num < num_runs; ++run_num) { src = srcs[run_num % num_srcs]; GUARD_CU(problem.Reset(src, target)); GUARD_CU(enactor.Reset(src, target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact(src)); cpu_timer.Stop(); 
info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + " ms, src = " + std::to_string(src) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(h_labels)); SizeT num_errors = app::lp::Validate_Results( parameters, graph, src, h_labels, ref_labels == NULL ? NULL : ref_labels[run_num % num_srcs], false); } } cpu_timer.Start(); // Copy out results GUARD_CU(problem.Extract(h_labels)); if (validation == "last") { SizeT num_errors = app::lp::Validate_Results( parameters, graph, src, h_labels, ref_labels == NULL ? NULL : ref_labels[(num_runs - 1) % num_srcs]); } // compute running statistics info.ComputeTraversalStats(enactor, h_labels); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(&enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); delete[] h_labels; h_labels = NULL; cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace lp } // namespace app } // namespace gunrock /* * @brief Entry of gunrock_lp function * @tparam GraphT Type of the graph * @tparam LabelT Type of the labels * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] labels Return the labels of the vertices * \return double Return accumulated elapsed times for all runs */ template <typename GraphT, typename LabelT = typename GraphT::VertexT> double gunrock_lp(gunrock::util::Parameters &parameters, GraphT &graph, LabelT **labels) { typedef typename GraphT::VertexT VertexT; typedef gunrock::app::lp::Problem<GraphT> ProblemT; typedef gunrock::app::lp::Enactor<ProblemT> EnactorT; gunrock::util::CpuTimer cpu_timer; gunrock::util::Location target = gunrock::util::DEVICE; double total_time = 0; if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; problem.Init(graph, target); enactor.Init(problem, target); std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs"); int num_runs = parameters.Get<int>("num-runs"); int num_srcs = srcs.size(); for (int run_num = 0; run_num < num_runs; ++run_num) { int src_num = run_num % num_srcs; VertexT src = srcs[src_num]; problem.Reset(src, target); enactor.Reset(src, target); cpu_timer.Start(); enactor.Enact(src); cpu_timer.Stop(); total_time += cpu_timer.ElapsedMillis(); problem.Extract(labels[src_num]); } enactor.Release(target); problem.Release(target); srcs.clear(); return total_time; } /* * @brief Simple interface take in graph as CSR format * @param[in] num_nodes Number of veritces in the input graph * @param[in] num_edges Number of edges in the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[in] edge_values CSR-formatted graph input edge weights * @param[out] labels Return shortest hop distances to source per vertex * \return double Return accumulated elapsed times for all runs */ template <typename VertexT = int, typename SizeT = int, typename LabelT = VertexT> double lp(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets, const VertexT *col_indices, const int num_runs, VertexT *sources, 
LabelT **labels) { typedef typename gunrock::app::TestGraph<VertexT, SizeT, VertexT, gunrock::graph::HAS_CSR | gunrock::graph::HAS_CSC> GraphT; typedef typename GraphT::CsrT CsrT; // Setup parameters gunrock::util::Parameters parameters("lp"); gunrock::graphio::UseParameters(parameters); gunrock::app::lp::UseParameters(parameters); gunrock::app::UseParameters_test(parameters); parameters.Parse_CommandLine(0, NULL); parameters.Set("graph-type", "by-pass"); parameters.Set("num-runs", num_runs); parameters.Set("test", -1); std::vector<VertexT> srcs; for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]); parameters.Set("srcs", srcs); bool quiet = parameters.Get<bool>("quiet"); GraphT graph; // Assign pointers into gunrock graph format graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST); graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST); graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST); graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true); gunrock::graphio::LoadGraph(parameters, graph); // Run the LP double elapsed_time = gunrock_lp(parameters, graph, labels); // Cleanup graph.Release(); srcs.clear(); return elapsed_time; } /* * @brief Simple C-interface take in graph as CSR format * @param[in] num_nodes Number of veritces in the input graph * @param[in] num_edges Number of edges in the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[out] labels Return shortest hop distances to source per vertex * \return double Return accumulated elapsed times for all runs */ double lp(const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, int source, int *distances) { return lp(num_nodes, num_edges, row_offsets, col_indices, 1, &source, &distances); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
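The templated overload above supports batched runs: one source vertex per run and one output buffer per source. A hedged sketch follows; only the lp(...) signature is taken from the file above, and the snippet assumes it sits at the end of this same .cu file, since the interface is not exposed through a header here.

// Two label-propagation runs on a 4-vertex graph with two connected
// components (edges 0-1 and 2-3, each stored in both directions).
void run_lp_batched() {
  int const num_nodes = 4;
  int const num_edges = 4;
  int row_offsets[] = {0, 1, 2, 3, 4};
  int col_indices[] = {1, 0, 3, 2};

  int sources[] = {0, 2};                      // one source per run
  int labels_run0[4];
  int labels_run1[4];
  int* labels[] = {labels_run0, labels_run1};  // one output buffer per source

  double const elapsed_ms =
    lp(num_nodes, num_edges, row_offsets, col_indices, /*num_runs=*/2, sources, labels);
  (void)elapsed_ms;
}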
e7ebdca165ad67c776d989c61ec43dc1507ff022.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file lp_app.cu * * @brief Gunrock label propagation (LP) application */ #include <gunrock/app/app.cuh> #include <gunrock/app/lp/lp_problem.cuh> #include <gunrock/app/lp/lp_enactor.cuh> #include <gunrock/app/lp/lp_test.cuh> namespace gunrock { namespace app { namespace lp { cudaError_t UseParameters(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(parameters.Use<std::string>( "src", util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER, "0", "<Vertex-ID|random|largestdegree> The source vertices\n" "\tIf random, randomly select non-zero degree vertices;\n" "\tIf largestdegree, select vertices with largest degrees", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "src-seed", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, util::PreDefinedValues<int>::InvalidValue, "seed to generate random sources", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "test", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, -1, "test id for validation", __FILE__, __LINE__)); return retval; } /** * @brief Run LP tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[in] ref_labels Reference labels * @param[in] target Whether to perform the LP * \return cudaError_t error message(s), if any */ template <typename GraphT, typename LabelT = typename GraphT::VertexT> cudaError_t RunTests(util::Parameters &parameters, GraphT &graph, LabelT **ref_labels = NULL, util::Location target = util::DEVICE) { cudaError_t retval = cudaSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); // parse configurations from parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs"); int num_srcs = srcs.size(); util::Info info("LP", parameters, graph); // initialize Info structure // Allocate host-side array (for both reference and GPU-computed results) LabelT *h_labels = new LabelT[graph.nodes]; // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; GUARD_CU(problem.Init(graph, target)); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); // perform LP VertexT src; for (int run_num = 0; run_num < num_runs; ++run_num) { src = srcs[run_num % num_srcs]; GUARD_CU(problem.Reset(src, target)); GUARD_CU(enactor.Reset(src, target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact(src)); cpu_timer.Stop(); info.CollectSingleRun(cpu_timer.ElapsedMillis()); 
util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + " ms, src = " + std::to_string(src) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(h_labels)); SizeT num_errors = app::lp::Validate_Results( parameters, graph, src, h_labels, ref_labels == NULL ? NULL : ref_labels[run_num % num_srcs], false); } } cpu_timer.Start(); // Copy out results GUARD_CU(problem.Extract(h_labels)); if (validation == "last") { SizeT num_errors = app::lp::Validate_Results( parameters, graph, src, h_labels, ref_labels == NULL ? NULL : ref_labels[(num_runs - 1) % num_srcs]); } // compute running statistics info.ComputeTraversalStats(enactor, h_labels); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(&enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); delete[] h_labels; h_labels = NULL; cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace lp } // namespace app } // namespace gunrock /* * @brief Entry of gunrock_lp function * @tparam GraphT Type of the graph * @tparam LabelT Type of the labels * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] labels Return the labels of the vertices * \return double Return accumulated elapsed times for all runs */ template <typename GraphT, typename LabelT = typename GraphT::VertexT> double gunrock_lp(gunrock::util::Parameters &parameters, GraphT &graph, LabelT **labels) { typedef typename GraphT::VertexT VertexT; typedef gunrock::app::lp::Problem<GraphT> ProblemT; typedef gunrock::app::lp::Enactor<ProblemT> EnactorT; gunrock::util::CpuTimer cpu_timer; gunrock::util::Location target = gunrock::util::DEVICE; double total_time = 0; if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; problem.Init(graph, target); enactor.Init(problem, target); std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs"); int num_runs = parameters.Get<int>("num-runs"); int num_srcs = srcs.size(); for (int run_num = 0; run_num < num_runs; ++run_num) { int src_num = run_num % num_srcs; VertexT src = srcs[src_num]; problem.Reset(src, target); enactor.Reset(src, target); cpu_timer.Start(); enactor.Enact(src); cpu_timer.Stop(); total_time += cpu_timer.ElapsedMillis(); problem.Extract(labels[src_num]); } enactor.Release(target); problem.Release(target); srcs.clear(); return total_time; } /* * @brief Simple interface take in graph as CSR format * @param[in] num_nodes Number of veritces in the input graph * @param[in] num_edges Number of edges in the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[in] edge_values CSR-formatted graph input edge weights * @param[out] labels Return shortest hop distances to source per vertex * \return double Return accumulated elapsed times for all runs */ template <typename VertexT = int, typename SizeT = int, typename LabelT = VertexT> double lp(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets, const VertexT *col_indices, const int num_runs, VertexT *sources, LabelT **labels) { typedef typename 
gunrock::app::TestGraph<VertexT, SizeT, VertexT, gunrock::graph::HAS_CSR | gunrock::graph::HAS_CSC> GraphT; typedef typename GraphT::CsrT CsrT; // Setup parameters gunrock::util::Parameters parameters("lp"); gunrock::graphio::UseParameters(parameters); gunrock::app::lp::UseParameters(parameters); gunrock::app::UseParameters_test(parameters); parameters.Parse_CommandLine(0, NULL); parameters.Set("graph-type", "by-pass"); parameters.Set("num-runs", num_runs); parameters.Set("test", -1); std::vector<VertexT> srcs; for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]); parameters.Set("srcs", srcs); bool quiet = parameters.Get<bool>("quiet"); GraphT graph; // Assign pointers into gunrock graph format graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST); graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST); graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST); graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true); gunrock::graphio::LoadGraph(parameters, graph); // Run the LP double elapsed_time = gunrock_lp(parameters, graph, labels); // Cleanup graph.Release(); srcs.clear(); return elapsed_time; } /* * @brief Simple C-interface take in graph as CSR format * @param[in] num_nodes Number of veritces in the input graph * @param[in] num_edges Number of edges in the input graph * @param[in] row_offsets CSR-formatted graph input row offsets * @param[in] col_indices CSR-formatted graph input column indices * @param[out] labels Return shortest hop distances to source per vertex * \return double Return accumulated elapsed times for all runs */ double lp(const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, int source, int *distances) { return lp(num_nodes, num_edges, row_offsets, col_indices, 1, &source, &distances); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
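The non-template wrapper at the bottom forwards a single source to the batched version. A minimal, illustrative driver, again assuming it is compiled at the end of this same translation unit:

#include <cstdio>

int main() {
  // 4-vertex cycle in CSR form; every undirected edge appears in both directions.
  int const num_nodes = 4;
  int const num_edges = 8;
  int row_offsets[] = {0, 2, 4, 6, 8};
  int col_indices[] = {1, 2, 0, 3, 0, 3, 1, 2};
  int labels[4] = {0, 0, 0, 0};

  double const elapsed_ms =
    lp(num_nodes, num_edges, row_offsets, col_indices, /*source=*/0, labels);

  std::printf("lp elapsed: %f ms\n", elapsed_ms);
  for (int v = 0; v < num_nodes; ++v) {
    std::printf("label[%d] = %d\n", v, labels[v]);
  }
  return 0;
}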
74217ce1b59d3965d12096b7db2956f951f3f5d3.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <c10/hip/HIPException.h>
#include <ATen/ATen.h>

#include "cuda_dlink_extension_add.cuh"

__global__ void add_kernel(const float* a, const float* b, float* output, int size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) {
    add(a + i, b + i, output + i);
  }
}

// output = a * b + c
void add_cuda(const float* a, const float* b, float* output, int size) {
  const int threads = 1024;
  const int blocks = (size + threads - 1) / threads;

  hipLaunchKernelGGL(( add_kernel), dim3(blocks), dim3(threads), 0, 0, a, b, output, size);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}
74217ce1b59d3965d12096b7db2956f951f3f5d3.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <c10/cuda/CUDAException.h>
#include <ATen/ATen.h>

#include "cuda_dlink_extension_add.cuh"

__global__ void add_kernel(const float* a, const float* b, float* output, int size) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < size) {
    add(a + i, b + i, output + i);
  }
}

// output = a * b + c
void add_cuda(const float* a, const float* b, float* output, int size) {
  const int threads = 1024;
  const int blocks = (size + threads - 1) / threads;

  add_kernel<<<blocks, threads>>>(a, b, output, size);
  C10_CUDA_KERNEL_LAUNCH_CHECK();
}
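A small, illustrative host driver for add_cuda (not part of the test file): allocate device buffers, copy the inputs over, launch, and copy the sums back. The device-side add() helper comes from cuda_dlink_extension_add.cuh and is assumed to write a[0] + b[0] into output[0], which is what the kernel's per-element call suggests.

#include <cuda_runtime.h>

#include <cstdio>
#include <vector>

void add_cuda(const float* a, const float* b, float* output, int size);  // defined above

void run_add_example(int size) {
  std::vector<float> a(size, 1.0f), b(size, 2.0f), out(size, 0.0f);

  float *d_a = nullptr, *d_b = nullptr, *d_out = nullptr;
  cudaMalloc(&d_a, size * sizeof(float));
  cudaMalloc(&d_b, size * sizeof(float));
  cudaMalloc(&d_out, size * sizeof(float));

  cudaMemcpy(d_a, a.data(), size * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b.data(), size * sizeof(float), cudaMemcpyHostToDevice);

  add_cuda(d_a, d_b, d_out, size);  // launches add_kernel with 1024-thread blocks
  cudaDeviceSynchronize();          // add_cuda itself does not synchronize

  cudaMemcpy(out.data(), d_out, size * sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("out[0] = %f (expected 3.0)\n", out[0]);

  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_out);
}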
3126334214959e020c4d1696cd748bb906093416.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void dirichlet_gpu( double *res) { *res = 0.0; } // CUDA kernel function __global__ void op_cuda_dirichlet( double *__restrict ind_arg0, const int *__restrict opDat0Map, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables int map0idx; map0idx = opDat0Map[n + set_size * 0]; //user-supplied kernel call dirichlet_gpu(ind_arg0+map0idx*1); } } //host stub function void op_par_loop_dirichlet(char const *name, op_set set, op_arg arg0){ int nargs = 1; op_arg args[1]; args[0] = arg0; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(1); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[1].name = name; OP_kernels[1].count += 1; int ninds = 1; int inds[1] = {0}; if (OP_diags>2) { printf(" kernel routine with indirection: dirichlet\n"); } int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_cuda(nargs, args); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; hipLaunchKernelGGL(( op_cuda_dirichlet), dim3(nblocks),dim3(nthread), 0, 0, (double *)arg0.data_d, arg0.map_data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
3126334214959e020c4d1696cd748bb906093416.cu
// // auto-generated by op2.py // //user function __device__ void dirichlet_gpu( double *res) { *res = 0.0; } // CUDA kernel function __global__ void op_cuda_dirichlet( double *__restrict ind_arg0, const int *__restrict opDat0Map, int start, int end, int set_size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid + start < end) { int n = tid + start; //initialise local variables int map0idx; map0idx = opDat0Map[n + set_size * 0]; //user-supplied kernel call dirichlet_gpu(ind_arg0+map0idx*1); } } //host stub function void op_par_loop_dirichlet(char const *name, op_set set, op_arg arg0){ int nargs = 1; op_arg args[1]; args[0] = arg0; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(1); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[1].name = name; OP_kernels[1].count += 1; int ninds = 1; int inds[1] = {0}; if (OP_diags>2) { printf(" kernel routine with indirection: dirichlet\n"); } int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { //set CUDA execution parameters #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif for ( int round=0; round<2; round++ ){ if (round==1) { op_mpi_wait_all_cuda(nargs, args); } int start = round==0 ? 0 : set->core_size; int end = round==0 ? set->core_size : set->size + set->exec_size; if (end-start>0) { int nblocks = (end-start-1)/nthread+1; op_cuda_dirichlet<<<nblocks,nthread>>>( (double *)arg0.data_d, arg0.map_data_d, start,end,set->size+set->exec_size); } } } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
518acc69c5a344babd160472dbcdd1b1df90e5d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /// /// Copyright (c) 2013, Intel Corporation /// Copyright (c) 2015, NVIDIA CORPORATION. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of <COPYRIGHT HOLDER> nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: transpose /// /// PURPOSE: This program measures the time for the transpose of a /// column-major stored matrix into a row-major stored matrix. /// /// USAGE: Program input is the matrix order and the number of times to /// repeat the operation: /// /// transpose <matrix_size> <# iterations> [tile size] /// /// An optional parameter specifies the tile size used to divide the /// individual matrix blocks for improved cache and TLB performance. /// /// The output consists of diagnostics to make sure the /// transpose worked and timing statistics. /// /// HISTORY: Written by Rob Van der Wijngaart, February 2009. /// Converted to C++11 by Jeff Hammond, February 2016 and May 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" #define TILED 1 #if TILED // The kernel was derived from https://github.com/parallel-forall/code-samples/blob/master/series/cuda-cpp/transpose/transpose.cu, // which is the reason for the additional copyright noted above. 
const int tile_dim = 32; const int block_rows = 8; __global__ void transpose(int order, prk_float * A, prk_float * B) { int x = blockIdx.x * tile_dim + threadIdx.x; int y = blockIdx.y * tile_dim + threadIdx.y; int width = gridDim.x * tile_dim; for (int j = 0; j < tile_dim; j+= block_rows) { B[x*width + (y+j)] += A[(y+j)*width + x]; A[(y+j)*width + x] += (prk_float)1; } } #else __global__ void transpose(unsigned order, prk_float * A, prk_float * B) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<order) && (j<order)) { B[i*order+j] += A[j*order+i]; A[j*order+i] += (prk_float)1; } } #endif int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA Matrix transpose: B = A^T" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations; int order, tile_size; try { if (argc < 3) { throw "Usage: <# iterations> <matrix order>"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } order = std::atoi(argv[2]); if (order <= 0) { throw "ERROR: Matrix Order must be greater than 0"; } else if (order > ::floor(std::sqrt(INT_MAX))) { throw "ERROR: matrix dimension too large - overflow risk"; } #if TILED if (order % tile_dim != 0) { std::cout << "Sorry, but order (" << order << ") must be evenly divible by " << tile_dim << " or the results are going to be wrong.\n"; } #else // default tile size for tiling of local transpose tile_size = 32; if (argc > 3) { tile_size = std::atoi(argv[3]); if (tile_size <= 0) tile_size = order; if (tile_size > order) tile_size = order; } #endif #ifdef __CORIANDERCC__ // This has not been analyzed, but it is an empirical fact. 
if (order > 1234) { std::cout << "The results are probably going to be wrong, because order>1234.\n"; } #endif } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Matrix order = " << order << std::endl; #if TILED std::cout << "Tile size = " << tile_dim << std::endl; #else std::cout << "Tile size = " << tile_size << std::endl; #endif #if TILED dim3 dimGrid(order/tile_dim, order/tile_dim, 1); dim3 dimBlock(tile_dim, block_rows, 1); #else dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); #endif info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space for the input and transpose matrix ////////////////////////////////////////////////////////////////////// const size_t nelems = (size_t)order * (size_t)order; const size_t bytes = nelems * sizeof(prk_float); prk_float * h_a; prk_float * h_b; #ifndef __CORIANDERCC__ prk::CUDA::check( hipHostMalloc((void**)&h_a, bytes) ); prk::CUDA::check( hipHostMalloc((void**)&h_b, bytes) ); #else h_a = new prk_float[nelems]; h_b = new prk_float[nelems]; #endif // fill A with the sequence 0 to order^2-1 for (auto j=0; j<order; j++) { for (auto i=0; i<order; i++) { h_a[j*order+i] = static_cast<prk_float>(order*j+i); h_b[j*order+i] = static_cast<prk_float>(0); } } // copy input from host to device prk_float * d_a; prk_float * d_b; prk::CUDA::check( hipMalloc((void**)&d_a, bytes) ); prk::CUDA::check( hipMalloc((void**)&d_b, bytes) ); prk::CUDA::check( hipMemcpy(d_a, &(h_a[0]), bytes, hipMemcpyHostToDevice) ); prk::CUDA::check( hipMemcpy(d_b, &(h_b[0]), bytes, hipMemcpyHostToDevice) ); auto trans_time = 0.0; for (auto iter = 0; iter<=iterations; iter++) { if (iter==1) trans_time = prk::wtime(); hipLaunchKernelGGL(( transpose), dim3(dimGrid), dim3(dimBlock), 0, 0, order, d_a, d_b); #ifndef __CORIANDERCC__ // silence "ignoring hipDeviceSynchronize for now" warning prk::CUDA::check( hipDeviceSynchronize() ); #endif } trans_time = prk::wtime() - trans_time; // copy output back to host prk::CUDA::check( hipMemcpy(&(h_b[0]), d_b, bytes, hipMemcpyDeviceToHost) ); #ifdef VERBOSE // copy input back to host - debug only prk::CUDA::check( hipMemcpy(&(h_a[0]), d_a, bytes, hipMemcpyDeviceToHost) ); #endif prk::CUDA::check( hipFree(d_b) ); prk::CUDA::check( hipFree(d_a) ); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// const double addit = (iterations+1.) 
* (iterations/2.); double abserr(0); for (auto j=0; j<order; j++) { for (auto i=0; i<order; i++) { const size_t ij = (size_t)i*(size_t)order+(size_t)j; const size_t ji = (size_t)j*(size_t)order+(size_t)i; const double reference = static_cast<double>(ij)*(1.+iterations)+addit; abserr += ::fabs(h_b[ji] - reference); } } #ifdef VERBOSE std::cout << "Sum of absolute differences: " << abserr << std::endl; #endif #ifndef __CORIANDERCC__ prk::CUDA::check( hipHostFree(h_b) ); prk::CUDA::check( hipHostFree(h_a) ); #endif const auto epsilon = 1.0e-8; if (abserr < epsilon) { std::cout << "Solution validates" << std::endl; auto avgtime = trans_time/iterations; auto bytes = (size_t)order * (size_t)order * sizeof(prk_float); std::cout << "Rate (MB/s): " << 1.0e-6 * (2L*bytes)/avgtime << " Avg time (s): " << avgtime << std::endl; } else { #ifdef VERBOSE for (auto i=0; i<order; i++) { for (auto j=0; j<order; j++) { std::cout << "(" << i << "," << j << ") = " << h_a[i*order+j] << ", " << h_b[i*order+j] << "\n"; } } #endif std::cout << "ERROR: Aggregate squared error " << abserr << " exceeds threshold " << epsilon << std::endl; return 1; } return 0; }
518acc69c5a344babd160472dbcdd1b1df90e5d8.cu
/// /// Copyright (c) 2013, Intel Corporation /// Copyright (c) 2015, NVIDIA CORPORATION. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions /// are met: /// /// * Redistributions of source code must retain the above copyright /// notice, this list of conditions and the following disclaimer. /// * Redistributions in binary form must reproduce the above /// copyright notice, this list of conditions and the following /// disclaimer in the documentation and/or other materials provided /// with the distribution. /// * Neither the name of <COPYRIGHT HOLDER> nor the names of its /// contributors may be used to endorse or promote products /// derived from this software without specific prior written /// permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS /// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT /// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE /// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, /// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, /// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; /// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT /// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN /// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE /// POSSIBILITY OF SUCH DAMAGE. ////////////////////////////////////////////////////////////////////// /// /// NAME: transpose /// /// PURPOSE: This program measures the time for the transpose of a /// column-major stored matrix into a row-major stored matrix. /// /// USAGE: Program input is the matrix order and the number of times to /// repeat the operation: /// /// transpose <matrix_size> <# iterations> [tile size] /// /// An optional parameter specifies the tile size used to divide the /// individual matrix blocks for improved cache and TLB performance. /// /// The output consists of diagnostics to make sure the /// transpose worked and timing statistics. /// /// HISTORY: Written by Rob Van der Wijngaart, February 2009. /// Converted to C++11 by Jeff Hammond, February 2016 and May 2017. /// ////////////////////////////////////////////////////////////////////// #include "prk_util.h" #include "prk_cuda.h" #define TILED 1 #if TILED // The kernel was derived from https://github.com/parallel-forall/code-samples/blob/master/series/cuda-cpp/transpose/transpose.cu, // which is the reason for the additional copyright noted above. 
const int tile_dim = 32; const int block_rows = 8; __global__ void transpose(int order, prk_float * A, prk_float * B) { int x = blockIdx.x * tile_dim + threadIdx.x; int y = blockIdx.y * tile_dim + threadIdx.y; int width = gridDim.x * tile_dim; for (int j = 0; j < tile_dim; j+= block_rows) { B[x*width + (y+j)] += A[(y+j)*width + x]; A[(y+j)*width + x] += (prk_float)1; } } #else __global__ void transpose(unsigned order, prk_float * A, prk_float * B) { auto i = blockIdx.x * blockDim.x + threadIdx.x; auto j = blockIdx.y * blockDim.y + threadIdx.y; if ((i<order) && (j<order)) { B[i*order+j] += A[j*order+i]; A[j*order+i] += (prk_float)1; } } #endif int main(int argc, char * argv[]) { std::cout << "Parallel Research Kernels version " << PRKVERSION << std::endl; std::cout << "C++11/CUDA Matrix transpose: B = A^T" << std::endl; prk::CUDA::info info; info.print(); ////////////////////////////////////////////////////////////////////// /// Read and test input parameters ////////////////////////////////////////////////////////////////////// int iterations; int order, tile_size; try { if (argc < 3) { throw "Usage: <# iterations> <matrix order>"; } iterations = std::atoi(argv[1]); if (iterations < 1) { throw "ERROR: iterations must be >= 1"; } order = std::atoi(argv[2]); if (order <= 0) { throw "ERROR: Matrix Order must be greater than 0"; } else if (order > std::floor(std::sqrt(INT_MAX))) { throw "ERROR: matrix dimension too large - overflow risk"; } #if TILED if (order % tile_dim != 0) { std::cout << "Sorry, but order (" << order << ") must be evenly divible by " << tile_dim << " or the results are going to be wrong.\n"; } #else // default tile size for tiling of local transpose tile_size = 32; if (argc > 3) { tile_size = std::atoi(argv[3]); if (tile_size <= 0) tile_size = order; if (tile_size > order) tile_size = order; } #endif #ifdef __CORIANDERCC__ // This has not been analyzed, but it is an empirical fact. 
if (order > 1234) { std::cout << "The results are probably going to be wrong, because order>1234.\n"; } #endif } catch (const char * e) { std::cout << e << std::endl; return 1; } std::cout << "Number of iterations = " << iterations << std::endl; std::cout << "Matrix order = " << order << std::endl; #if TILED std::cout << "Tile size = " << tile_dim << std::endl; #else std::cout << "Tile size = " << tile_size << std::endl; #endif #if TILED dim3 dimGrid(order/tile_dim, order/tile_dim, 1); dim3 dimBlock(tile_dim, block_rows, 1); #else dim3 dimGrid(prk::divceil(order,tile_size),prk::divceil(order,tile_size),1); dim3 dimBlock(tile_size, tile_size, 1); #endif info.checkDims(dimBlock, dimGrid); ////////////////////////////////////////////////////////////////////// // Allocate space for the input and transpose matrix ////////////////////////////////////////////////////////////////////// const size_t nelems = (size_t)order * (size_t)order; const size_t bytes = nelems * sizeof(prk_float); prk_float * h_a; prk_float * h_b; #ifndef __CORIANDERCC__ prk::CUDA::check( cudaMallocHost((void**)&h_a, bytes) ); prk::CUDA::check( cudaMallocHost((void**)&h_b, bytes) ); #else h_a = new prk_float[nelems]; h_b = new prk_float[nelems]; #endif // fill A with the sequence 0 to order^2-1 for (auto j=0; j<order; j++) { for (auto i=0; i<order; i++) { h_a[j*order+i] = static_cast<prk_float>(order*j+i); h_b[j*order+i] = static_cast<prk_float>(0); } } // copy input from host to device prk_float * d_a; prk_float * d_b; prk::CUDA::check( cudaMalloc((void**)&d_a, bytes) ); prk::CUDA::check( cudaMalloc((void**)&d_b, bytes) ); prk::CUDA::check( cudaMemcpy(d_a, &(h_a[0]), bytes, cudaMemcpyHostToDevice) ); prk::CUDA::check( cudaMemcpy(d_b, &(h_b[0]), bytes, cudaMemcpyHostToDevice) ); auto trans_time = 0.0; for (auto iter = 0; iter<=iterations; iter++) { if (iter==1) trans_time = prk::wtime(); transpose<<<dimGrid, dimBlock>>>(order, d_a, d_b); #ifndef __CORIANDERCC__ // silence "ignoring cudaDeviceSynchronize for now" warning prk::CUDA::check( cudaDeviceSynchronize() ); #endif } trans_time = prk::wtime() - trans_time; // copy output back to host prk::CUDA::check( cudaMemcpy(&(h_b[0]), d_b, bytes, cudaMemcpyDeviceToHost) ); #ifdef VERBOSE // copy input back to host - debug only prk::CUDA::check( cudaMemcpy(&(h_a[0]), d_a, bytes, cudaMemcpyDeviceToHost) ); #endif prk::CUDA::check( cudaFree(d_b) ); prk::CUDA::check( cudaFree(d_a) ); ////////////////////////////////////////////////////////////////////// /// Analyze and output results ////////////////////////////////////////////////////////////////////// const double addit = (iterations+1.) 
* (iterations/2.); double abserr(0); for (auto j=0; j<order; j++) { for (auto i=0; i<order; i++) { const size_t ij = (size_t)i*(size_t)order+(size_t)j; const size_t ji = (size_t)j*(size_t)order+(size_t)i; const double reference = static_cast<double>(ij)*(1.+iterations)+addit; abserr += std::fabs(h_b[ji] - reference); } } #ifdef VERBOSE std::cout << "Sum of absolute differences: " << abserr << std::endl; #endif #ifndef __CORIANDERCC__ prk::CUDA::check( cudaFreeHost(h_b) ); prk::CUDA::check( cudaFreeHost(h_a) ); #endif const auto epsilon = 1.0e-8; if (abserr < epsilon) { std::cout << "Solution validates" << std::endl; auto avgtime = trans_time/iterations; auto bytes = (size_t)order * (size_t)order * sizeof(prk_float); std::cout << "Rate (MB/s): " << 1.0e-6 * (2L*bytes)/avgtime << " Avg time (s): " << avgtime << std::endl; } else { #ifdef VERBOSE for (auto i=0; i<order; i++) { for (auto j=0; j<order; j++) { std::cout << "(" << i << "," << j << ") = " << h_a[i*order+j] << ", " << h_b[i*order+j] << "\n"; } } #endif std::cout << "ERROR: Aggregate squared error " << abserr << " exceeds threshold " << epsilon << std::endl; return 1; } return 0; }
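// A host-only check of the index arithmetic used by the tiled transpose kernel above. Not part
// of the PRK sources; the thread/block coordinates below are arbitrary example values chosen to
// show that each (dst, src) pair addresses transposed positions B(x, y+j) and A(y+j, x).
#include <cassert>

int main() {
  const int tile_dim = 32, block_rows = 8, order = 64;
  const int tx = 3, ty = 2, bx = 1, by = 0;        // one thread in block (1,0)
  const int x = bx * tile_dim + tx;                // 35
  const int y = by * tile_dim + ty;                // 2
  const int width = (order / tile_dim) * tile_dim; // gridDim.x * tile_dim == order here
  for (int j = 0; j < tile_dim; j += block_rows) {
    const int dst = x * width + (y + j);           // element B(x, y+j), row-major
    const int src = (y + j) * width + x;           // element A(y+j, x), row-major
    assert(dst / width == src % width);            // row of B == column of A
    assert(dst % width == src / width);            // column of B == row of A
  }
  return 0;
}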
1e86297fccc816b6558785ef863f8bfb826d89e1.hip
// !!! This is a file automatically generated by hipify!!!
/* Write a CUDA program that, given a vector V of N integers, multiplies each
 * number by a constant C. Provide two implementations:
 * a. C and N must be passed as parameters to the kernel.
 * b. C and N must be stored in the GPU's constant memory.
 */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <sys/resource.h>

#ifdef _INT_
typedef int basetype; // Element type: int
#define labelelem "ints"
#elif _DOUBLE_
typedef double basetype; // Element type: double
#define labelelem "doubles"
#else
typedef float basetype; // Element type: float DEFAULT
#define labelelem "floats"
#endif

/*
void kernel_multiplicar(const int c, int *vector)
{
int n=0;
for (int i=1;)
}
*/

double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}

/* Function to initialize the vector we are going to use */
void init_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = (basetype)i;
}
}

void multiplicarV_CPU(basetype vec[], const unsigned int c, const unsigned int n)
{
for(int i=0; i<n; i++){
vec[i]=vec[i]*c;
}
}

__constant__ int d_n=102400;
__constant__ int d_c=9;

__global__ void multiplicacionV_kernel_cuda(basetype *const arrayV){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < d_n)
arrayV[global_id] = arrayV[global_id]*d_c;
}

void multiplicacionV_GPU( basetype arrayV[], const unsigned int n, const unsigned int blk_size, const int c){
double timetick;
// Number of bytes of each of our vectors
unsigned int numBytes = n * sizeof(basetype);
hipError_t error;

// Allocate global device (GPU) memory for the array and copy it over
basetype *cV;
timetick = dwalltime();
hipMalloc((void **) &cV, numBytes);
printf("-> Tiempo de alocacion en memoria global de GPU %f\n", dwalltime() - timetick);

timetick = dwalltime();
hipMemcpy(cV, arrayV, numBytes, hipMemcpyHostToDevice); // CPU -> GPU
printf("-> Tiempo de copia de memoria CPU =>> GPU %f\n", dwalltime() - timetick);

// One-dimensional block of threads (*blk_size* threads)
dim3 dimBlock(blk_size);
// One-dimensional grid (*ceil(n/blk_size)* blocks)
dim3 dimGrid((n + dimBlock.x - 1) / dimBlock.x);

// Launch the kernel on the GPU
//timestamp(start); // Measure GPU compute time
timetick = dwalltime();
hipLaunchKernelGGL(( multiplicacionV_kernel_cuda), dim3(dimGrid), dim3(dimBlock), 0, 0, cV);
error = hipDeviceSynchronize();
printf("Synchronyse error: %d\n", error);
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
//timestamp(end);

// Move the result back: GPU -> CPU
timetick = dwalltime();
hipMemcpy(arrayV, cV, numBytes, hipMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);

// Free the global device memory we used
hipFree (cV);
}

int main(int argc, char **argv)
{
double timetick;

//Execution on the CPU
basetype *vec;
int c=9;
int n=102400;

timetick = dwalltime();
vec=(basetype *) malloc(sizeof(basetype)*n);
init_CPU_array(vec, n);
printf("-> Tiempo de inicializacion de vector en CPU %f\n", dwalltime() - timetick);

timetick = dwalltime();
multiplicarV_CPU(vec,c,n);
printf("-> Tiempo de ejecucion en CPU %f\n", dwalltime() - timetick);

//Execution on the GPU
int cb=64;
//Initialize the vector again before running on the GPU
init_CPU_array(vec, n);
// Run multiplicacionV_GPU on the GPU
multiplicacionV_GPU(vec, n, cb, c);
//Check whether the result obtained on the GPU is correct
//check_array(vec,n);

free(vec);
return 0;
}
1e86297fccc816b6558785ef863f8bfb826d89e1.cu
/* Write a CUDA program that, given a vector V of N integers, multiplies each
 * number by a constant C. Provide two implementations:
 * a. C and N must be passed as parameters to the kernel.
 * b. C and N must be stored in the GPU's constant memory.
 */
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
#include <sys/resource.h>

#ifdef _INT_
typedef int basetype; // Element type: int
#define labelelem "ints"
#elif _DOUBLE_
typedef double basetype; // Element type: double
#define labelelem "doubles"
#else
typedef float basetype; // Element type: float DEFAULT
#define labelelem "floats"
#endif

/*
void kernel_multiplicar(const int c, int *vector)
{
int n=0;
for (int i=1;)
}
*/

double dwalltime(){
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}

/* Function to initialize the vector we are going to use */
void init_CPU_array(basetype array[], const unsigned int n)
{
unsigned int i;
for(i = 0; i < n; i++) {
array[i] = (basetype)i;
}
}

void multiplicarV_CPU(basetype vec[], const unsigned int c, const unsigned int n)
{
for(int i=0; i<n; i++){
vec[i]=vec[i]*c;
}
}

__constant__ int d_n=102400;
__constant__ int d_c=9;

__global__ void multiplicacionV_kernel_cuda(basetype *const arrayV){
unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < d_n)
arrayV[global_id] = arrayV[global_id]*d_c;
}

void multiplicacionV_GPU( basetype arrayV[], const unsigned int n, const unsigned int blk_size, const int c){
double timetick;
// Number of bytes of each of our vectors
unsigned int numBytes = n * sizeof(basetype);
cudaError_t error;

// Allocate global device (GPU) memory for the array and copy it over
basetype *cV;
timetick = dwalltime();
cudaMalloc((void **) &cV, numBytes);
printf("-> Tiempo de alocacion en memoria global de GPU %f\n", dwalltime() - timetick);

timetick = dwalltime();
cudaMemcpy(cV, arrayV, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
printf("-> Tiempo de copia de memoria CPU =>> GPU %f\n", dwalltime() - timetick);

// One-dimensional block of threads (*blk_size* threads)
dim3 dimBlock(blk_size);
// One-dimensional grid (*ceil(n/blk_size)* blocks)
dim3 dimGrid((n + dimBlock.x - 1) / dimBlock.x);

// Launch the kernel on the GPU
//timestamp(start); // Measure GPU compute time
timetick = dwalltime();
multiplicacionV_kernel_cuda<<<dimGrid, dimBlock>>>(cV);
error = cudaDeviceSynchronize();
printf("Synchronyse error: %d\n", error);
printf("-> Tiempo de ejecucion en GPU %f\n", dwalltime() - timetick);
//timestamp(end);

// Move the result back: GPU -> CPU
timetick = dwalltime();
cudaMemcpy(arrayV, cV, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
printf("-> Tiempo de copia GPU ==>> CPU %f\n", dwalltime() - timetick);

// Free the global device memory we used
cudaFree (cV);
}

int main(int argc, char **argv)
{
double timetick;

//Execution on the CPU
basetype *vec;
int c=9;
int n=102400;

timetick = dwalltime();
vec=(basetype *) malloc(sizeof(basetype)*n);
init_CPU_array(vec, n);
printf("-> Tiempo de inicializacion de vector en CPU %f\n", dwalltime() - timetick);

timetick = dwalltime();
multiplicarV_CPU(vec,c,n);
printf("-> Tiempo de ejecucion en CPU %f\n", dwalltime() - timetick);

//Execution on the GPU
int cb=64;
//Initialize the vector again before running on the GPU
init_CPU_array(vec, n);
// Run multiplicacionV_GPU on the GPU
multiplicacionV_GPU(vec, n, cb, c);
//Check whether the result obtained on the GPU is correct
//check_array(vec,n);

free(vec);
return 0;
}
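// The header comment in the pair above asks for two implementations, but the files only contain
// variant (b), which reads C and N from __constant__ memory. The sketch below is a guess at
// variant (a), passing both values as kernel arguments; the kernel name is invented and plain
// 'float' is used instead of the basetype typedef.
__global__ void multiplicacionV_param_kernel(float *const arrayV, const int c, const unsigned int n) {
  unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (global_id < n)
    arrayV[global_id] = arrayV[global_id] * c;
}
// It would be launched with the same grid/block shape as variant (b), e.g.:
//   multiplicacionV_param_kernel<<<dimGrid, dimBlock>>>(cV, c, n);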
9164e191c39cdfbbae54c8b5f1a60593598bc753.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <iostream> #include <algorithm> #include <cstdint> #include <functional> #include <hip/hip_runtime_api.h> #include "vector_add.hpp" #include "parameters.hpp" #include "device_queries.hpp" #include "shared_utilities.hpp" #include "timer.hpp" using std::vector; vector<float> cpu_addition(const vector<float> &a, const vector<float> &b) { vector<float> results(a); std::transform(a.begin(), a.end(), b.cbegin(), results.begin(), std::plus<float>()); return results; } /* calculate step, calculate final index, if step doesn't work, specify final index and special step if special step doesn't exist, FFFFFFFF it. only warp divergence will be in one warp in final block. ideally, last block on last device */ __global__ void cuda_vector_add(float *a, float *b, unsigned step, const unsigned total, const unsigned fix_position, const unsigned fix_step) { unsigned position = blockDim.x * blockIdx.x + threadIdx.x; position *= step; // printf("%d\t%d\t%d\t%d\n", blockDim.x, blockIdx.x, threadIdx.x, position); // This is a really dumb edge case clearly used to break the code, but // hell if I'm missing points for not catching when you request more threads than elements! if (position < total) { // Interesting thing to test // ternary here vs if. Only divergence should be last warp in last block // But the ternary will probably slow down everything? // It would avoid a warp divergence, though! if (position == fix_position) { step = fix_step; } a += position; b += position; for (int i = 0; i < step; ++i, ++a, ++b) { // printf("%p %p %i %i %f %f\n", a, b, position, i, *a, *b); *a += *b; } } } using device_config_t = struct { int device; void *vec_a_device, *vec_b_device; // vecs gets summed into a unsigned step; unsigned fix_position; // UINT_MAX unsigned fix_step; }; void launch_kernels_and_report(const options_t &opts) { const int threads = opts.threads; const int blocks = opts.blocks; const bool validate = opts.validate; const bool multi = opts.multi; const double util = opts.utilization; const size_t thread_total = blocks * threads; if (threads == 0 || blocks == 0) { throw std::runtime_error("Thread/Block count of 0!"); } std::vector<int> devices = get_devices(); if (!multi) { devices.resize(1); } const size_t num_devices = devices.size(); std::vector<size_t> float_vec_size(num_devices); for (unsigned i = 0; i < num_devices; ++i) { float_vec_size[i] = get_global_mem(devices[i]) / sizeof(float) * util / 2.0; // number of total floats, get the utilization, div in two because a + b // resulting size is the size for vectors a and b } // Instead of making a giant contiguous vector and serving out slices to the devices // I'm just going to make smaller ones since there's no real difference std::vector<device_config_t> config(num_devices); for (unsigned i = 0; i < num_devices; ++i) { auto dim_pair = get_dims(devices[i]); if (dim_pair.first < threads || dim_pair.second < blocks) { throw std::runtime_error("Block/thread count outside device dims!"); } config[i].device = devices[i]; // config[i].a = generate_vector(float_vec_size[i]); // config[i].b = generate_vector(float_vec_size[i]); // config[i].c = std::vector<float>(float_vec_size[i]); config[i].step = float_vec_size[i] / thread_total; if (config[i].step == 0) { std::cout << "More threads than values! Rude!" 
<< std::endl; // with a very low mem utilization (read: testing) // it will end up with a step of 0 if you get total_threads over n_elem // So I guess hardcode 1 and nop anything off the end of the vector config[i].step = 1; config[i].fix_position = UINT_MAX; config[i].fix_step = 1; } else { const bool offset_needed = (config[i].step * thread_total) != float_vec_size[i]; if (offset_needed) { config[i].fix_position = config[i].step * (thread_total - 1); config[i].fix_step = config[i].step + (float_vec_size[i] - (config[i].step * thread_total)); } else { config[i].fix_position = UINT_MAX; // should never trigger config[i].fix_step = config[i].step; // but just in case } } } std::cout << "Configuration complete, generating data and executing." << std::endl; // prepare and launch! Woooooo. for (unsigned i = 0; i < num_devices; ++i) { timer gpu_total, gpu_execute; std::cout << "Dev: " << config[i].device << " Step: " << config[i].step << " Fix_P: " << config[i].fix_position << " Fix_s: " << config[i].fix_step << " Threads: " << thread_total << " Val total: " << float_vec_size[i] << std::endl; std::vector<float> a = generate_vector(float_vec_size[i]); std::vector<float> b = generate_vector(float_vec_size[i]); std::vector<float> c = std::vector<float>(float_vec_size[i]); if (hipSetDevice(config[i].device) != hipSuccess) { throw std::runtime_error("could not select device!"); } gpu_total.begin(); if (hipMalloc(&config[i].vec_a_device, float_vec_size[i] * sizeof(float)) != hipSuccess || hipMalloc(&config[i].vec_b_device, float_vec_size[i] * sizeof(float)) != hipSuccess) { throw std::runtime_error("Failed to malloc vector!"); } if (hipMemcpy(config[i].vec_a_device, a.data(), float_vec_size[i] * sizeof(float), hipMemcpyHostToDevice) != hipSuccess || hipMemcpy(config[i].vec_b_device, b.data(), float_vec_size[i] * sizeof(float), hipMemcpyHostToDevice) != hipSuccess) { throw std::runtime_error("Failed to copy data to device!"); } gpu_execute.begin(); hipLaunchKernelGGL(( cuda_vector_add), dim3(blocks), dim3(threads), 0, 0, (float *) config[i].vec_a_device, (float *) config[i].vec_b_device, config[i].step, float_vec_size[i], config[i].fix_position, config[i].fix_step); if (hipDeviceSynchronize() != hipSuccess) { throw std::runtime_error("Sync issue! (Launch failure?)"); } gpu_execute.end(); if (hipMemcpy(c.data(), config[i].vec_a_device, float_vec_size[i] * sizeof(float), hipMemcpyDeviceToHost) != hipSuccess) { throw std::runtime_error("Could not copy data back!"); } hipFree(config[i].vec_a_device); hipFree(config[i].vec_b_device); gpu_total.end(); std::cout << "GPU_" << config[i].device << " time: " << gpu_total.ms_elapsed() << " ms (execute: " << gpu_execute.ms_elapsed() << " ms)" << std::endl; if (validate) { timer cpu_time; cpu_time.begin(); std::vector<float> cpu_result = cpu_addition(a, b); cpu_time.end(); std::cout << "CPU time: " << cpu_time.ms_elapsed() << " ms" << std::endl; if (!check_equal(c, cpu_result)) { std::cout << "VERIFICATION FAILED (epsilon issue?)" << std::endl; std::cout << a[0] << " " << a[1] << " " << a[2] << std::endl; std::cout << b[0] << " " << b[1] << " " << b[2] << std::endl << std::endl; std::cout << c[0] << " " << c[1] << " " << c[2] << std::endl; std::cout << cpu_result[0] << " " << cpu_result[1] << " " << cpu_result[2] << std::endl; } } } }
9164e191c39cdfbbae54c8b5f1a60593598bc753.cu
#include <vector> #include <iostream> #include <algorithm> #include <cstdint> #include <functional> #include <cuda_runtime_api.h> #include "vector_add.hpp" #include "parameters.hpp" #include "device_queries.hpp" #include "shared_utilities.hpp" #include "timer.hpp" using std::vector; vector<float> cpu_addition(const vector<float> &a, const vector<float> &b) { vector<float> results(a); std::transform(a.begin(), a.end(), b.cbegin(), results.begin(), std::plus<float>()); return results; } /* calculate step, calculate final index, if step doesn't work, specify final index and special step if special step doesn't exist, FFFFFFFF it. only warp divergence will be in one warp in final block. ideally, last block on last device */ __global__ void cuda_vector_add(float *a, float *b, unsigned step, const unsigned total, const unsigned fix_position, const unsigned fix_step) { unsigned position = blockDim.x * blockIdx.x + threadIdx.x; position *= step; // printf("%d\t%d\t%d\t%d\n", blockDim.x, blockIdx.x, threadIdx.x, position); // This is a really dumb edge case clearly used to break the code, but // hell if I'm missing points for not catching when you request more threads than elements! if (position < total) { // Interesting thing to test // ternary here vs if. Only divergence should be last warp in last block // But the ternary will probably slow down everything? // It would avoid a warp divergence, though! if (position == fix_position) { step = fix_step; } a += position; b += position; for (int i = 0; i < step; ++i, ++a, ++b) { // printf("%p %p %i %i %f %f\n", a, b, position, i, *a, *b); *a += *b; } } } using device_config_t = struct { int device; void *vec_a_device, *vec_b_device; // vecs gets summed into a unsigned step; unsigned fix_position; // UINT_MAX unsigned fix_step; }; void launch_kernels_and_report(const options_t &opts) { const int threads = opts.threads; const int blocks = opts.blocks; const bool validate = opts.validate; const bool multi = opts.multi; const double util = opts.utilization; const size_t thread_total = blocks * threads; if (threads == 0 || blocks == 0) { throw std::runtime_error("Thread/Block count of 0!"); } std::vector<int> devices = get_devices(); if (!multi) { devices.resize(1); } const size_t num_devices = devices.size(); std::vector<size_t> float_vec_size(num_devices); for (unsigned i = 0; i < num_devices; ++i) { float_vec_size[i] = get_global_mem(devices[i]) / sizeof(float) * util / 2.0; // number of total floats, get the utilization, div in two because a + b // resulting size is the size for vectors a and b } // Instead of making a giant contiguous vector and serving out slices to the devices // I'm just going to make smaller ones since there's no real difference std::vector<device_config_t> config(num_devices); for (unsigned i = 0; i < num_devices; ++i) { auto dim_pair = get_dims(devices[i]); if (dim_pair.first < threads || dim_pair.second < blocks) { throw std::runtime_error("Block/thread count outside device dims!"); } config[i].device = devices[i]; // config[i].a = generate_vector(float_vec_size[i]); // config[i].b = generate_vector(float_vec_size[i]); // config[i].c = std::vector<float>(float_vec_size[i]); config[i].step = float_vec_size[i] / thread_total; if (config[i].step == 0) { std::cout << "More threads than values! Rude!" 
<< std::endl; // with a very low mem utilization (read: testing) // it will end up with a step of 0 if you get total_threads over n_elem // So I guess hardcode 1 and nop anything off the end of the vector config[i].step = 1; config[i].fix_position = UINT_MAX; config[i].fix_step = 1; } else { const bool offset_needed = (config[i].step * thread_total) != float_vec_size[i]; if (offset_needed) { config[i].fix_position = config[i].step * (thread_total - 1); config[i].fix_step = config[i].step + (float_vec_size[i] - (config[i].step * thread_total)); } else { config[i].fix_position = UINT_MAX; // should never trigger config[i].fix_step = config[i].step; // but just in case } } } std::cout << "Configuration complete, generating data and executing." << std::endl; // prepare and launch! Woooooo. for (unsigned i = 0; i < num_devices; ++i) { timer gpu_total, gpu_execute; std::cout << "Dev: " << config[i].device << " Step: " << config[i].step << " Fix_P: " << config[i].fix_position << " Fix_s: " << config[i].fix_step << " Threads: " << thread_total << " Val total: " << float_vec_size[i] << std::endl; std::vector<float> a = generate_vector(float_vec_size[i]); std::vector<float> b = generate_vector(float_vec_size[i]); std::vector<float> c = std::vector<float>(float_vec_size[i]); if (cudaSetDevice(config[i].device) != cudaSuccess) { throw std::runtime_error("could not select device!"); } gpu_total.begin(); if (cudaMalloc(&config[i].vec_a_device, float_vec_size[i] * sizeof(float)) != cudaSuccess || cudaMalloc(&config[i].vec_b_device, float_vec_size[i] * sizeof(float)) != cudaSuccess) { throw std::runtime_error("Failed to malloc vector!"); } if (cudaMemcpy(config[i].vec_a_device, a.data(), float_vec_size[i] * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess || cudaMemcpy(config[i].vec_b_device, b.data(), float_vec_size[i] * sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) { throw std::runtime_error("Failed to copy data to device!"); } gpu_execute.begin(); cuda_vector_add<<<blocks, threads>>>((float *) config[i].vec_a_device, (float *) config[i].vec_b_device, config[i].step, float_vec_size[i], config[i].fix_position, config[i].fix_step); if (cudaDeviceSynchronize() != cudaSuccess) { throw std::runtime_error("Sync issue! (Launch failure?)"); } gpu_execute.end(); if (cudaMemcpy(c.data(), config[i].vec_a_device, float_vec_size[i] * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) { throw std::runtime_error("Could not copy data back!"); } cudaFree(config[i].vec_a_device); cudaFree(config[i].vec_b_device); gpu_total.end(); std::cout << "GPU_" << config[i].device << " time: " << gpu_total.ms_elapsed() << " ms (execute: " << gpu_execute.ms_elapsed() << " ms)" << std::endl; if (validate) { timer cpu_time; cpu_time.begin(); std::vector<float> cpu_result = cpu_addition(a, b); cpu_time.end(); std::cout << "CPU time: " << cpu_time.ms_elapsed() << " ms" << std::endl; if (!check_equal(c, cpu_result)) { std::cout << "VERIFICATION FAILED (epsilon issue?)" << std::endl; std::cout << a[0] << " " << a[1] << " " << a[2] << std::endl; std::cout << b[0] << " " << b[1] << " " << b[2] << std::endl << std::endl; std::cout << c[0] << " " << c[1] << " " << c[2] << std::endl; std::cout << cpu_result[0] << " " << cpu_result[1] << " " << cpu_result[2] << std::endl; } } } }
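// A worked example of the step / fix_position / fix_step partitioning computed above, run on the
// host. The sizes are toy values (assumed, not taken from any real device): 10 elements split
// across 4 threads, so the last thread absorbs the remainder.
#include <cstdio>
#include <climits>

int main() {
  const unsigned total = 10, thread_total = 4;
  unsigned step = total / thread_total;               // 2 elements per thread
  unsigned fix_position = UINT_MAX, fix_step = step;
  if (step * thread_total != total) {                 // 8 != 10, so an adjustment is needed
    fix_position = step * (thread_total - 1);         // 6: start index of the last thread
    fix_step = step + (total - step * thread_total);  // 2 + 2 = 4 elements for the last thread
  }
  // Threads start at positions 0, 2, 4, 6 and process 2, 2, 2, 4 elements: all 10 are covered,
  // matching the fix_position test inside cuda_vector_add above.
  printf("step=%u fix_position=%u fix_step=%u\n", step, fix_position, fix_step);
  return 0;
}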
afbff5760c3485f206b2afae8aacdd41eb0d970c.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdio> #include <cmath> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "common.h" #include <hip/hip_runtime.h> #include <chrono> #define N 5//Change bluring window size using namespace std; __global__ void blur_kernel(unsigned char* input, unsigned char* output, int width, int height, int step) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; int flr = floor (N/2.0); int matAvg = 0; //Avoiding edge pixels if ((xIndex < width) && (yIndex < height)) { //Current pixel const int tid = yIndex * step + (3 * xIndex); int bAvg = 0; int grAvg = 0; int rAvg = 0; //Get the average of the surrounding pixels for (int i = -flr; i <= flr; i++) { for (int j = -flr; j <= flr; j++) { const int tid = (yIndex+i) * step + (3 * (xIndex+j)); if(xIndex+j>0 && yIndex+i>0 && xIndex+j<width && yIndex+i<height ) { matAvg+=1; bAvg += input[tid]; grAvg += input[tid + 1]; rAvg += input[tid + 2]; } } } //Changing the central pixel with the average of the others output[tid] = static_cast<unsigned char>(bAvg/(matAvg)); output[tid+1] = static_cast<unsigned char>(grAvg/(matAvg)); output[tid+2] = static_cast<unsigned char>(rAvg/(matAvg)); } } void blur(const cv::Mat& input, cv::Mat& output) { cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl; size_t colorBytes = input.step * input.rows; size_t grayBytes = output.step * output.rows; unsigned char *d_input, *d_output; // Allocate device memory SAFE_CALL(hipMalloc(&d_input, colorBytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc(&d_output, grayBytes), "CUDA Malloc Failed"); // Copy data from OpenCV input image to device memory SAFE_CALL(hipMemcpy(d_input, input.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(d_output, output.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); // Specify a reasonable block size const dim3 block(16, 16); // Calculate grid size to cover the whole image const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y)); printf("blur_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y); // Launch the color conversion kernel hipLaunchKernelGGL(( blur_kernel) , dim3(grid), dim3(block) , 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step)); // Synchronize to check for any kernel launch errors SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); // Copy back data from destination device meory to OpenCV output image SAFE_CALL(hipMemcpy(output.ptr(), d_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed"); // Free the device memory SAFE_CALL(hipFree(d_input), "CUDA Free Failed"); SAFE_CALL(hipFree(d_output), "CUDA Free Failed"); } int main(int argc, char *argv[]) { string imagePath; if(argc < 2) imagePath = "image.jpg"; else imagePath = argv[1]; // Read input image from the disk cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR); if (input.empty()) { cout << "Image Not Found!" 
<< std::endl; cin.get(); return -1; } //Create output image cv::Mat output(input.rows, input.cols, CV_8UC3); //output = input.clone(); //Execute blur function and measure time auto start_cpu = chrono::high_resolution_clock::now(); blur(input, output); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("elapsed %f ms\n", duration_ms.count()); //Allow the windows to resize namedWindow("Input", cv::WINDOW_NORMAL); namedWindow("Output", cv::WINDOW_NORMAL); //Show the input and output imshow("Input", input); imshow("Output", output); //Wait for key press cv::waitKey(); return 0; }
afbff5760c3485f206b2afae8aacdd41eb0d970c.cu
#include <iostream> #include <cstdio> #include <cmath> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include "common.h" #include <cuda_runtime.h> #include <chrono> #define N 5//Change bluring window size using namespace std; __global__ void blur_kernel(unsigned char* input, unsigned char* output, int width, int height, int step) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; int flr = floor (N/2.0); int matAvg = 0; //Avoiding edge pixels if ((xIndex < width) && (yIndex < height)) { //Current pixel const int tid = yIndex * step + (3 * xIndex); int bAvg = 0; int grAvg = 0; int rAvg = 0; //Get the average of the surrounding pixels for (int i = -flr; i <= flr; i++) { for (int j = -flr; j <= flr; j++) { const int tid = (yIndex+i) * step + (3 * (xIndex+j)); if(xIndex+j>0 && yIndex+i>0 && xIndex+j<width && yIndex+i<height ) { matAvg+=1; bAvg += input[tid]; grAvg += input[tid + 1]; rAvg += input[tid + 2]; } } } //Changing the central pixel with the average of the others output[tid] = static_cast<unsigned char>(bAvg/(matAvg)); output[tid+1] = static_cast<unsigned char>(grAvg/(matAvg)); output[tid+2] = static_cast<unsigned char>(rAvg/(matAvg)); } } void blur(const cv::Mat& input, cv::Mat& output) { cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl; size_t colorBytes = input.step * input.rows; size_t grayBytes = output.step * output.rows; unsigned char *d_input, *d_output; // Allocate device memory SAFE_CALL(cudaMalloc(&d_input, colorBytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc(&d_output, grayBytes), "CUDA Malloc Failed"); // Copy data from OpenCV input image to device memory SAFE_CALL(cudaMemcpy(d_input, input.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(cudaMemcpy(d_output, output.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); // Specify a reasonable block size const dim3 block(16, 16); // Calculate grid size to cover the whole image const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y)); printf("blur_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y); // Launch the color conversion kernel blur_kernel <<<grid, block >>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step)); // Synchronize to check for any kernel launch errors SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); // Copy back data from destination device meory to OpenCV output image SAFE_CALL(cudaMemcpy(output.ptr(), d_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed"); // Free the device memory SAFE_CALL(cudaFree(d_input), "CUDA Free Failed"); SAFE_CALL(cudaFree(d_output), "CUDA Free Failed"); } int main(int argc, char *argv[]) { string imagePath; if(argc < 2) imagePath = "image.jpg"; else imagePath = argv[1]; // Read input image from the disk cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR); if (input.empty()) { cout << "Image Not Found!" 
<< std::endl; cin.get(); return -1; } //Create output image cv::Mat output(input.rows, input.cols, CV_8UC3); //output = input.clone(); //Execute blur function and measure time auto start_cpu = chrono::high_resolution_clock::now(); blur(input, output); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("elapsed %f ms\n", duration_ms.count()); //Allow the windows to resize namedWindow("Input", cv::WINDOW_NORMAL); namedWindow("Output", cv::WINDOW_NORMAL); //Show the input and output imshow("Input", input); imshow("Output", output); //Wait for key press cv::waitKey(); return 0; }
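// A host-side replica of the neighbour-counting logic in blur_kernel above (N = 5, so flr = 2),
// added to show how many taps actually contribute to the average near the image border. The
// helper function and the image size are illustrative, not part of the original files.
#include <cstdio>

int count_taps(int xIndex, int yIndex, int width, int height) {
  const int flr = 2; // floor(N / 2.0) with N == 5
  int matAvg = 0;
  for (int i = -flr; i <= flr; i++)
    for (int j = -flr; j <= flr; j++)
      if (xIndex + j > 0 && yIndex + i > 0 && xIndex + j < width && yIndex + i < height)
        matAvg += 1; // same strict '>' bounds test as the kernel
  return matAvg;
}

int main() {
  // For a 100x100 image an interior pixel averages the full 5x5 window (25 taps), while the
  // pixel at (0,0) gets only 4 taps because the '> 0' test excludes row 0 and column 0 entirely.
  printf("interior (50,50): %d taps\n", count_taps(50, 50, 100, 100));
  printf("corner   (0,0)  : %d taps\n", count_taps(0, 0, 100, 100));
  return 0;
}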
1dacb791c8003411a4a946908d641963596f45ff.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <limits> #include <time.h> #include <hip/hip_runtime.h> #include <minigun/minigun.h> #include "./baseline/yzh_kernels.cuh" #include "./minigun/esoftmax.cuh" #include "../samples_io.h" #include "../samples_utils.h" using minigun::advance::RuntimeConfig; using namespace esoftmax; double RunMinigun(const utils::SampleCsr& scsr, const minigun::IntCsr& csr, int32_t feat_size, int32_t num_heads) { // gdata GData gdata, truth; gdata.H = num_heads; InitGData(scsr, &gdata, &truth); CUDA_CALL(hipDeviceSynchronize()); // create stream RuntimeConfig rtcfg; rtcfg.ctx = {kDLGPU, 0}; int nt = utils::_FindNumThreads(gdata.H, 32); rtcfg.data_num_threads = nt; rtcfg.data_num_blocks = gdata.H / nt; CUDA_CALL(hipStreamCreate(&rtcfg.stream)); minigun::IntArray infront; // dry run typedef minigun::advance::Config<true, minigun::advance::kV2N> Config; minigun::advance::Advance<kDLGPU, int32_t, Config, GData, EdgeMax>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, MinusMaxExpSum>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, Norm>( rtcfg, csr, &gdata, infront); CUDA_CALL(hipDeviceSynchronize()); CheckResult(scsr, &gdata, &truth); const int K = 10; timeval t0, t1; gettimeofday(&t0, nullptr); for (int i = 0; i < K; ++i) { minigun::advance::Advance<kDLGPU, int32_t, Config, GData, EdgeMax>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, MinusMaxExpSum>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, Norm>( rtcfg, csr, &gdata, infront); } CUDA_CALL(hipDeviceSynchronize()); gettimeofday(&t1, nullptr); double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec - (t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms FreeGData(&gdata, &truth); return dur; } double RunBaseline1(const utils::SampleCsr& scsr, const minigun::IntCsr& csr, int32_t feat_size, int32_t num_heads) { // gdata GData gdata, truth; gdata.H = num_heads; InitGData(scsr, &gdata, &truth); const int32_t N = csr.row_offsets.length - 1; const int H = gdata.H; // dry run hipLaunchKernelGGL(( custom_kernel::sparse_softmax_forward_kernel<int32_t, float>), dim3((N + 31) / 32), dim3(dim3(32, H)), 0, 0, csr.row_offsets.data, gdata.score, gdata.ret, (int)N, (int)H); CUDA_CALL(hipDeviceSynchronize()); const int K = 10; timeval t0, t1; gettimeofday(&t0, nullptr); for (int i = 0; i < K; ++i) { hipLaunchKernelGGL(( custom_kernel::sparse_softmax_forward_kernel<int32_t, float>), dim3((N + 31) / 32), dim3(dim3(32, H)), 0, 0, csr.row_offsets.data, gdata.score, gdata.ret, (int)N, (int)H); } CUDA_CALL(hipDeviceSynchronize()); gettimeofday(&t1, nullptr); double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec - (t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms FreeGData(&gdata, &truth); return dur; } int main(int argc, char** argv) { srand(42); if (argc < 3) { std::cout << "USAGE: ./bench_masked_mm <file_name> <num_heads>" << std::endl; return 1; } const char* filename = argv[1]; const int num_heads = std::atoi(argv[2]); std::cout << "filename=" << filename << " num_heads=" << num_heads << std::endl; utils::SampleCsr scsr; utils::LoadGraphFromFile(filename, &scsr); const int32_t N = scsr.row_offsets.size() - 1; const int32_t M = scsr.column_indices.size(); std::cout << "#Nodes: " << N << " #Edges: " << M << std::endl; // csr minigun::IntCsr csr = utils::ToMinigunCsr(scsr, kDLGPU); double dur1 = RunMinigun(scsr, csr, 0, 
num_heads); std::cout << "minigun time(ms): " << dur1 << std::endl; double dur2 = RunBaseline1(scsr, csr, 0, num_heads); std::cout << "baseline1 time(ms): " << dur2 << std::endl; return 0; }
1dacb791c8003411a4a946908d641963596f45ff.cu
#include <iostream> #include <cstdlib> #include <limits> #include <time.h> #include <cuda_runtime.h> #include <minigun/minigun.h> #include "./baseline/yzh_kernels.cuh" #include "./minigun/esoftmax.cuh" #include "../samples_io.h" #include "../samples_utils.h" using minigun::advance::RuntimeConfig; using namespace esoftmax; double RunMinigun(const utils::SampleCsr& scsr, const minigun::IntCsr& csr, int32_t feat_size, int32_t num_heads) { // gdata GData gdata, truth; gdata.H = num_heads; InitGData(scsr, &gdata, &truth); CUDA_CALL(cudaDeviceSynchronize()); // create stream RuntimeConfig rtcfg; rtcfg.ctx = {kDLGPU, 0}; int nt = utils::_FindNumThreads(gdata.H, 32); rtcfg.data_num_threads = nt; rtcfg.data_num_blocks = gdata.H / nt; CUDA_CALL(cudaStreamCreate(&rtcfg.stream)); minigun::IntArray infront; // dry run typedef minigun::advance::Config<true, minigun::advance::kV2N> Config; minigun::advance::Advance<kDLGPU, int32_t, Config, GData, EdgeMax>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, MinusMaxExpSum>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, Norm>( rtcfg, csr, &gdata, infront); CUDA_CALL(cudaDeviceSynchronize()); CheckResult(scsr, &gdata, &truth); const int K = 10; timeval t0, t1; gettimeofday(&t0, nullptr); for (int i = 0; i < K; ++i) { minigun::advance::Advance<kDLGPU, int32_t, Config, GData, EdgeMax>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, MinusMaxExpSum>( rtcfg, csr, &gdata, infront); minigun::advance::Advance<kDLGPU, int32_t, Config, GData, Norm>( rtcfg, csr, &gdata, infront); } CUDA_CALL(cudaDeviceSynchronize()); gettimeofday(&t1, nullptr); double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec - (t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms FreeGData(&gdata, &truth); return dur; } double RunBaseline1(const utils::SampleCsr& scsr, const minigun::IntCsr& csr, int32_t feat_size, int32_t num_heads) { // gdata GData gdata, truth; gdata.H = num_heads; InitGData(scsr, &gdata, &truth); const int32_t N = csr.row_offsets.length - 1; const int H = gdata.H; // dry run custom_kernel::sparse_softmax_forward_kernel<int32_t, float><<<(N + 31) / 32, dim3(32, H)>>>( csr.row_offsets.data, gdata.score, gdata.ret, (int)N, (int)H); CUDA_CALL(cudaDeviceSynchronize()); const int K = 10; timeval t0, t1; gettimeofday(&t0, nullptr); for (int i = 0; i < K; ++i) { custom_kernel::sparse_softmax_forward_kernel<int32_t, float><<<(N + 31) / 32, dim3(32, H)>>>( csr.row_offsets.data, gdata.score, gdata.ret, (int)N, (int)H); } CUDA_CALL(cudaDeviceSynchronize()); gettimeofday(&t1, nullptr); double dur = (double)(t1.tv_sec * 1e6 + t1.tv_usec - (t0.tv_sec * 1e6 + t0.tv_usec)) / K / 1000.0; // ms FreeGData(&gdata, &truth); return dur; } int main(int argc, char** argv) { srand(42); if (argc < 3) { std::cout << "USAGE: ./bench_masked_mm <file_name> <num_heads>" << std::endl; return 1; } const char* filename = argv[1]; const int num_heads = std::atoi(argv[2]); std::cout << "filename=" << filename << " num_heads=" << num_heads << std::endl; utils::SampleCsr scsr; utils::LoadGraphFromFile(filename, &scsr); const int32_t N = scsr.row_offsets.size() - 1; const int32_t M = scsr.column_indices.size(); std::cout << "#Nodes: " << N << " #Edges: " << M << std::endl; // csr minigun::IntCsr csr = utils::ToMinigunCsr(scsr, kDLGPU); double dur1 = RunMinigun(scsr, csr, 0, num_heads); std::cout << "minigun time(ms): " << dur1 << std::endl; double dur2 = RunBaseline1(scsr, csr, 0, num_heads); 
std::cout << "baseline1 time(ms): " << dur2 << std::endl; return 0; }
6cb7c1e840954e965c0f48843c1004bef95145e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void box_encode_kernel(float * targets_dx, float * targets_dy, float * targets_dw, float * targets_dh, float4 * boxes, float4 * anchors, float wx, float wy, float ww, float wh, size_t gt, size_t idxJump) { int idx = blockIdx.x*blockDim.x + threadIdx.x; size_t row_offset; float anchors_x1,anchors_x2, anchors_y1, anchors_y2, boxes_x1, boxes_x2, boxes_y1, boxes_y2, ex_w, ex_h, ex_ctr_x, ex_ctr_y, gt_w, gt_h, gt_ctr_x, gt_ctr_y; for (int i=idx; i<gt; i+=idxJump){ row_offset = i; anchors_x1 = anchors[row_offset].x; anchors_y1 = anchors[row_offset].y; anchors_x2 = anchors[row_offset].z; anchors_y2 = anchors[row_offset].w; boxes_x1 = boxes[row_offset].x; boxes_y1 = boxes[row_offset].y; boxes_x2 = boxes[row_offset].z; boxes_y2 = boxes[row_offset].w; ex_w = anchors_x2 - anchors_x1 + 1; ex_h = anchors_y2 - anchors_y1 + 1; ex_ctr_x = anchors_x1 + 0.5 * ex_w; ex_ctr_y = anchors_y1 + 0.5 * ex_h; gt_w = boxes_x2 - boxes_x1 + 1; gt_h = boxes_y2 - boxes_y1 + 1; gt_ctr_x = boxes_x1 + 0.5 * gt_w; gt_ctr_y = boxes_y1 + 0.5 * gt_h; targets_dx[i] = wx * (gt_ctr_x-ex_ctr_x)/ex_w; targets_dy[i] = wy * (gt_ctr_y-ex_ctr_y)/ex_h; targets_dw[i] = ww * log(gt_w/ex_w); targets_dh[i] = wh * log(gt_h/ex_h); } }
6cb7c1e840954e965c0f48843c1004bef95145e5.cu
#include "includes.h" __global__ void box_encode_kernel(float * targets_dx, float * targets_dy, float * targets_dw, float * targets_dh, float4 * boxes, float4 * anchors, float wx, float wy, float ww, float wh, size_t gt, size_t idxJump) { int idx = blockIdx.x*blockDim.x + threadIdx.x; size_t row_offset; float anchors_x1,anchors_x2, anchors_y1, anchors_y2, boxes_x1, boxes_x2, boxes_y1, boxes_y2, ex_w, ex_h, ex_ctr_x, ex_ctr_y, gt_w, gt_h, gt_ctr_x, gt_ctr_y; for (int i=idx; i<gt; i+=idxJump){ row_offset = i; anchors_x1 = anchors[row_offset].x; anchors_y1 = anchors[row_offset].y; anchors_x2 = anchors[row_offset].z; anchors_y2 = anchors[row_offset].w; boxes_x1 = boxes[row_offset].x; boxes_y1 = boxes[row_offset].y; boxes_x2 = boxes[row_offset].z; boxes_y2 = boxes[row_offset].w; ex_w = anchors_x2 - anchors_x1 + 1; ex_h = anchors_y2 - anchors_y1 + 1; ex_ctr_x = anchors_x1 + 0.5 * ex_w; ex_ctr_y = anchors_y1 + 0.5 * ex_h; gt_w = boxes_x2 - boxes_x1 + 1; gt_h = boxes_y2 - boxes_y1 + 1; gt_ctr_x = boxes_x1 + 0.5 * gt_w; gt_ctr_y = boxes_y1 + 0.5 * gt_h; targets_dx[i] = wx * (gt_ctr_x-ex_ctr_x)/ex_w; targets_dy[i] = wy * (gt_ctr_y-ex_ctr_y)/ex_h; targets_dw[i] = ww * log(gt_w/ex_w); targets_dh[i] = wh * log(gt_h/ex_h); } }
1cc4a27269f7cd3e0bf78238286a235ea4ff2e80.hip
// !!! This is a file automatically generated by hipify!!!
// Array multiplication: C = A * B:

// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>

// CUDA runtime
#include <hip/hip_runtime.h>

// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"

#ifndef BLOCKSIZE
#define BLOCKSIZE   16      // number of threads per block
#endif

#ifndef SIZE
#define SIZE        1*1024*1024     // array size
#endif

#ifndef NUMTRIALS
#define NUMTRIALS   100     // to make the timing more accurate
#endif

#ifndef TOLERANCE
#define TOLERANCE   0.00001f    // tolerance to relative error
#endif

// array multiplication (CUDA Kernel) on the device: C = A * B
__global__ void ArrayMul( float *A, float *B, float *C )
{
    __shared__ float prods[BLOCKSIZE];

    unsigned int numItems = blockDim.x;
    unsigned int tnum = threadIdx.x;
    unsigned int wgNum = blockIdx.x;
    unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;

    prods[tnum] = A[gid] * B[gid];

    for (int offset = 1; offset < numItems; offset *= 2)
    {
        int mask = 2 * offset - 1;
        __syncthreads();
        if ((tnum & mask) == 0)
        {
            prods[tnum] += prods[tnum + offset];
        }
    }

    __syncthreads();
    if (tnum == 0)
        C[wgNum] = prods[0];
}

// main program:
int main( int argc, char* argv[ ] )
{
    int dev = findCudaDevice(argc, (const char **)argv);

    // allocate host memory:
    float * hA = new float [ SIZE ];
    float * hB = new float [ SIZE ];
    float * hC = new float [ SIZE/BLOCKSIZE ];

    for( int i = 0; i < SIZE; i++ )
    {
        hA[i] = hB[i] = (float) sqrt( (float)(i+1) );
    }

    // allocate device memory:
    float *dA, *dB, *dC;

    dim3 dimsA( SIZE, 1, 1 );
    dim3 dimsB( SIZE, 1, 1 );
    dim3 dimsC( SIZE/BLOCKSIZE, 1, 1 );

    //__shared__ float prods[SIZE/BLOCKSIZE];

    hipError_t status;
    status = hipMalloc( reinterpret_cast<void **>(&dA), SIZE*sizeof(float) );
    checkCudaErrors( status );
    status = hipMalloc( reinterpret_cast<void **>(&dB), SIZE*sizeof(float) );
    checkCudaErrors( status );
    status = hipMalloc( reinterpret_cast<void **>(&dC), (SIZE/BLOCKSIZE)*sizeof(float) );
    checkCudaErrors( status );

    // copy host memory to the device:
    status = hipMemcpy( dA, hA, SIZE*sizeof(float), hipMemcpyHostToDevice );
    checkCudaErrors( status );
    status = hipMemcpy( dB, hB, SIZE*sizeof(float), hipMemcpyHostToDevice );
    checkCudaErrors( status );

    // setup the execution parameters:
    dim3 threads(BLOCKSIZE, 1, 1 );
    dim3 grid( SIZE / threads.x, 1, 1 );

    // Create and start timer
    hipDeviceSynchronize( );

    // allocate CUDA events that we'll use for timing:
    hipEvent_t start, stop;
    status = hipEventCreate( &start );
    checkCudaErrors( status );
    status = hipEventCreate( &stop );
    checkCudaErrors( status );

    // record the start event:
    status = hipEventRecord( start, NULL );
    checkCudaErrors( status );

    // execute the kernel:
    for( int t = 0; t < NUMTRIALS; t++)
    {
        hipLaunchKernelGGL(ArrayMul, dim3(grid), dim3(threads), 0, 0, dA, dB, dC );
    }

    // record the stop event:
    status = hipEventRecord( stop, NULL );
    checkCudaErrors( status );

    // wait for the stop event to complete:
    status = hipEventSynchronize( stop );
    checkCudaErrors( status );

    float msecTotal = 0.0f;
    status = hipEventElapsedTime( &msecTotal, start, stop );
    checkCudaErrors( status );

    // compute and print the performance
    double secondsTotal = 0.001 * (double)msecTotal;
    double multsPerSecond = (float)SIZE * (float)NUMTRIALS / secondsTotal;
    double megaMultsPerSecond = multsPerSecond / 1000000.;
    fprintf( stderr, "Array Size = %10d, MegaMultReductions/Second = %10.2lf\n", SIZE, megaMultsPerSecond );

    // copy result from the device to the host:
    status = hipMemcpy( hC, dC, (SIZE/BLOCKSIZE)*sizeof(float), hipMemcpyDeviceToHost );
    checkCudaErrors( status );

    // check the sum :
    double sum = 0.;
    for(int i = 0; i < SIZE/BLOCKSIZE; i++ )
    {
        //fprintf(stderr, "hC[%6d] = %10.2f\n", i, hC[i]);
        sum += (double)hC[i];
    }
    fprintf( stderr, "\nsum = %10.2lf\n", sum );

    // clean up memory:
    delete [ ] hA;
    delete [ ] hB;
    delete [ ] hC;

    status = hipFree( dA );
    checkCudaErrors( status );
    status = hipFree( dB );
    checkCudaErrors( status );
    status = hipFree( dC );
    checkCudaErrors( status );

    return 0;
}
1cc4a27269f7cd3e0bf78238286a235ea4ff2e80.cu
// Array multiplication: C = A * B:

// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>

// CUDA runtime
#include <cuda_runtime.h>

// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"

#ifndef BLOCKSIZE
#define BLOCKSIZE   16      // number of threads per block
#endif

#ifndef SIZE
#define SIZE        1*1024*1024     // array size
#endif

#ifndef NUMTRIALS
#define NUMTRIALS   100     // to make the timing more accurate
#endif

#ifndef TOLERANCE
#define TOLERANCE   0.00001f    // tolerance to relative error
#endif

// array multiplication (CUDA Kernel) on the device: C = A * B
__global__ void ArrayMul( float *A, float *B, float *C )
{
    __shared__ float prods[BLOCKSIZE];

    unsigned int numItems = blockDim.x;
    unsigned int tnum = threadIdx.x;
    unsigned int wgNum = blockIdx.x;
    unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;

    prods[tnum] = A[gid] * B[gid];

    for (int offset = 1; offset < numItems; offset *= 2)
    {
        int mask = 2 * offset - 1;
        __syncthreads();
        if ((tnum & mask) == 0)
        {
            prods[tnum] += prods[tnum + offset];
        }
    }

    __syncthreads();
    if (tnum == 0)
        C[wgNum] = prods[0];
}

// main program:
int main( int argc, char* argv[ ] )
{
    int dev = findCudaDevice(argc, (const char **)argv);

    // allocate host memory:
    float * hA = new float [ SIZE ];
    float * hB = new float [ SIZE ];
    float * hC = new float [ SIZE/BLOCKSIZE ];

    for( int i = 0; i < SIZE; i++ )
    {
        hA[i] = hB[i] = (float) sqrt( (float)(i+1) );
    }

    // allocate device memory:
    float *dA, *dB, *dC;

    dim3 dimsA( SIZE, 1, 1 );
    dim3 dimsB( SIZE, 1, 1 );
    dim3 dimsC( SIZE/BLOCKSIZE, 1, 1 );

    //__shared__ float prods[SIZE/BLOCKSIZE];

    cudaError_t status;
    status = cudaMalloc( reinterpret_cast<void **>(&dA), SIZE*sizeof(float) );
    checkCudaErrors( status );
    status = cudaMalloc( reinterpret_cast<void **>(&dB), SIZE*sizeof(float) );
    checkCudaErrors( status );
    status = cudaMalloc( reinterpret_cast<void **>(&dC), (SIZE/BLOCKSIZE)*sizeof(float) );
    checkCudaErrors( status );

    // copy host memory to the device:
    status = cudaMemcpy( dA, hA, SIZE*sizeof(float), cudaMemcpyHostToDevice );
    checkCudaErrors( status );
    status = cudaMemcpy( dB, hB, SIZE*sizeof(float), cudaMemcpyHostToDevice );
    checkCudaErrors( status );

    // setup the execution parameters:
    dim3 threads(BLOCKSIZE, 1, 1 );
    dim3 grid( SIZE / threads.x, 1, 1 );

    // Create and start timer
    cudaDeviceSynchronize( );

    // allocate CUDA events that we'll use for timing:
    cudaEvent_t start, stop;
    status = cudaEventCreate( &start );
    checkCudaErrors( status );
    status = cudaEventCreate( &stop );
    checkCudaErrors( status );

    // record the start event:
    status = cudaEventRecord( start, NULL );
    checkCudaErrors( status );

    // execute the kernel:
    for( int t = 0; t < NUMTRIALS; t++)
    {
        ArrayMul<<< grid, threads >>>( dA, dB, dC );
    }

    // record the stop event:
    status = cudaEventRecord( stop, NULL );
    checkCudaErrors( status );

    // wait for the stop event to complete:
    status = cudaEventSynchronize( stop );
    checkCudaErrors( status );

    float msecTotal = 0.0f;
    status = cudaEventElapsedTime( &msecTotal, start, stop );
    checkCudaErrors( status );

    // compute and print the performance
    double secondsTotal = 0.001 * (double)msecTotal;
    double multsPerSecond = (float)SIZE * (float)NUMTRIALS / secondsTotal;
    double megaMultsPerSecond = multsPerSecond / 1000000.;
    fprintf( stderr, "Array Size = %10d, MegaMultReductions/Second = %10.2lf\n", SIZE, megaMultsPerSecond );

    // copy result from the device to the host:
    status = cudaMemcpy( hC, dC, (SIZE/BLOCKSIZE)*sizeof(float), cudaMemcpyDeviceToHost );
    checkCudaErrors( status );

    // check the sum :
    double sum = 0.;
    for(int i = 0; i < SIZE/BLOCKSIZE; i++ )
    {
        //fprintf(stderr, "hC[%6d] = %10.2f\n", i, hC[i]);
        sum += (double)hC[i];
    }
    fprintf( stderr, "\nsum = %10.2lf\n", sum );

    // clean up memory:
    delete [ ] hA;
    delete [ ] hB;
    delete [ ] hC;

    status = cudaFree( dA );
    checkCudaErrors( status );
    status = cudaFree( dB );
    checkCudaErrors( status );
    status = cudaFree( dC );
    checkCudaErrors( status );

    return 0;
}
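The in-block reduction in ArrayMul uses interleaved addressing (tnum & mask), which leaves gaps of idle threads inside each active warp. A common alternative is sequential addressing, where the active threads stay contiguous. The kernel below is a sketch of that variant, not part of the course code; it reuses BLOCKSIZE from the file above and assumes blockDim.x is a power of two.

// Block-level product reduction with sequential addressing: the number of active
// threads halves each step, and active threads remain a contiguous prefix of the block.
__global__ void ArrayMulSeq( const float *A, const float *B, float *C )
{
    __shared__ float prods[BLOCKSIZE];

    unsigned int tnum = threadIdx.x;
    unsigned int gid  = blockIdx.x * blockDim.x + threadIdx.x;

    prods[tnum] = A[gid] * B[gid];
    __syncthreads();

    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1)
    {
        if (tnum < offset)
            prods[tnum] += prods[tnum + offset];
        __syncthreads();    // executed by all threads, outside the conditional
    }

    if (tnum == 0)
        C[blockIdx.x] = prods[0];   // one partial dot-product per block, summed on the host
}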
4abf17c2a5d7928d1c265fc70fd20eb3211c5746.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void ComputePositions(float *g_Data1, float *g_Data2, float *g_Data3,
                                 int *d_Ptrs, float *d_Sift,
                                 int numPts, int maxPts, int w, int h)
{
    int i = __mul24(blockIdx.x, POSBLK_SIZE) + threadIdx.x;
    if (i>=numPts)
        return;
    int p = d_Ptrs[i];
    //if (p<w+1 || p>=(w*h-w-1))
    //  printf("ComputePositions: read error\n");
    float val[7];
    val[0] = g_Data2[p];
    val[1] = g_Data2[p-1];
    val[2] = g_Data2[p+1];
    float dx = 0.5f*(val[2] - val[1]);
    float dxx = 2.0f*val[0] - val[1] - val[2];
    val[3] = g_Data2[p-w];
    val[4] = g_Data2[p+w];
    float dy = 0.5f*(val[4] - val[3]);
    float dyy = 2.0f*val[0] - val[3] - val[4];
    val[5] = g_Data3[p];
    val[6] = g_Data1[p];
    float ds = 0.5f*(val[6] - val[5]);
    float dss = 2.0f*val[0] - val[5] - val[6];
    float dxy = 0.25f*(g_Data2[p+w+1] + g_Data2[p-w-1] - g_Data2[p-w+1] - g_Data2[p+w-1]);
    float dxs = 0.25f*(g_Data3[p+1] + g_Data1[p-1] - g_Data1[p+1] - g_Data3[p-1]);
    float dys = 0.25f*(g_Data3[p+w] + g_Data1[p-w] - g_Data3[p-w] - g_Data1[p+w]);
    float idxx = dyy*dss - dys*dys;
    float idxy = dys*dxs - dxy*dss;
    float idxs = dxy*dys - dyy*dxs;
    float idyy = dxx*dss - dxs*dxs;
    float idys = dxy*dxs - dxx*dys;
    float idss = dxx*dyy - dxy*dxy;
    float det = idxx*dxx + idxy*dxy + idxs*dxs;
    float idet = 1.0f / det;
    float pdx = idet*(idxx*dx + idxy*dy + idxs*ds);
    float pdy = idet*(idxy*dx + idyy*dy + idys*ds);
    float pds = idet*(idxs*dx + idys*dy + idss*ds);
    if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f) {
        pdx = __fdividef(dx, dxx);
        pdy = __fdividef(dy, dyy);
        pds = __fdividef(ds, dss);
    }
    float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds);
    d_Sift[i+0*maxPts] = (p%w) + pdx;
    d_Sift[i+1*maxPts] = (p/w) + pdy;
    d_Sift[i+2*maxPts] = d_ConstantA[0] * exp2f(pds*d_ConstantB[0]);
    d_Sift[i+3*maxPts] = val[0] + dval;
    float tra = dxx + dyy;
    det = dxx*dyy - dxy*dxy;
    d_Sift[i+4*maxPts] = __fdividef(tra*tra, det);
}
4abf17c2a5d7928d1c265fc70fd20eb3211c5746.cu
#include "includes.h" __global__ void ComputePositions(float *g_Data1, float *g_Data2, float *g_Data3, int *d_Ptrs, float *d_Sift, int numPts, int maxPts, int w, int h) { int i = __mul24(blockIdx.x, POSBLK_SIZE) + threadIdx.x; if (i>=numPts) return; int p = d_Ptrs[i]; //if (p<w+1 || p>=(w*h-w-1)) // printf("ComputePositions: read error\n"); float val[7]; val[0] = g_Data2[p]; val[1] = g_Data2[p-1]; val[2] = g_Data2[p+1]; float dx = 0.5f*(val[2] - val[1]); float dxx = 2.0f*val[0] - val[1] - val[2]; val[3] = g_Data2[p-w]; val[4] = g_Data2[p+w]; float dy = 0.5f*(val[4] - val[3]); float dyy = 2.0f*val[0] - val[3] - val[4]; val[5] = g_Data3[p]; val[6] = g_Data1[p]; float ds = 0.5f*(val[6] - val[5]); float dss = 2.0f*val[0] - val[5] - val[6]; float dxy = 0.25f* (g_Data2[p+w+1] + g_Data2[p-w-1] - g_Data2[p-w+1] - g_Data2[p+w-1]); float dxs = 0.25f* (g_Data3[p+1] + g_Data1[p-1] - g_Data1[p+1] - g_Data3[p-1]); float dys = 0.25f* (g_Data3[p+w] + g_Data1[p-w] - g_Data3[p-w] - g_Data1[p+w]); float idxx = dyy*dss - dys*dys; float idxy = dys*dxs - dxy*dss; float idxs = dxy*dys - dyy*dxs; float idyy = dxx*dss - dxs*dxs; float idys = dxy*dxs - dxx*dys; float idss = dxx*dyy - dxy*dxy; float det = idxx*dxx + idxy*dxy + idxs*dxs; float idet = 1.0f / det; float pdx = idet* (idxx*dx + idxy*dy + idxs*ds); float pdy = idet* (idxy*dx + idyy*dy + idys*ds); float pds = idet* (idxs*dx + idys*dy + idss*ds); if (pdx<-0.5f || pdx>0.5f || pdy<-0.5f || pdy>0.5f || pds<-0.5f || pds>0.5f){ pdx = __fdividef(dx, dxx); pdy = __fdividef(dy, dyy); pds = __fdividef(ds, dss); } float dval = 0.5f*(dx*pdx + dy*pdy + ds*pds); d_Sift[i+0*maxPts] = (p%w) + pdx; d_Sift[i+1*maxPts] = (p/w) + pdy; d_Sift[i+2*maxPts] = d_ConstantA[0] * exp2f(pds*d_ConstantB[0]); d_Sift[i+3*maxPts] = val[0] + dval; float tra = dxx + dyy; det = dxx*dyy - dxy*dxy; d_Sift[i+4*maxPts] = __fdividef(tra*tra, det); }
37e231a4357afafeadd65d5b0c76490a87e14832.hip
// !!! This is a file automatically generated by hipify!!! #include "LBM2D_1D_indices.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <vector> #include <iostream> #include <glm\gtx\norm.hpp> #include "CUDAUtils.cuh" #include <omp.h> __constant__ int d_latticeWidth; ///< Lattice width constant on the device __constant__ int d_latticeHeight; ///< Lattice height constant on the device __constant__ int d_latticeSize; ///< Lattice size constant on the device (latticeWidth * latticeHeight) __constant__ float d_tau; ///< Tau value on the device __constant__ float d_itau; ///< Inverse tau value (1.0f / tau) on the device __constant__ int d_mirrorSides; ///< Whether to mirror sides (cycle) on the device //__constant__ int d_visualizeVelocity; __device__ int d_respawnIndex = 0; ///< Respawn index (y coordinate) for particle respawn, not used __constant__ int d_respawnMinY; ///< Minimum y respawn coordinate, not used __constant__ int d_respawnMaxY; ///< Maximum y respawn coordinate, not used __constant__ glm::vec3 d_directionVectors[NUM_2D_DIRECTIONS]; ///< Constant array of direction vectors /// Returns uniform random between 0.0 and 1.0. Provided from different student's work. __device__ __host__ float rand2D(int x, int y) { int n = x + y * 57; n = (n << 13) ^ n; return ((1.0f - ((n * (n * n * 15731 + 789221) + 1376312589) & 0x7fffffff) / 1073741824.0f) + 1.0f) * 0.5f; } /// Returns the flattened index using the device constants and provided coordinates. __device__ int getIdxKernel(int x, int y) { return x + y * d_latticeWidth; } /// Maps the value to the viridis color map. __device__ glm::vec3 mapToViridis2D(float val) { val = glm::clamp(val, 0.0f, 1.0f); int discreteVal = (int)(val * 255.0f); return glm::vec3(viridis_cm[discreteVal][0], viridis_cm[discreteVal][1], viridis_cm[discreteVal][2]); } /// Kernel for moving particles that uses OpenGL interoperability. /** Kernel for moving particles that uses OpenGL interoperability for setting particle positions and colors. If the particles venture beyond the simulation bounding volume, they are randomly respawned. If we use side mirroring (cycling), particles that go beyond side walls (on the y axis) will be mirrored/cycled to the other side of the bounding volume. \param[in] particleVertices Vertices (positions stored in VBO) of particles to be updated/moved. \param[in] velocities Array of velocities that will act on the particles. \param[in] numParticles Number of particles. \param[in] particleColors VBO of particle colors. 
*/ __global__ void moveParticlesKernelInterop(glm::vec3 *particleVertices, glm::vec2 *velocities, int *numParticles, glm::vec3 *particleColors) { glm::vec2 adjVelocities[4]; int idx = threadIdx.x + blockDim.x * blockIdx.x; while (idx < *numParticles) { float x = particleVertices[idx].x; float y = particleVertices[idx].y; int leftX = (int)x; int rightX = leftX + 1; int bottomY = (int)y; int topY = bottomY + 1; adjVelocities[0] = velocities[getIdxKernel(leftX, topY)]; adjVelocities[1] = velocities[getIdxKernel(rightX, topY)]; adjVelocities[2] = velocities[getIdxKernel(leftX, bottomY)]; adjVelocities[3] = velocities[getIdxKernel(rightX, bottomY)]; float horizontalRatio = x - leftX; float verticalRatio = y - bottomY; glm::vec2 topVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec2 bottomVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec2 finalVelocity = bottomVelocity * verticalRatio + topVelocity * (1.0f - verticalRatio); //particleVertices[idx] += make_float3(finalVelocity.x, 0.0f); particleVertices[idx].x += finalVelocity.x; particleVertices[idx].y += finalVelocity.y; //particleColors[idx] = glm::vec3(glm::length2(finalVelocity) * 4.0f); //particleColors[idx] = mapToColor(glm::length2(finalVelocity) * 4.0f); particleColors[idx] = mapToViridis2D(glm::length2(finalVelocity) * 4.0f); if (particleVertices[idx].x <= 0.0f || particleVertices[idx].x >= d_latticeWidth - 1 || particleVertices[idx].y <= 0.0f || particleVertices[idx].y >= d_latticeHeight - 1) { if (d_mirrorSides) { if (particleVertices[idx].x <= 0.0f || particleVertices[idx].x >= d_latticeWidth - 1) { particleVertices[idx].x = 0.0f; particleVertices[idx].y = rand2D(idx, y) * (d_latticeHeight - 1); ////particleVertices[idx].y = d_respawnIndex++; //particleVertices[idx].y = d_respawnIndex; //atomicAdd(&d_respawnIndex, 1); //if (d_respawnIndex >= d_respawnMaxY) { // //d_respawnIndex = d_respawnMinY; // atomicExch(&d_respawnIndex, d_respawnMinY); //} } else { particleVertices[idx].y = (float)((int)(particleVertices[idx].y + d_latticeHeight - 1) % (d_latticeHeight - 1)); } } else { particleVertices[idx].x = 0.0f; particleVertices[idx].y = rand2D(idx, y) * (d_latticeHeight - 1); ////particleVertices[idx].y = d_respawnIndex++; //particleVertices[idx].y = d_respawnIndex; //atomicAdd(&d_respawnIndex, 1); //if (d_respawnIndex >= d_respawnMaxY) { // //d_respawnIndex = d_respawnMinY; // atomicExch(&d_respawnIndex, d_respawnMinY); //} } particleVertices[idx].z = 0.0f; } idx += blockDim.x * gridDim.x; } } /// Kernel for clearing the back lattice. /** Kernel that clears the back lattice. \param[in] backLattice Pointer to the back lattice to be cleared. */ __global__ void clearBackLatticeKernel(Node *backLattice) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < d_latticeSize) { for (int i = 0; i < 9; i++) { backLattice[idx].adj[i] = 0.0f; } } } /// Kernel that streams the microscopic particles from the previous frame. /** Kernel that streams the microscopic particles from the previous frame. \param[in] backLatice Lattice that will be used in the current frame (the one we are currently updating). \param[in] frontLattice Lattice from the previous frame from which we stream the particles. 
*/ __global__ void streamingStepKernel(Node *backLattice, Node *frontLattice) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < d_latticeSize) { int x = idx % d_latticeWidth; int y = (idx / d_latticeWidth) % d_latticeHeight; backLattice[idx].adj[DIR_MIDDLE] += frontLattice[idx].adj[DIR_MIDDLE]; int right; int left; int top; int bottom; right = x + 1; left = x - 1; top = y + 1; bottom = y - 1; if (right > d_latticeWidth - 1) { right = d_latticeWidth - 1; } if (left < 0) { left = 0; } if (top > d_latticeHeight - 1) { top = d_latticeHeight - 1; } if (bottom < 0) { bottom = 0; } backLattice[idx].adj[DIR_RIGHT] += frontLattice[getIdxKernel(left, y)].adj[DIR_RIGHT]; backLattice[idx].adj[DIR_TOP] += frontLattice[getIdxKernel(x, bottom)].adj[DIR_TOP]; backLattice[idx].adj[DIR_LEFT] += frontLattice[getIdxKernel(right, y)].adj[DIR_LEFT]; backLattice[idx].adj[DIR_BOTTOM] += frontLattice[getIdxKernel(x, top)].adj[DIR_BOTTOM]; backLattice[idx].adj[DIR_TOP_RIGHT] += frontLattice[getIdxKernel(left, bottom)].adj[DIR_TOP_RIGHT]; backLattice[idx].adj[DIR_TOP_LEFT] += frontLattice[getIdxKernel(right, bottom)].adj[DIR_TOP_LEFT]; backLattice[idx].adj[DIR_BOTTOM_LEFT] += frontLattice[getIdxKernel(right, top)].adj[DIR_BOTTOM_LEFT]; backLattice[idx].adj[DIR_BOTTOM_RIGHT] += frontLattice[getIdxKernel(left, top)].adj[DIR_BOTTOM_RIGHT]; for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } /// Kernel for updating the inlets. /** Kernel for updating the inlets. Acts the same way as collision step but with predetermined velocity and density. The inlet is the left wall of the simulation bounding volume. \param[in] backLattice The back lattice where we update node values. \param[in] velocities Velocities array for the lattice. \param[in] inletVelocity Our desired inlet velocity. */ __global__ void updateInletsKernel(Node *lattice, glm::vec3 inletVelocity) { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; float macroDensity = 1.0f; //glm::vec3 macroVelocity = inletVelocity; // unnecessary variable -> remove const glm::vec3 vRight = glm::vec3(1.0f, 0.0f, 0.0f); const glm::vec3 vTop = glm::vec3(0.0f, 1.0f, 0.0f); const glm::vec3 vLeft = glm::vec3(-1.0f, 0.0f, 0.0f); const glm::vec3 vBottom = glm::vec3(0.0f, -1.0f, 0.0f); const glm::vec3 vTopRight = glm::vec3(1.0f, 1.0f, 0.0f); const glm::vec3 vTopLeft = glm::vec3(-1.0f, 1.0f, 0.0f); const glm::vec3 vBottomLeft = glm::vec3(-1.0f, -1.0f, 0.0f); const glm::vec3 vBottomRight = glm::vec3(1.0f, -1.0f, 0.0f); // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float macroVelocityDot = glm::dot(inletVelocity, inletVelocity); float thirdTerm = 1.5f * macroVelocityDot / LAT_SPEED_SQ; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); // this can all be rewritten into arrays + for cycles! 
float dotProd = glm::dot(vRight, inletVelocity); float firstTerm = 3.0f * dotProd / LAT_SPEED; float secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); int idx = threadIdx.x + blockDim.x * blockIdx.x; int x = idx % d_latticeWidth; if (x == 0 && idx < d_latticeSize) { lattice[idx].adj[DIR_MIDDLE] = middleEq; lattice[idx].adj[DIR_RIGHT] = rightEq; lattice[idx].adj[DIR_TOP] = topEq; lattice[idx].adj[DIR_LEFT] = leftEq; lattice[idx].adj[DIR_BOTTOM] = bottomEq; lattice[idx].adj[DIR_TOP_RIGHT] = topRightEq; lattice[idx].adj[DIR_TOP_LEFT] = topLeftEq; lattice[idx].adj[DIR_BOTTOM_LEFT] = bottomLeftEq; lattice[idx].adj[DIR_BOTTOM_RIGHT] = bottomRightEq; for (int i = 0; i < 9; i++) { if (lattice[idx].adj[i] < 0.0f) { lattice[idx].adj[i] = 0.0f; } else if (lattice[idx].adj[i] > 1.0f) { lattice[idx].adj[i] = 1.0f; } } } } /// Kernel for updating colliders/obstacles in the lattice. /** Updates colliders/obstacles by using the full bounce back approach. \param[in] backLattice Back lattice in which we do our calculations. \param[in] velocities Velocities array for the lattice. \param[in] heightMap Height map of the scene. 
*/ __global__ void updateCollidersKernel(Node *backLattice, bool *tCol) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < d_latticeSize) { if (tCol[idx]) { float right = backLattice[idx].adj[DIR_RIGHT]; float top = backLattice[idx].adj[DIR_TOP]; float left = backLattice[idx].adj[DIR_LEFT]; float bottom = backLattice[idx].adj[DIR_BOTTOM]; float topRight = backLattice[idx].adj[DIR_TOP_RIGHT]; float topLeft = backLattice[idx].adj[DIR_TOP_LEFT]; float bottomLeft = backLattice[idx].adj[DIR_BOTTOM_LEFT]; float bottomRight = backLattice[idx].adj[DIR_BOTTOM_RIGHT]; backLattice[idx].adj[DIR_RIGHT] = left; backLattice[idx].adj[DIR_TOP] = bottom; backLattice[idx].adj[DIR_LEFT] = right; backLattice[idx].adj[DIR_BOTTOM] = top; backLattice[idx].adj[DIR_TOP_RIGHT] = bottomLeft; backLattice[idx].adj[DIR_TOP_LEFT] = bottomRight; backLattice[idx].adj[DIR_BOTTOM_LEFT] = topRight; backLattice[idx].adj[DIR_BOTTOM_RIGHT] = topLeft; } } } /// Kernel for calculating the collision operator. /** Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator. Uses shared memory for speedup. \param[in] backLattice Back lattice in which we do our calculations. \param[in] velocities Velocities array for the lattice. */ __global__ void collisionStepKernel(Node *backLattice, glm::vec2 *velocities) { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; int idx = threadIdx.x + blockDim.x * blockIdx.x; // 1D array kernel int cacheIdx = threadIdx.x; extern __shared__ Node cache[]; if (idx < d_latticeSize) { cache[cacheIdx] = backLattice[idx]; float macroDensity = 0.0f; for (int i = 0; i < 9; i++) { macroDensity += cache[cacheIdx].adj[i]; } glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f); macroVelocity += LAT_SPEED * d_directionVectors[DIR_RIGHT] * cache[cacheIdx].adj[DIR_RIGHT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_TOP] * cache[cacheIdx].adj[DIR_TOP]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_LEFT] * cache[cacheIdx].adj[DIR_LEFT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_BOTTOM] * cache[cacheIdx].adj[DIR_BOTTOM]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_TOP_RIGHT] * cache[cacheIdx].adj[DIR_TOP_RIGHT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_TOP_LEFT] * cache[cacheIdx].adj[DIR_TOP_LEFT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_BOTTOM_LEFT] * cache[cacheIdx].adj[DIR_BOTTOM_LEFT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_BOTTOM_RIGHT] * cache[cacheIdx].adj[DIR_BOTTOM_RIGHT]; macroVelocity /= macroDensity; //velocities[idx] = glm::vec2(macroVelocity.x, macroVelocity.y); velocities[idx].x = macroVelocity.x; velocities[idx].y = macroVelocity.y; // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float thirdTerm = 1.5f * glm::dot(macroVelocity, macroVelocity) / LAT_SPEED_SQ; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); // this can all be rewritten into arrays + for cycles! 
float dotProd = glm::dot(d_directionVectors[DIR_RIGHT], macroVelocity); float firstTerm = 3.0f * dotProd / LAT_SPEED; float secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_TOP], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_LEFT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_BOTTOM], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_TOP_RIGHT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_TOP_LEFT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_BOTTOM_LEFT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_BOTTOM_RIGHT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); cache[cacheIdx].adj[DIR_MIDDLE] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE] - middleEq); cache[cacheIdx].adj[DIR_RIGHT] -= d_itau * (cache[cacheIdx].adj[DIR_RIGHT] - rightEq); cache[cacheIdx].adj[DIR_TOP] -= d_itau * (cache[cacheIdx].adj[DIR_TOP] - topEq); cache[cacheIdx].adj[DIR_LEFT] -= d_itau * (cache[cacheIdx].adj[DIR_LEFT] - leftEq); cache[cacheIdx].adj[DIR_BOTTOM] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM] - bottomEq); cache[cacheIdx].adj[DIR_TOP_RIGHT] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_RIGHT] - topRightEq); cache[cacheIdx].adj[DIR_TOP_LEFT] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_LEFT] - topLeftEq); cache[cacheIdx].adj[DIR_BOTTOM_LEFT] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); cache[cacheIdx].adj[DIR_BOTTOM_RIGHT] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); for (int i = 0; i < 9; i++) { if (cache[cacheIdx].adj[i] < 0.0f) { cache[cacheIdx].adj[i] = 0.0f; } else if (cache[cacheIdx].adj[i] > 1.0f) { cache[cacheIdx].adj[i] = 1.0f; } } backLattice[idx] = cache[cacheIdx]; } } LBM2D_1D_indices::LBM2D_1D_indices() { } LBM2D_1D_indices::LBM2D_1D_indices(glm::ivec3 dim, string sceneFilename, float tau, ParticleSystemLBM *particleSystem, int numThreads) : LBM(nullptr, dim, sceneFilename, tau, particleSystem), numThreads(numThreads) { initScene(); frontLattice = new Node[latticeSize](); backLattice = new Node[latticeSize](); velocities = new glm::vec2[latticeSize](); hipMalloc((void**)&d_frontLattice, sizeof(Node) * latticeSize); 
hipMalloc((void**)&d_backLattice, sizeof(Node) * latticeSize); hipMalloc((void**)&d_velocities, sizeof(glm::vec2) * latticeSize); hipMemcpyToSymbol(d_latticeWidth, &latticeWidth, sizeof(int)); hipMemcpyToSymbol(d_latticeHeight, &latticeHeight, sizeof(int)); hipMemcpyToSymbol(d_latticeSize, &latticeSize, sizeof(int)); hipMemcpyToSymbol(d_tau, &tau, sizeof(float)); hipMemcpyToSymbol(d_itau, &itau, sizeof(float)); hipMemcpyToSymbol(d_mirrorSides, &mirrorSides, sizeof(int)); hipMemcpyToSymbol(d_directionVectors, &directionVectors, sizeof(glm::vec3) * NUM_2D_DIRECTIONS); hipGraphicsGLRegisterBuffer(&cudaParticleVerticesVBO, particleSystem->vbo, hipGraphicsMapFlagsWriteDiscard); hipGraphicsGLRegisterBuffer(&cudaParticleColorsVBO, particleSystem->colorsVBO, hipGraphicsMapFlagsWriteDiscard); initBuffers(); initLattice(); //updateInlets(frontLattice); hipMemcpy(d_backLattice, backLattice, sizeof(Node) * latticeSize, hipMemcpyHostToDevice); hipMemcpy(d_velocities, velocities, sizeof(glm::vec2) * latticeSize, hipMemcpyHostToDevice); hipMemcpy(d_frontLattice, frontLattice, sizeof(Node) * latticeSize, hipMemcpyHostToDevice); numBlocks = (int)ceil(latticeSize / this->numThreads) + 1; } void LBM2D_1D_indices::resetSimulation() { cout << "Resetting simulation..." << endl; particleSystem->initParticlePositions(latticeWidth, latticeHeight, tCol->area); for (int i = 0; i < latticeWidth * latticeHeight; i++) { for (int j = 0; j < 9; j++) { backLattice[i].adj[j] = 0.0f; } velocities[i] = glm::vec3(0.0f); } initLattice(); hipMemcpy(d_frontLattice, frontLattice, sizeof(Node) * latticeWidth * latticeHeight, hipMemcpyHostToDevice); hipMemcpy(d_backLattice, backLattice, sizeof(Node) * latticeWidth * latticeHeight, hipMemcpyHostToDevice); hipMemcpy(d_velocities, velocities, sizeof(glm::vec2) * latticeWidth * latticeHeight, hipMemcpyHostToDevice); } void LBM2D_1D_indices::switchToCPU() { cout << "Copying data back to CPU for simulation..." 
<< endl; hipMemcpy(frontLattice, d_frontLattice, sizeof(Node) * latticeSize, hipMemcpyDeviceToHost); hipMemcpy(backLattice, d_backLattice, sizeof(Node) * latticeSize, hipMemcpyDeviceToHost); hipMemcpy(velocities, d_velocities, sizeof(glm::vec2) * latticeSize, hipMemcpyDeviceToHost); particleSystem->copyDataFromVBOtoCPU(); } void LBM2D_1D_indices::synchronize() { hipDeviceSynchronize(); } LBM2D_1D_indices::~LBM2D_1D_indices() { delete[] frontLattice; delete[] backLattice; delete[] velocities; delete tCol; hipFree(d_frontLattice); hipFree(d_backLattice); hipFree(d_tCol); hipFree(d_velocities); hipGraphicsUnregisterResource(cudaParticleVerticesVBO); hipGraphicsUnregisterResource(cudaParticleColorsVBO); } void LBM2D_1D_indices::recalculateVariables() { LBM::recalculateVariables(); hipMemcpyToSymbol(d_tau, &tau, sizeof(float)); hipMemcpyToSymbol(d_itau, &itau, sizeof(float)); } void LBM2D_1D_indices::initScene() { tCol = new LatticeCollider(sceneFilename); latticeWidth = tCol->width; latticeHeight = tCol->height; latticeDepth = 1; latticeSize = latticeWidth * latticeHeight; precomputeRespawnRange(); hipMalloc((void**)&d_tCol, sizeof(bool) * latticeSize); hipMemcpy(d_tCol, &tCol->area[0], sizeof(bool) * latticeSize, hipMemcpyHostToDevice); particleVertices = particleSystem->particleVertices; d_numParticles = particleSystem->d_numParticles; particleSystem->initParticlePositions(latticeWidth, latticeHeight, tCol->area); } void LBM2D_1D_indices::draw(ShaderProgram &shader) { //glPointSize(0.4f); //shader.setVec3("u_Color", glm::vec3(0.4f, 0.4f, 0.1f)); //glUseProgram(shader.id); //glBindVertexArray(vao); //glDrawArrays(GL_POINTS, 0, latticeWidth * latticeHeight); //cout << "Velocity arrows size = " << velocityArrows.size() << endl; #ifdef DRAW_VELOCITY_ARROWS shader.setVec3("u_Color", glm::vec3(0.2f, 0.3f, 1.0f)); glBindVertexArray(velocityVAO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * velocityArrows.size(), &velocityArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, velocityArrows.size()); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS shader.setVec3("u_Color", glm::vec3(0.8f, 1.0f, 0.6f)); glBindVertexArray(particleArrowsVAO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * particleArrows.size(), &particleArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, particleArrows.size()); #endif // Draw scene collider tCol->draw(shader); } void LBM2D_1D_indices::doStep() { clearBackLattice(); updateInlets(); streamingStep(); updateColliders(); collisionStep(); //collisionStepStreamlined(); moveParticles(); swapLattices(); } void LBM2D_1D_indices::doStepCUDA() { // ============================================= clear back lattice CUDA clearBackLatticeKernel << <numBlocks, numThreads >> > (d_backLattice); // ============================================= update inlets CUDA updateInletsKernel << <numBlocks, numThreads >> > (d_backLattice, inletVelocity); // ============================================= streaming step CUDA streamingStepKernel << <numBlocks, numThreads >> > (d_backLattice, d_frontLattice); // ============================================= update colliders CUDA updateCollidersKernel << <numBlocks, numThreads >> > (d_backLattice, d_tCol); // ============================================= collision step CUDA collisionStepKernel << <numBlocks, numThreads, numThreads * sizeof(Node) >> > (d_backLattice, d_velocities); // ============================================= move particles CUDA - different 
respawn from CPU !!! glm::vec3 *dptr; hipGraphicsMapResources(1, &cudaParticleVerticesVBO, 0); size_t num_bytes; hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cudaParticleVerticesVBO); //printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes); glm::vec3 *d_particleColors; hipGraphicsMapResources(1, &cudaParticleColorsVBO, 0); hipGraphicsResourceGetMappedPointer((void **)&d_particleColors, &num_bytes, cudaParticleColorsVBO); moveParticlesKernelInterop << <numBlocks, numThreads >> > (dptr, d_velocities, d_numParticles, d_particleColors); hipGraphicsUnmapResources(1, &cudaParticleVerticesVBO, 0); hipGraphicsUnmapResources(1, &cudaParticleColorsVBO, 0); swapLattices(); } void LBM2D_1D_indices::clearBackLattice() { for (int i = 0; i < latticeSize; i++) { for (int j = 0; j < 9; j++) { backLattice[i].adj[j] = 0.0f; } } #ifdef DRAW_VELOCITY_ARROWS velocityArrows.clear(); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.clear(); #endif } void LBM2D_1D_indices::streamingStep() { for (int x = 0; x < latticeWidth; x++) { //#pragma omp parallel for/* simd */ for (int y = 0; y < latticeHeight; y++) { backLattice[getIdx(x, y)].adj[DIR_MIDDLE] += frontLattice[getIdx(x, y)].adj[DIR_MIDDLE]; int right; int left; int top; int bottom; right = x + 1; left = x - 1; top = y + 1; bottom = y - 1; if (right > latticeWidth - 1) { right = latticeWidth - 1; } if (left < 0) { left = 0; } if (top > latticeHeight - 1) { top = latticeHeight - 1; } if (bottom < 0) { bottom = 0; } backLattice[getIdx(x, y)].adj[DIR_RIGHT] += frontLattice[getIdx(left, y)].adj[DIR_RIGHT]; backLattice[getIdx(x, y)].adj[DIR_TOP] += frontLattice[getIdx(x, bottom)].adj[DIR_TOP]; backLattice[getIdx(x, y)].adj[DIR_LEFT] += frontLattice[getIdx(right, y)].adj[DIR_LEFT]; backLattice[getIdx(x, y)].adj[DIR_BOTTOM] += frontLattice[getIdx(x, top)].adj[DIR_BOTTOM]; backLattice[getIdx(x, y)].adj[DIR_TOP_RIGHT] += frontLattice[getIdx(left, bottom)].adj[DIR_TOP_RIGHT]; backLattice[getIdx(x, y)].adj[DIR_TOP_LEFT] += frontLattice[getIdx(right, bottom)].adj[DIR_TOP_LEFT]; backLattice[getIdx(x, y)].adj[DIR_BOTTOM_LEFT] += frontLattice[getIdx(right, top)].adj[DIR_BOTTOM_LEFT]; backLattice[getIdx(x, y)].adj[DIR_BOTTOM_RIGHT] += frontLattice[getIdx(left, top)].adj[DIR_BOTTOM_RIGHT]; for (int i = 0; i < 9; i++) { if (backLattice[getIdx(x, y)].adj[i] < 0.0f) { backLattice[getIdx(x, y)].adj[i] = 0.0f; } else if (backLattice[getIdx(x, y)].adj[i] > 1.0f) { backLattice[getIdx(x, y)].adj[i] = 1.0f; } } } } } void LBM2D_1D_indices::collisionStep() { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; for (int x = 0; x < latticeWidth; x++) { //#pragma omp parallel for /*simd*/ for (int y = 0; y < latticeHeight; y++) { float macroDensity = calculateMacroscopicDensity(x, y); glm::vec3 macroVelocity = calculateMacroscopicVelocity(x, y, macroDensity); int idx = getIdx(x, y); velocities[idx] = glm::vec2(macroVelocity.x, macroVelocity.y); #ifdef DRAW_VELOCITY_ARROWS velocityArrows.push_back(glm::vec3(x, y, -0.5f)); velocityArrows.push_back(glm::vec3(velocities[idx] * 5.0f, -1.0f) + glm::vec3(x, y, 0.0f)); #endif // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); 
float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); #ifdef SUBGRID_EXPERIMENTAL // SUBGRID MODEL float middleTensor; float rightTensor; float topTensor; float leftTensor; float bottomTensor; float topRightTensor; float topLeftTensor; float bottomLeftTensor; float bottomRightTensor; float pi[9]; /*float sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[i], directionVectors[i]); }*/ float sum = 0.0f; middleTensor = sum * (backLattice[idx].adj[DIR_MIDDLE] - middleEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[1], directionVectors[1]); } rightTensor = sum * (backLattice[idx].adj[DIR_RIGHT] - rightEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[2], directionVectors[2]); } topTensor = sum * (backLattice[idx].adj[DIR_TOP] - topEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[3], directionVectors[3]); } leftTensor = sum * (backLattice[idx].adj[DIR_LEFT] - leftEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[4], directionVectors[4]); } bottomTensor = sum * (backLattice[idx].adj[DIR_BOTTOM] - bottomEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[5], directionVectors[5]); } topRightTensor = sum * (backLattice[idx].adj[DIR_TOP_RIGHT] - topRightEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[6], directionVectors[6]); } topLeftTensor = sum * (backLattice[idx].adj[DIR_TOP_LEFT] - topLeftEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[7], directionVectors[7]); } bottomLeftTensor = sum * (backLattice[idx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[8], directionVectors[8]); } bottomRightTensor = sum * (backLattice[idx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); sum = 0.0f; sum += middleTensor * middleTensor; sum += rightTensor * rightTensor; sum += 
topTensor * topTensor; sum += leftTensor * leftTensor; sum += bottomTensor * bottomTensor; sum += topRightTensor * topRightTensor; sum += topLeftTensor * topLeftTensor; sum += bottomLeftTensor * bottomLeftTensor; sum += bottomRightTensor * bottomRightTensor; float S = (-nu + sqrtf(nu * nu + 18.0f * SMAG_C * sqrtf(sum))) / (6.0f * SMAG_C * SMAG_C); tau = 3.0f * (nu + SMAG_C * SMAG_C * S) + 0.5f; itau = 1.0f / tau; //cout << "TAU = " << tau << endl; #endif backLattice[idx].adj[DIR_MIDDLE] -= itau * (backLattice[idx].adj[DIR_MIDDLE] - middleEq); backLattice[idx].adj[DIR_RIGHT] -= itau * (backLattice[idx].adj[DIR_RIGHT] - rightEq); backLattice[idx].adj[DIR_TOP] -= itau * (backLattice[idx].adj[DIR_TOP] - topEq); backLattice[idx].adj[DIR_LEFT] -= itau * (backLattice[idx].adj[DIR_LEFT] - leftEq); backLattice[idx].adj[DIR_BOTTOM] -= itau * (backLattice[idx].adj[DIR_BOTTOM] - bottomEq); backLattice[idx].adj[DIR_TOP_RIGHT] -= itau * (backLattice[idx].adj[DIR_TOP_RIGHT] - topRightEq); backLattice[idx].adj[DIR_TOP_LEFT] -= itau * (backLattice[idx].adj[DIR_TOP_LEFT] - topLeftEq); backLattice[idx].adj[DIR_BOTTOM_LEFT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); backLattice[idx].adj[DIR_BOTTOM_RIGHT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } void LBM2D_1D_indices::collisionStepStreamlined() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { float macroDensity = calculateMacroscopicDensity(x, y); glm::vec3 macroVelocity = calculateMacroscopicVelocity(x, y, macroDensity); int idx = getIdx(x, y); velocities[idx] = glm::vec2(macroVelocity.x, macroVelocity.y); #ifdef DRAW_VELOCITY_ARROWS velocityArrows.push_back(glm::vec3(x, y, -0.5f)); velocityArrows.push_back(glm::vec3(velocities[idx] * 5.0f, -1.0f) + glm::vec3(x, y, 0.0f)); #endif // let's find the equilibrium float leftTermMiddle = WEIGHT_MIDDLE * macroDensity; float leftTermAxis = WEIGHT_AXIS * macroDensity; float leftTermDiagonal = WEIGHT_DIAGONAL * macroDensity; float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); float rightEq = leftTermAxis * (1.0f + 3.0f * macroVelocity.x + 4.5f * macroVelocity.x * macroVelocity.x - thirdTerm); float topEq = leftTermAxis * (1.0f + 3.0f * macroVelocity.y + 4.5f * macroVelocity.y * macroVelocity.y - thirdTerm); float leftEq = leftTermAxis * (1.0f - 3.0f * macroVelocity.x + 4.5f * macroVelocity.x * macroVelocity.x - thirdTerm); float bottomEq = leftTermAxis * (1.0f - 3.0f * macroVelocity.y + 4.5f * macroVelocity.y * macroVelocity.y - thirdTerm); float topRightEq = leftTermDiagonal * (1.0f + 3.0f * (macroVelocity.x + macroVelocity.y) + 4.5f * (macroVelocity.x + macroVelocity.y) * (macroVelocity.x + macroVelocity.y) - thirdTerm); float topLeftEq = leftTermDiagonal * (1.0f + 3.0f * (-macroVelocity.x + macroVelocity.y) + 4.5f * (-macroVelocity.x + macroVelocity.y) * (-macroVelocity.x + macroVelocity.y) - thirdTerm); float bottomLeftEq = leftTermDiagonal * (1.0f + 3.0f * (-macroVelocity.x - macroVelocity.y) + 4.5f * (-macroVelocity.x - macroVelocity.y) * (-macroVelocity.x - macroVelocity.y) - thirdTerm); float bottomRightEq = leftTermDiagonal * (1.0f + 3.0f * (macroVelocity.x - macroVelocity.y) + 4.5f * (macroVelocity.x - macroVelocity.y) * 
(macroVelocity.x - macroVelocity.y) - thirdTerm); backLattice[idx].adj[DIR_MIDDLE] -= itau * (backLattice[idx].adj[DIR_MIDDLE] - middleEq); backLattice[idx].adj[DIR_RIGHT] -= itau * (backLattice[idx].adj[DIR_RIGHT] - rightEq); backLattice[idx].adj[DIR_TOP] -= itau * (backLattice[idx].adj[DIR_TOP] - topEq); backLattice[idx].adj[DIR_LEFT] -= itau * (backLattice[idx].adj[DIR_LEFT] - leftEq); backLattice[idx].adj[DIR_BOTTOM] -= itau * (backLattice[idx].adj[DIR_BOTTOM] - bottomEq); backLattice[idx].adj[DIR_TOP_RIGHT] -= itau * (backLattice[idx].adj[DIR_TOP_RIGHT] - topRightEq); backLattice[idx].adj[DIR_TOP_LEFT] -= itau * (backLattice[idx].adj[DIR_TOP_LEFT] - topLeftEq); backLattice[idx].adj[DIR_BOTTOM_LEFT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); backLattice[idx].adj[DIR_BOTTOM_RIGHT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } void LBM2D_1D_indices::moveParticles() { glm::vec2 adjVelocities[4]; //#pragma omp parallel for/* simd*/ for (int i = 0; i < particleSystem->numParticles; i++) { float x = particleVertices[i].x; float y = particleVertices[i].y; //printf("OpenMP move particles num threads = %d\n", omp_get_num_threads()); int leftX = (int)x; int rightX = leftX + 1; int bottomY = (int)y; int topY = bottomY + 1; adjVelocities[0] = velocities[getIdx(leftX, topY)]; adjVelocities[1] = velocities[getIdx(rightX, topY)]; adjVelocities[2] = velocities[getIdx(leftX, bottomY)]; adjVelocities[3] = velocities[getIdx(rightX, bottomY)]; float horizontalRatio = x - leftX; float verticalRatio = y - bottomY; glm::vec2 topVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec2 bottomVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec2 finalVelocity = bottomVelocity * verticalRatio + topVelocity * (1.0f - verticalRatio); #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.push_back(particleVertices[i]); #endif if (particleSystem->drawStreamlines) { particleSystem->streamLines[i * MAX_STREAMLINE_LENGTH + streamLineCounter] = particleVertices[i]; } particleVertices[i] += glm::vec3(finalVelocity, 0.0f); #ifdef DRAW_PARTICLE_VELOCITY_ARROWS glm::vec3 tmp = particleVertices[i] + 10.0f * glm::vec3(finalVelocity, 0.0f); particleArrows.push_back(tmp); #endif if (!respawnLinearly) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1) { if (mirrorSides) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1) { particleVertices[i].x = 0.0f; particleVertices[i].y = rand2D(i, (int)y) * (latticeHeight - 1); } else { particleVertices[i].y = (float)((int)(particleVertices[i].y + latticeHeight - 1) % (latticeHeight - 1)); } } else { particleVertices[i].x = 0.0f; particleVertices[i].y = rand2D(i, (int)y) * (latticeHeight - 1); } particleVertices[i].z = 0.0f; } } else { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1) { if (mirrorSides) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1) { particleVertices[i] = glm::vec3(0, respawnIndex++, 0.0f); if (respawnIndex >= respawnMaxY) { respawnIndex = respawnMinY; } } else { 
particleVertices[i] = glm::vec3(x, (int)(particleVertices[i].y + latticeHeight - 1) % (latticeHeight - 1), 0.0f); } } else { particleVertices[i] = glm::vec3(0, respawnIndex++, 0.0f); if (respawnIndex >= respawnMaxY) { respawnIndex = respawnMinY; } } if (particleSystem->drawStreamlines) { for (int k = 0; k < MAX_STREAMLINE_LENGTH; k++) { particleSystem->streamLines[i * MAX_STREAMLINE_LENGTH + k] = particleVertices[i]; } } } } } streamLineCounter++; if (streamLineCounter > MAX_STREAMLINE_LENGTH) { streamLineCounter = 0; } } void LBM2D_1D_indices::updateInlets() { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; float macroDensity = 1.0f; glm::vec3 macroVelocity = inletVelocity; // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); // this can all be rewritten into arrays + for cycles! float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(0, y); backLattice[idx].adj[DIR_MIDDLE] = middleEq; backLattice[idx].adj[DIR_RIGHT] = rightEq; backLattice[idx].adj[DIR_TOP] = topEq; backLattice[idx].adj[DIR_LEFT] = leftEq; backLattice[idx].adj[DIR_BOTTOM] = bottomEq; backLattice[idx].adj[DIR_TOP_RIGHT] = topRightEq; backLattice[idx].adj[DIR_TOP_LEFT] = topLeftEq; backLattice[idx].adj[DIR_BOTTOM_LEFT] = bottomLeftEq; backLattice[idx].adj[DIR_BOTTOM_RIGHT] = bottomRightEq; for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } //velocities[idx] = macroVelocity; } } void 
LBM2D_1D_indices::updateColliders() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(x, y); if (tCol->area[idx]) { float right = backLattice[idx].adj[DIR_RIGHT]; float top = backLattice[idx].adj[DIR_TOP]; float left = backLattice[idx].adj[DIR_LEFT]; float bottom = backLattice[idx].adj[DIR_BOTTOM]; float topRight = backLattice[idx].adj[DIR_TOP_RIGHT]; float topLeft = backLattice[idx].adj[DIR_TOP_LEFT]; float bottomLeft = backLattice[idx].adj[DIR_BOTTOM_LEFT]; float bottomRight = backLattice[idx].adj[DIR_BOTTOM_RIGHT]; backLattice[idx].adj[DIR_RIGHT] = left; backLattice[idx].adj[DIR_TOP] = bottom; backLattice[idx].adj[DIR_LEFT] = right; backLattice[idx].adj[DIR_BOTTOM] = top; backLattice[idx].adj[DIR_TOP_RIGHT] = bottomLeft; backLattice[idx].adj[DIR_TOP_LEFT] = bottomRight; backLattice[idx].adj[DIR_BOTTOM_LEFT] = topRight; backLattice[idx].adj[DIR_BOTTOM_RIGHT] = topLeft; } } } } void LBM2D_1D_indices::initBuffers() { glGenVertexArrays(1, &vao); glBindVertexArray(vao); glGenBuffers(1, &vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo); vector<glm::vec3> bData; for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { bData.push_back(glm::vec3(x, y, 0.0f)); } } glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * bData.size(), &bData[0], GL_STATIC_DRAW); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #ifdef DRAW_VELOCITY_ARROWS // Velocity arrows glGenVertexArrays(1, &velocityVAO); glBindVertexArray(velocityVAO); glGenBuffers(1, &velocityVBO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS // Particle arrows glGenVertexArrays(1, &particleArrowsVAO); glBindVertexArray(particleArrowsVAO); glGenBuffers(1, &particleArrowsVBO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #endif } void LBM2D_1D_indices::initLattice() { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(x, y); frontLattice[idx].adj[DIR_MIDDLE] = weightMiddle; for (int dir = 1; dir <= 4; dir++) { frontLattice[idx].adj[dir] = weightAxis; } for (int dir = 5; dir <= 8; dir++) { frontLattice[idx].adj[dir] = weightDiagonal; } } } } void LBM2D_1D_indices::precomputeRespawnRange() { respawnMinY = 0; respawnMaxY = latticeHeight; bool minSet = false; bool maxSet = false; for (int y = 0; y < latticeHeight; y++) { if (!minSet && !tCol->area[latticeWidth * y]) { respawnMinY = y; minSet = true; } if (minSet && tCol->area[latticeWidth * y]) { respawnMaxY = y - 1; maxSet = true; break; } } if (!minSet && !maxSet) { cerr << "The left wall of the scene is completely blocked off! 
Inlet incorrect" << endl; exit(-1); } if (!maxSet) { respawnMaxY = latticeHeight - 1; } cout << " || min respawn y = " << respawnMinY << ", max respawn y = " << respawnMaxY << endl; respawnIndex = respawnMinY; hipMemcpyToSymbol(d_respawnIndex, &respawnIndex, sizeof(int)); hipMemcpyToSymbol(d_respawnMinY, &respawnMinY, sizeof(int)); hipMemcpyToSymbol(d_respawnMaxY, &respawnMaxY, sizeof(int)); } void LBM2D_1D_indices::swapLattices() { // CPU Node *tmp = frontLattice; frontLattice = backLattice; backLattice = tmp; // GPU tmp = d_frontLattice; d_frontLattice = d_backLattice; d_backLattice = tmp; } float LBM2D_1D_indices::calculateMacroscopicDensity(int x, int y) { float macroDensity = 0.0f; int idx = getIdx(x, y); for (int i = 0; i < 9; i++) { macroDensity += backLattice[idx].adj[i]; } return macroDensity; } glm::vec3 LBM2D_1D_indices::calculateMacroscopicVelocity(int x, int y, float macroDensity) { glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f); int idx = getIdx(x, y); macroVelocity += vRight * backLattice[idx].adj[DIR_RIGHT]; macroVelocity += vTop * backLattice[idx].adj[DIR_TOP]; macroVelocity += vLeft * backLattice[idx].adj[DIR_LEFT]; macroVelocity += vBottom * backLattice[idx].adj[DIR_BOTTOM]; macroVelocity += vTopRight * backLattice[idx].adj[DIR_TOP_RIGHT]; macroVelocity += vTopLeft * backLattice[idx].adj[DIR_TOP_LEFT]; macroVelocity += vBottomLeft * backLattice[idx].adj[DIR_BOTTOM_LEFT]; macroVelocity += vBottomRight * backLattice[idx].adj[DIR_BOTTOM_RIGHT]; macroVelocity /= macroDensity; return macroVelocity; }
37e231a4357afafeadd65d5b0c76490a87e14832.cu
#include "LBM2D_1D_indices.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <vector> #include <iostream> #include <glm\gtx\norm.hpp> #include "CUDAUtils.cuh" #include <omp.h> __constant__ int d_latticeWidth; ///< Lattice width constant on the device __constant__ int d_latticeHeight; ///< Lattice height constant on the device __constant__ int d_latticeSize; ///< Lattice size constant on the device (latticeWidth * latticeHeight) __constant__ float d_tau; ///< Tau value on the device __constant__ float d_itau; ///< Inverse tau value (1.0f / tau) on the device __constant__ int d_mirrorSides; ///< Whether to mirror sides (cycle) on the device //__constant__ int d_visualizeVelocity; __device__ int d_respawnIndex = 0; ///< Respawn index (y coordinate) for particle respawn, not used __constant__ int d_respawnMinY; ///< Minimum y respawn coordinate, not used __constant__ int d_respawnMaxY; ///< Maximum y respawn coordinate, not used __constant__ glm::vec3 d_directionVectors[NUM_2D_DIRECTIONS]; ///< Constant array of direction vectors /// Returns uniform random between 0.0 and 1.0. Provided from different student's work. __device__ __host__ float rand2D(int x, int y) { int n = x + y * 57; n = (n << 13) ^ n; return ((1.0f - ((n * (n * n * 15731 + 789221) + 1376312589) & 0x7fffffff) / 1073741824.0f) + 1.0f) * 0.5f; } /// Returns the flattened index using the device constants and provided coordinates. __device__ int getIdxKernel(int x, int y) { return x + y * d_latticeWidth; } /// Maps the value to the viridis color map. __device__ glm::vec3 mapToViridis2D(float val) { val = glm::clamp(val, 0.0f, 1.0f); int discreteVal = (int)(val * 255.0f); return glm::vec3(viridis_cm[discreteVal][0], viridis_cm[discreteVal][1], viridis_cm[discreteVal][2]); } /// Kernel for moving particles that uses OpenGL interoperability. /** Kernel for moving particles that uses OpenGL interoperability for setting particle positions and colors. If the particles venture beyond the simulation bounding volume, they are randomly respawned. If we use side mirroring (cycling), particles that go beyond side walls (on the y axis) will be mirrored/cycled to the other side of the bounding volume. \param[in] particleVertices Vertices (positions stored in VBO) of particles to be updated/moved. \param[in] velocities Array of velocities that will act on the particles. \param[in] numParticles Number of particles. \param[in] particleColors VBO of particle colors. 
*/ __global__ void moveParticlesKernelInterop(glm::vec3 *particleVertices, glm::vec2 *velocities, int *numParticles, glm::vec3 *particleColors) { glm::vec2 adjVelocities[4]; int idx = threadIdx.x + blockDim.x * blockIdx.x; while (idx < *numParticles) { float x = particleVertices[idx].x; float y = particleVertices[idx].y; int leftX = (int)x; int rightX = leftX + 1; int bottomY = (int)y; int topY = bottomY + 1; adjVelocities[0] = velocities[getIdxKernel(leftX, topY)]; adjVelocities[1] = velocities[getIdxKernel(rightX, topY)]; adjVelocities[2] = velocities[getIdxKernel(leftX, bottomY)]; adjVelocities[3] = velocities[getIdxKernel(rightX, bottomY)]; float horizontalRatio = x - leftX; float verticalRatio = y - bottomY; glm::vec2 topVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec2 bottomVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec2 finalVelocity = bottomVelocity * verticalRatio + topVelocity * (1.0f - verticalRatio); //particleVertices[idx] += make_float3(finalVelocity.x, 0.0f); particleVertices[idx].x += finalVelocity.x; particleVertices[idx].y += finalVelocity.y; //particleColors[idx] = glm::vec3(glm::length2(finalVelocity) * 4.0f); //particleColors[idx] = mapToColor(glm::length2(finalVelocity) * 4.0f); particleColors[idx] = mapToViridis2D(glm::length2(finalVelocity) * 4.0f); if (particleVertices[idx].x <= 0.0f || particleVertices[idx].x >= d_latticeWidth - 1 || particleVertices[idx].y <= 0.0f || particleVertices[idx].y >= d_latticeHeight - 1) { if (d_mirrorSides) { if (particleVertices[idx].x <= 0.0f || particleVertices[idx].x >= d_latticeWidth - 1) { particleVertices[idx].x = 0.0f; particleVertices[idx].y = rand2D(idx, y) * (d_latticeHeight - 1); ////particleVertices[idx].y = d_respawnIndex++; //particleVertices[idx].y = d_respawnIndex; //atomicAdd(&d_respawnIndex, 1); //if (d_respawnIndex >= d_respawnMaxY) { // //d_respawnIndex = d_respawnMinY; // atomicExch(&d_respawnIndex, d_respawnMinY); //} } else { particleVertices[idx].y = (float)((int)(particleVertices[idx].y + d_latticeHeight - 1) % (d_latticeHeight - 1)); } } else { particleVertices[idx].x = 0.0f; particleVertices[idx].y = rand2D(idx, y) * (d_latticeHeight - 1); ////particleVertices[idx].y = d_respawnIndex++; //particleVertices[idx].y = d_respawnIndex; //atomicAdd(&d_respawnIndex, 1); //if (d_respawnIndex >= d_respawnMaxY) { // //d_respawnIndex = d_respawnMinY; // atomicExch(&d_respawnIndex, d_respawnMinY); //} } particleVertices[idx].z = 0.0f; } idx += blockDim.x * gridDim.x; } } /// Kernel for clearing the back lattice. /** Kernel that clears the back lattice. \param[in] backLattice Pointer to the back lattice to be cleared. */ __global__ void clearBackLatticeKernel(Node *backLattice) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < d_latticeSize) { for (int i = 0; i < 9; i++) { backLattice[idx].adj[i] = 0.0f; } } } /// Kernel that streams the microscopic particles from the previous frame. /** Kernel that streams the microscopic particles from the previous frame. \param[in] backLatice Lattice that will be used in the current frame (the one we are currently updating). \param[in] frontLattice Lattice from the previous frame from which we stream the particles. 
*/ __global__ void streamingStepKernel(Node *backLattice, Node *frontLattice) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < d_latticeSize) { int x = idx % d_latticeWidth; int y = (idx / d_latticeWidth) % d_latticeHeight; backLattice[idx].adj[DIR_MIDDLE] += frontLattice[idx].adj[DIR_MIDDLE]; int right; int left; int top; int bottom; right = x + 1; left = x - 1; top = y + 1; bottom = y - 1; if (right > d_latticeWidth - 1) { right = d_latticeWidth - 1; } if (left < 0) { left = 0; } if (top > d_latticeHeight - 1) { top = d_latticeHeight - 1; } if (bottom < 0) { bottom = 0; } backLattice[idx].adj[DIR_RIGHT] += frontLattice[getIdxKernel(left, y)].adj[DIR_RIGHT]; backLattice[idx].adj[DIR_TOP] += frontLattice[getIdxKernel(x, bottom)].adj[DIR_TOP]; backLattice[idx].adj[DIR_LEFT] += frontLattice[getIdxKernel(right, y)].adj[DIR_LEFT]; backLattice[idx].adj[DIR_BOTTOM] += frontLattice[getIdxKernel(x, top)].adj[DIR_BOTTOM]; backLattice[idx].adj[DIR_TOP_RIGHT] += frontLattice[getIdxKernel(left, bottom)].adj[DIR_TOP_RIGHT]; backLattice[idx].adj[DIR_TOP_LEFT] += frontLattice[getIdxKernel(right, bottom)].adj[DIR_TOP_LEFT]; backLattice[idx].adj[DIR_BOTTOM_LEFT] += frontLattice[getIdxKernel(right, top)].adj[DIR_BOTTOM_LEFT]; backLattice[idx].adj[DIR_BOTTOM_RIGHT] += frontLattice[getIdxKernel(left, top)].adj[DIR_BOTTOM_RIGHT]; for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } /// Kernel for updating the inlets. /** Kernel for updating the inlets. Acts the same way as collision step but with predetermined velocity and density. The inlet is the left wall of the simulation bounding volume. \param[in] backLattice The back lattice where we update node values. \param[in] velocities Velocities array for the lattice. \param[in] inletVelocity Our desired inlet velocity. */ __global__ void updateInletsKernel(Node *lattice, glm::vec3 inletVelocity) { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; float macroDensity = 1.0f; //glm::vec3 macroVelocity = inletVelocity; // unnecessary variable -> remove const glm::vec3 vRight = glm::vec3(1.0f, 0.0f, 0.0f); const glm::vec3 vTop = glm::vec3(0.0f, 1.0f, 0.0f); const glm::vec3 vLeft = glm::vec3(-1.0f, 0.0f, 0.0f); const glm::vec3 vBottom = glm::vec3(0.0f, -1.0f, 0.0f); const glm::vec3 vTopRight = glm::vec3(1.0f, 1.0f, 0.0f); const glm::vec3 vTopLeft = glm::vec3(-1.0f, 1.0f, 0.0f); const glm::vec3 vBottomLeft = glm::vec3(-1.0f, -1.0f, 0.0f); const glm::vec3 vBottomRight = glm::vec3(1.0f, -1.0f, 0.0f); // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float macroVelocityDot = glm::dot(inletVelocity, inletVelocity); float thirdTerm = 1.5f * macroVelocityDot / LAT_SPEED_SQ; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); // this can all be rewritten into arrays + for cycles! 
float dotProd = glm::dot(vRight, inletVelocity); float firstTerm = 3.0f * dotProd / LAT_SPEED; float secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, inletVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); int idx = threadIdx.x + blockDim.x * blockIdx.x; int x = idx % d_latticeWidth; if (x == 0 && idx < d_latticeSize) { lattice[idx].adj[DIR_MIDDLE] = middleEq; lattice[idx].adj[DIR_RIGHT] = rightEq; lattice[idx].adj[DIR_TOP] = topEq; lattice[idx].adj[DIR_LEFT] = leftEq; lattice[idx].adj[DIR_BOTTOM] = bottomEq; lattice[idx].adj[DIR_TOP_RIGHT] = topRightEq; lattice[idx].adj[DIR_TOP_LEFT] = topLeftEq; lattice[idx].adj[DIR_BOTTOM_LEFT] = bottomLeftEq; lattice[idx].adj[DIR_BOTTOM_RIGHT] = bottomRightEq; for (int i = 0; i < 9; i++) { if (lattice[idx].adj[i] < 0.0f) { lattice[idx].adj[i] = 0.0f; } else if (lattice[idx].adj[i] > 1.0f) { lattice[idx].adj[i] = 1.0f; } } } } /// Kernel for updating colliders/obstacles in the lattice. /** Updates colliders/obstacles by using the full bounce back approach. \param[in] backLattice Back lattice in which we do our calculations. \param[in] velocities Velocities array for the lattice. \param[in] heightMap Height map of the scene. 
*/ __global__ void updateCollidersKernel(Node *backLattice, bool *tCol) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < d_latticeSize) { if (tCol[idx]) { float right = backLattice[idx].adj[DIR_RIGHT]; float top = backLattice[idx].adj[DIR_TOP]; float left = backLattice[idx].adj[DIR_LEFT]; float bottom = backLattice[idx].adj[DIR_BOTTOM]; float topRight = backLattice[idx].adj[DIR_TOP_RIGHT]; float topLeft = backLattice[idx].adj[DIR_TOP_LEFT]; float bottomLeft = backLattice[idx].adj[DIR_BOTTOM_LEFT]; float bottomRight = backLattice[idx].adj[DIR_BOTTOM_RIGHT]; backLattice[idx].adj[DIR_RIGHT] = left; backLattice[idx].adj[DIR_TOP] = bottom; backLattice[idx].adj[DIR_LEFT] = right; backLattice[idx].adj[DIR_BOTTOM] = top; backLattice[idx].adj[DIR_TOP_RIGHT] = bottomLeft; backLattice[idx].adj[DIR_TOP_LEFT] = bottomRight; backLattice[idx].adj[DIR_BOTTOM_LEFT] = topRight; backLattice[idx].adj[DIR_BOTTOM_RIGHT] = topLeft; } } } /// Kernel for calculating the collision operator. /** Kernel that calculates the collision operator using Bhatnagar-Gross-Krook operator. Uses shared memory for speedup. \param[in] backLattice Back lattice in which we do our calculations. \param[in] velocities Velocities array for the lattice. */ __global__ void collisionStepKernel(Node *backLattice, glm::vec2 *velocities) { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; int idx = threadIdx.x + blockDim.x * blockIdx.x; // 1D array kernel int cacheIdx = threadIdx.x; extern __shared__ Node cache[]; if (idx < d_latticeSize) { cache[cacheIdx] = backLattice[idx]; float macroDensity = 0.0f; for (int i = 0; i < 9; i++) { macroDensity += cache[cacheIdx].adj[i]; } glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f); macroVelocity += LAT_SPEED * d_directionVectors[DIR_RIGHT] * cache[cacheIdx].adj[DIR_RIGHT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_TOP] * cache[cacheIdx].adj[DIR_TOP]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_LEFT] * cache[cacheIdx].adj[DIR_LEFT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_BOTTOM] * cache[cacheIdx].adj[DIR_BOTTOM]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_TOP_RIGHT] * cache[cacheIdx].adj[DIR_TOP_RIGHT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_TOP_LEFT] * cache[cacheIdx].adj[DIR_TOP_LEFT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_BOTTOM_LEFT] * cache[cacheIdx].adj[DIR_BOTTOM_LEFT]; macroVelocity += LAT_SPEED * d_directionVectors[DIR_BOTTOM_RIGHT] * cache[cacheIdx].adj[DIR_BOTTOM_RIGHT]; macroVelocity /= macroDensity; //velocities[idx] = glm::vec2(macroVelocity.x, macroVelocity.y); velocities[idx].x = macroVelocity.x; velocities[idx].y = macroVelocity.y; // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float thirdTerm = 1.5f * glm::dot(macroVelocity, macroVelocity) / LAT_SPEED_SQ; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); // this can all be rewritten into arrays + for cycles! 
float dotProd = glm::dot(d_directionVectors[DIR_RIGHT], macroVelocity); float firstTerm = 3.0f * dotProd / LAT_SPEED; float secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_TOP], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_LEFT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_BOTTOM], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_TOP_RIGHT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_TOP_LEFT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_BOTTOM_LEFT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(d_directionVectors[DIR_BOTTOM_RIGHT], macroVelocity); firstTerm = 3.0f * dotProd / LAT_SPEED; secondTerm = 4.5f * dotProd * dotProd / LAT_SPEED_SQ; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); cache[cacheIdx].adj[DIR_MIDDLE] -= d_itau * (cache[cacheIdx].adj[DIR_MIDDLE] - middleEq); cache[cacheIdx].adj[DIR_RIGHT] -= d_itau * (cache[cacheIdx].adj[DIR_RIGHT] - rightEq); cache[cacheIdx].adj[DIR_TOP] -= d_itau * (cache[cacheIdx].adj[DIR_TOP] - topEq); cache[cacheIdx].adj[DIR_LEFT] -= d_itau * (cache[cacheIdx].adj[DIR_LEFT] - leftEq); cache[cacheIdx].adj[DIR_BOTTOM] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM] - bottomEq); cache[cacheIdx].adj[DIR_TOP_RIGHT] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_RIGHT] - topRightEq); cache[cacheIdx].adj[DIR_TOP_LEFT] -= d_itau * (cache[cacheIdx].adj[DIR_TOP_LEFT] - topLeftEq); cache[cacheIdx].adj[DIR_BOTTOM_LEFT] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); cache[cacheIdx].adj[DIR_BOTTOM_RIGHT] -= d_itau * (cache[cacheIdx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); for (int i = 0; i < 9; i++) { if (cache[cacheIdx].adj[i] < 0.0f) { cache[cacheIdx].adj[i] = 0.0f; } else if (cache[cacheIdx].adj[i] > 1.0f) { cache[cacheIdx].adj[i] = 1.0f; } } backLattice[idx] = cache[cacheIdx]; } } LBM2D_1D_indices::LBM2D_1D_indices() { } LBM2D_1D_indices::LBM2D_1D_indices(glm::ivec3 dim, string sceneFilename, float tau, ParticleSystemLBM *particleSystem, int numThreads) : LBM(nullptr, dim, sceneFilename, tau, particleSystem), numThreads(numThreads) { initScene(); frontLattice = new Node[latticeSize](); backLattice = new Node[latticeSize](); velocities = new glm::vec2[latticeSize](); cudaMalloc((void**)&d_frontLattice, sizeof(Node) * latticeSize); 
cudaMalloc((void**)&d_backLattice, sizeof(Node) * latticeSize); cudaMalloc((void**)&d_velocities, sizeof(glm::vec2) * latticeSize); cudaMemcpyToSymbol(d_latticeWidth, &latticeWidth, sizeof(int)); cudaMemcpyToSymbol(d_latticeHeight, &latticeHeight, sizeof(int)); cudaMemcpyToSymbol(d_latticeSize, &latticeSize, sizeof(int)); cudaMemcpyToSymbol(d_tau, &tau, sizeof(float)); cudaMemcpyToSymbol(d_itau, &itau, sizeof(float)); cudaMemcpyToSymbol(d_mirrorSides, &mirrorSides, sizeof(int)); cudaMemcpyToSymbol(d_directionVectors, &directionVectors, sizeof(glm::vec3) * NUM_2D_DIRECTIONS); cudaGraphicsGLRegisterBuffer(&cudaParticleVerticesVBO, particleSystem->vbo, cudaGraphicsMapFlagsWriteDiscard); cudaGraphicsGLRegisterBuffer(&cudaParticleColorsVBO, particleSystem->colorsVBO, cudaGraphicsMapFlagsWriteDiscard); initBuffers(); initLattice(); //updateInlets(frontLattice); cudaMemcpy(d_backLattice, backLattice, sizeof(Node) * latticeSize, cudaMemcpyHostToDevice); cudaMemcpy(d_velocities, velocities, sizeof(glm::vec2) * latticeSize, cudaMemcpyHostToDevice); cudaMemcpy(d_frontLattice, frontLattice, sizeof(Node) * latticeSize, cudaMemcpyHostToDevice); numBlocks = (int)ceil(latticeSize / this->numThreads) + 1; } void LBM2D_1D_indices::resetSimulation() { cout << "Resetting simulation..." << endl; particleSystem->initParticlePositions(latticeWidth, latticeHeight, tCol->area); for (int i = 0; i < latticeWidth * latticeHeight; i++) { for (int j = 0; j < 9; j++) { backLattice[i].adj[j] = 0.0f; } velocities[i] = glm::vec3(0.0f); } initLattice(); cudaMemcpy(d_frontLattice, frontLattice, sizeof(Node) * latticeWidth * latticeHeight, cudaMemcpyHostToDevice); cudaMemcpy(d_backLattice, backLattice, sizeof(Node) * latticeWidth * latticeHeight, cudaMemcpyHostToDevice); cudaMemcpy(d_velocities, velocities, sizeof(glm::vec2) * latticeWidth * latticeHeight, cudaMemcpyHostToDevice); } void LBM2D_1D_indices::switchToCPU() { cout << "Copying data back to CPU for simulation..." 
<< endl; cudaMemcpy(frontLattice, d_frontLattice, sizeof(Node) * latticeSize, cudaMemcpyDeviceToHost); cudaMemcpy(backLattice, d_backLattice, sizeof(Node) * latticeSize, cudaMemcpyDeviceToHost); cudaMemcpy(velocities, d_velocities, sizeof(glm::vec2) * latticeSize, cudaMemcpyDeviceToHost); particleSystem->copyDataFromVBOtoCPU(); } void LBM2D_1D_indices::synchronize() { cudaDeviceSynchronize(); } LBM2D_1D_indices::~LBM2D_1D_indices() { delete[] frontLattice; delete[] backLattice; delete[] velocities; delete tCol; cudaFree(d_frontLattice); cudaFree(d_backLattice); cudaFree(d_tCol); cudaFree(d_velocities); cudaGraphicsUnregisterResource(cudaParticleVerticesVBO); cudaGraphicsUnregisterResource(cudaParticleColorsVBO); } void LBM2D_1D_indices::recalculateVariables() { LBM::recalculateVariables(); cudaMemcpyToSymbol(d_tau, &tau, sizeof(float)); cudaMemcpyToSymbol(d_itau, &itau, sizeof(float)); } void LBM2D_1D_indices::initScene() { tCol = new LatticeCollider(sceneFilename); latticeWidth = tCol->width; latticeHeight = tCol->height; latticeDepth = 1; latticeSize = latticeWidth * latticeHeight; precomputeRespawnRange(); cudaMalloc((void**)&d_tCol, sizeof(bool) * latticeSize); cudaMemcpy(d_tCol, &tCol->area[0], sizeof(bool) * latticeSize, cudaMemcpyHostToDevice); particleVertices = particleSystem->particleVertices; d_numParticles = particleSystem->d_numParticles; particleSystem->initParticlePositions(latticeWidth, latticeHeight, tCol->area); } void LBM2D_1D_indices::draw(ShaderProgram &shader) { //glPointSize(0.4f); //shader.setVec3("u_Color", glm::vec3(0.4f, 0.4f, 0.1f)); //glUseProgram(shader.id); //glBindVertexArray(vao); //glDrawArrays(GL_POINTS, 0, latticeWidth * latticeHeight); //cout << "Velocity arrows size = " << velocityArrows.size() << endl; #ifdef DRAW_VELOCITY_ARROWS shader.setVec3("u_Color", glm::vec3(0.2f, 0.3f, 1.0f)); glBindVertexArray(velocityVAO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * velocityArrows.size(), &velocityArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, velocityArrows.size()); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS shader.setVec3("u_Color", glm::vec3(0.8f, 1.0f, 0.6f)); glBindVertexArray(particleArrowsVAO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * particleArrows.size(), &particleArrows[0], GL_STATIC_DRAW); glDrawArrays(GL_LINES, 0, particleArrows.size()); #endif // Draw scene collider tCol->draw(shader); } void LBM2D_1D_indices::doStep() { clearBackLattice(); updateInlets(); streamingStep(); updateColliders(); collisionStep(); //collisionStepStreamlined(); moveParticles(); swapLattices(); } void LBM2D_1D_indices::doStepCUDA() { // ============================================= clear back lattice CUDA clearBackLatticeKernel << <numBlocks, numThreads >> > (d_backLattice); // ============================================= update inlets CUDA updateInletsKernel << <numBlocks, numThreads >> > (d_backLattice, inletVelocity); // ============================================= streaming step CUDA streamingStepKernel << <numBlocks, numThreads >> > (d_backLattice, d_frontLattice); // ============================================= update colliders CUDA updateCollidersKernel << <numBlocks, numThreads >> > (d_backLattice, d_tCol); // ============================================= collision step CUDA collisionStepKernel << <numBlocks, numThreads, numThreads * sizeof(Node) >> > (d_backLattice, d_velocities); // ============================================= move particles 
CUDA - different respawn from CPU !!! glm::vec3 *dptr; cudaGraphicsMapResources(1, &cudaParticleVerticesVBO, 0); size_t num_bytes; cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, cudaParticleVerticesVBO); //printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes); glm::vec3 *d_particleColors; cudaGraphicsMapResources(1, &cudaParticleColorsVBO, 0); cudaGraphicsResourceGetMappedPointer((void **)&d_particleColors, &num_bytes, cudaParticleColorsVBO); moveParticlesKernelInterop << <numBlocks, numThreads >> > (dptr, d_velocities, d_numParticles, d_particleColors); cudaGraphicsUnmapResources(1, &cudaParticleVerticesVBO, 0); cudaGraphicsUnmapResources(1, &cudaParticleColorsVBO, 0); swapLattices(); } void LBM2D_1D_indices::clearBackLattice() { for (int i = 0; i < latticeSize; i++) { for (int j = 0; j < 9; j++) { backLattice[i].adj[j] = 0.0f; } } #ifdef DRAW_VELOCITY_ARROWS velocityArrows.clear(); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.clear(); #endif } void LBM2D_1D_indices::streamingStep() { for (int x = 0; x < latticeWidth; x++) { //#pragma omp parallel for/* simd */ for (int y = 0; y < latticeHeight; y++) { backLattice[getIdx(x, y)].adj[DIR_MIDDLE] += frontLattice[getIdx(x, y)].adj[DIR_MIDDLE]; int right; int left; int top; int bottom; right = x + 1; left = x - 1; top = y + 1; bottom = y - 1; if (right > latticeWidth - 1) { right = latticeWidth - 1; } if (left < 0) { left = 0; } if (top > latticeHeight - 1) { top = latticeHeight - 1; } if (bottom < 0) { bottom = 0; } backLattice[getIdx(x, y)].adj[DIR_RIGHT] += frontLattice[getIdx(left, y)].adj[DIR_RIGHT]; backLattice[getIdx(x, y)].adj[DIR_TOP] += frontLattice[getIdx(x, bottom)].adj[DIR_TOP]; backLattice[getIdx(x, y)].adj[DIR_LEFT] += frontLattice[getIdx(right, y)].adj[DIR_LEFT]; backLattice[getIdx(x, y)].adj[DIR_BOTTOM] += frontLattice[getIdx(x, top)].adj[DIR_BOTTOM]; backLattice[getIdx(x, y)].adj[DIR_TOP_RIGHT] += frontLattice[getIdx(left, bottom)].adj[DIR_TOP_RIGHT]; backLattice[getIdx(x, y)].adj[DIR_TOP_LEFT] += frontLattice[getIdx(right, bottom)].adj[DIR_TOP_LEFT]; backLattice[getIdx(x, y)].adj[DIR_BOTTOM_LEFT] += frontLattice[getIdx(right, top)].adj[DIR_BOTTOM_LEFT]; backLattice[getIdx(x, y)].adj[DIR_BOTTOM_RIGHT] += frontLattice[getIdx(left, top)].adj[DIR_BOTTOM_RIGHT]; for (int i = 0; i < 9; i++) { if (backLattice[getIdx(x, y)].adj[i] < 0.0f) { backLattice[getIdx(x, y)].adj[i] = 0.0f; } else if (backLattice[getIdx(x, y)].adj[i] > 1.0f) { backLattice[getIdx(x, y)].adj[i] = 1.0f; } } } } } void LBM2D_1D_indices::collisionStep() { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; for (int x = 0; x < latticeWidth; x++) { //#pragma omp parallel for /*simd*/ for (int y = 0; y < latticeHeight; y++) { float macroDensity = calculateMacroscopicDensity(x, y); glm::vec3 macroVelocity = calculateMacroscopicVelocity(x, y, macroDensity); int idx = getIdx(x, y); velocities[idx] = glm::vec2(macroVelocity.x, macroVelocity.y); #ifdef DRAW_VELOCITY_ARROWS velocityArrows.push_back(glm::vec3(x, y, -0.5f)); velocityArrows.push_back(glm::vec3(velocities[idx] * 5.0f, -1.0f) + glm::vec3(x, y, 0.0f)); #endif // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + 
leftTermMiddle * (-thirdTerm); float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); #ifdef SUBGRID_EXPERIMENTAL // SUBGRID MODEL float middleTensor; float rightTensor; float topTensor; float leftTensor; float bottomTensor; float topRightTensor; float topLeftTensor; float bottomLeftTensor; float bottomRightTensor; float pi[9]; /*float sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[i], directionVectors[i]); }*/ float sum = 0.0f; middleTensor = sum * (backLattice[idx].adj[DIR_MIDDLE] - middleEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[1], directionVectors[1]); } rightTensor = sum * (backLattice[idx].adj[DIR_RIGHT] - rightEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[2], directionVectors[2]); } topTensor = sum * (backLattice[idx].adj[DIR_TOP] - topEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[3], directionVectors[3]); } leftTensor = sum * (backLattice[idx].adj[DIR_LEFT] - leftEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[4], directionVectors[4]); } bottomTensor = sum * (backLattice[idx].adj[DIR_BOTTOM] - bottomEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[5], directionVectors[5]); } topRightTensor = sum * (backLattice[idx].adj[DIR_TOP_RIGHT] - topRightEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[6], directionVectors[6]); } topLeftTensor = sum * (backLattice[idx].adj[DIR_TOP_LEFT] - topLeftEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[7], directionVectors[7]); } bottomLeftTensor = sum * (backLattice[idx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); sum = 0.0f; for (int i = 0; i < 9; i++) { sum += glm::dot(directionVectors[8], directionVectors[8]); } bottomRightTensor = sum * (backLattice[idx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); sum = 0.0f; sum += middleTensor * middleTensor; sum += 
rightTensor * rightTensor; sum += topTensor * topTensor; sum += leftTensor * leftTensor; sum += bottomTensor * bottomTensor; sum += topRightTensor * topRightTensor; sum += topLeftTensor * topLeftTensor; sum += bottomLeftTensor * bottomLeftTensor; sum += bottomRightTensor * bottomRightTensor; float S = (-nu + sqrtf(nu * nu + 18.0f * SMAG_C * sqrtf(sum))) / (6.0f * SMAG_C * SMAG_C); tau = 3.0f * (nu + SMAG_C * SMAG_C * S) + 0.5f; itau = 1.0f / tau; //cout << "TAU = " << tau << endl; #endif backLattice[idx].adj[DIR_MIDDLE] -= itau * (backLattice[idx].adj[DIR_MIDDLE] - middleEq); backLattice[idx].adj[DIR_RIGHT] -= itau * (backLattice[idx].adj[DIR_RIGHT] - rightEq); backLattice[idx].adj[DIR_TOP] -= itau * (backLattice[idx].adj[DIR_TOP] - topEq); backLattice[idx].adj[DIR_LEFT] -= itau * (backLattice[idx].adj[DIR_LEFT] - leftEq); backLattice[idx].adj[DIR_BOTTOM] -= itau * (backLattice[idx].adj[DIR_BOTTOM] - bottomEq); backLattice[idx].adj[DIR_TOP_RIGHT] -= itau * (backLattice[idx].adj[DIR_TOP_RIGHT] - topRightEq); backLattice[idx].adj[DIR_TOP_LEFT] -= itau * (backLattice[idx].adj[DIR_TOP_LEFT] - topLeftEq); backLattice[idx].adj[DIR_BOTTOM_LEFT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); backLattice[idx].adj[DIR_BOTTOM_RIGHT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } void LBM2D_1D_indices::collisionStepStreamlined() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { float macroDensity = calculateMacroscopicDensity(x, y); glm::vec3 macroVelocity = calculateMacroscopicVelocity(x, y, macroDensity); int idx = getIdx(x, y); velocities[idx] = glm::vec2(macroVelocity.x, macroVelocity.y); #ifdef DRAW_VELOCITY_ARROWS velocityArrows.push_back(glm::vec3(x, y, -0.5f)); velocityArrows.push_back(glm::vec3(velocities[idx] * 5.0f, -1.0f) + glm::vec3(x, y, 0.0f)); #endif // let's find the equilibrium float leftTermMiddle = WEIGHT_MIDDLE * macroDensity; float leftTermAxis = WEIGHT_AXIS * macroDensity; float leftTermDiagonal = WEIGHT_DIAGONAL * macroDensity; float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); float rightEq = leftTermAxis * (1.0f + 3.0f * macroVelocity.x + 4.5f * macroVelocity.x * macroVelocity.x - thirdTerm); float topEq = leftTermAxis * (1.0f + 3.0f * macroVelocity.y + 4.5f * macroVelocity.y * macroVelocity.y - thirdTerm); float leftEq = leftTermAxis * (1.0f - 3.0f * macroVelocity.x + 4.5f * macroVelocity.x * macroVelocity.x - thirdTerm); float bottomEq = leftTermAxis * (1.0f - 3.0f * macroVelocity.y + 4.5f * macroVelocity.y * macroVelocity.y - thirdTerm); float topRightEq = leftTermDiagonal * (1.0f + 3.0f * (macroVelocity.x + macroVelocity.y) + 4.5f * (macroVelocity.x + macroVelocity.y) * (macroVelocity.x + macroVelocity.y) - thirdTerm); float topLeftEq = leftTermDiagonal * (1.0f + 3.0f * (-macroVelocity.x + macroVelocity.y) + 4.5f * (-macroVelocity.x + macroVelocity.y) * (-macroVelocity.x + macroVelocity.y) - thirdTerm); float bottomLeftEq = leftTermDiagonal * (1.0f + 3.0f * (-macroVelocity.x - macroVelocity.y) + 4.5f * (-macroVelocity.x - macroVelocity.y) * (-macroVelocity.x - macroVelocity.y) - thirdTerm); float bottomRightEq = leftTermDiagonal * (1.0f + 3.0f * (macroVelocity.x - macroVelocity.y) + 4.5f * 
(macroVelocity.x - macroVelocity.y) * (macroVelocity.x - macroVelocity.y) - thirdTerm); backLattice[idx].adj[DIR_MIDDLE] -= itau * (backLattice[idx].adj[DIR_MIDDLE] - middleEq); backLattice[idx].adj[DIR_RIGHT] -= itau * (backLattice[idx].adj[DIR_RIGHT] - rightEq); backLattice[idx].adj[DIR_TOP] -= itau * (backLattice[idx].adj[DIR_TOP] - topEq); backLattice[idx].adj[DIR_LEFT] -= itau * (backLattice[idx].adj[DIR_LEFT] - leftEq); backLattice[idx].adj[DIR_BOTTOM] -= itau * (backLattice[idx].adj[DIR_BOTTOM] - bottomEq); backLattice[idx].adj[DIR_TOP_RIGHT] -= itau * (backLattice[idx].adj[DIR_TOP_RIGHT] - topRightEq); backLattice[idx].adj[DIR_TOP_LEFT] -= itau * (backLattice[idx].adj[DIR_TOP_LEFT] - topLeftEq); backLattice[idx].adj[DIR_BOTTOM_LEFT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_LEFT] - bottomLeftEq); backLattice[idx].adj[DIR_BOTTOM_RIGHT] -= itau * (backLattice[idx].adj[DIR_BOTTOM_RIGHT] - bottomRightEq); for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } } } } void LBM2D_1D_indices::moveParticles() { glm::vec2 adjVelocities[4]; //#pragma omp parallel for/* simd*/ for (int i = 0; i < particleSystem->numParticles; i++) { float x = particleVertices[i].x; float y = particleVertices[i].y; //printf("OpenMP move particles num threads = %d\n", omp_get_num_threads()); int leftX = (int)x; int rightX = leftX + 1; int bottomY = (int)y; int topY = bottomY + 1; adjVelocities[0] = velocities[getIdx(leftX, topY)]; adjVelocities[1] = velocities[getIdx(rightX, topY)]; adjVelocities[2] = velocities[getIdx(leftX, bottomY)]; adjVelocities[3] = velocities[getIdx(rightX, bottomY)]; float horizontalRatio = x - leftX; float verticalRatio = y - bottomY; glm::vec2 topVelocity = adjVelocities[0] * horizontalRatio + adjVelocities[1] * (1.0f - horizontalRatio); glm::vec2 bottomVelocity = adjVelocities[2] * horizontalRatio + adjVelocities[3] * (1.0f - horizontalRatio); glm::vec2 finalVelocity = bottomVelocity * verticalRatio + topVelocity * (1.0f - verticalRatio); #ifdef DRAW_PARTICLE_VELOCITY_ARROWS particleArrows.push_back(particleVertices[i]); #endif if (particleSystem->drawStreamlines) { particleSystem->streamLines[i * MAX_STREAMLINE_LENGTH + streamLineCounter] = particleVertices[i]; } particleVertices[i] += glm::vec3(finalVelocity, 0.0f); #ifdef DRAW_PARTICLE_VELOCITY_ARROWS glm::vec3 tmp = particleVertices[i] + 10.0f * glm::vec3(finalVelocity, 0.0f); particleArrows.push_back(tmp); #endif if (!respawnLinearly) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1) { if (mirrorSides) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1) { particleVertices[i].x = 0.0f; particleVertices[i].y = rand2D(i, (int)y) * (latticeHeight - 1); } else { particleVertices[i].y = (float)((int)(particleVertices[i].y + latticeHeight - 1) % (latticeHeight - 1)); } } else { particleVertices[i].x = 0.0f; particleVertices[i].y = rand2D(i, (int)y) * (latticeHeight - 1); } particleVertices[i].z = 0.0f; } } else { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1 || particleVertices[i].y <= 0.0f || particleVertices[i].y >= latticeHeight - 1) { if (mirrorSides) { if (particleVertices[i].x <= 0.0f || particleVertices[i].x >= latticeWidth - 1) { particleVertices[i] = glm::vec3(0, respawnIndex++, 0.0f); if (respawnIndex >= respawnMaxY) { respawnIndex 
= respawnMinY; } } else { particleVertices[i] = glm::vec3(x, (int)(particleVertices[i].y + latticeHeight - 1) % (latticeHeight - 1), 0.0f); } } else { particleVertices[i] = glm::vec3(0, respawnIndex++, 0.0f); if (respawnIndex >= respawnMaxY) { respawnIndex = respawnMinY; } } if (particleSystem->drawStreamlines) { for (int k = 0; k < MAX_STREAMLINE_LENGTH; k++) { particleSystem->streamLines[i * MAX_STREAMLINE_LENGTH + k] = particleVertices[i]; } } } } } streamLineCounter++; if (streamLineCounter > MAX_STREAMLINE_LENGTH) { streamLineCounter = 0; } } void LBM2D_1D_indices::updateInlets() { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; float macroDensity = 1.0f; glm::vec3 macroVelocity = inletVelocity; // let's find the equilibrium float leftTermMiddle = weightMiddle * macroDensity; float leftTermAxis = weightAxis * macroDensity; float leftTermDiagonal = weightDiagonal * macroDensity; // optimize these operations later float macroVelocityDot = glm::dot(macroVelocity, macroVelocity); float thirdTerm = 1.5f * macroVelocityDot; float middleEq = leftTermMiddle + leftTermMiddle * (-thirdTerm); // this can all be rewritten into arrays + for cycles! float dotProd = glm::dot(vRight, macroVelocity); float firstTerm = 3.0f * dotProd; float secondTerm = 4.5f * dotProd * dotProd; float rightEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTop, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float leftEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottom, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomEq = leftTermAxis + leftTermAxis * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vTopLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float topLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomLeft, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomLeftEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); dotProd = glm::dot(vBottomRight, macroVelocity); firstTerm = 3.0f * dotProd; secondTerm = 4.5f * dotProd * dotProd; float bottomRightEq = leftTermDiagonal + leftTermDiagonal * (firstTerm + secondTerm - thirdTerm); for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(0, y); backLattice[idx].adj[DIR_MIDDLE] = middleEq; backLattice[idx].adj[DIR_RIGHT] = rightEq; backLattice[idx].adj[DIR_TOP] = topEq; backLattice[idx].adj[DIR_LEFT] = leftEq; backLattice[idx].adj[DIR_BOTTOM] = bottomEq; backLattice[idx].adj[DIR_TOP_RIGHT] = topRightEq; backLattice[idx].adj[DIR_TOP_LEFT] = topLeftEq; backLattice[idx].adj[DIR_BOTTOM_LEFT] = bottomLeftEq; backLattice[idx].adj[DIR_BOTTOM_RIGHT] = bottomRightEq; for (int i = 0; i < 9; i++) { if (backLattice[idx].adj[i] < 0.0f) { backLattice[idx].adj[i] = 0.0f; } else if (backLattice[idx].adj[i] > 1.0f) { backLattice[idx].adj[i] = 1.0f; } } //velocities[idx] = macroVelocity; } } 
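/*
 * Sketch (not from the original file): the updateColliders routine below,
 * like updateCollidersKernel above, applies full bounce-back by swapping each
 * distribution with the one pointing in the opposite direction
 * (right<->left, top<->bottom, top-right<->bottom-left,
 * top-left<->bottom-right). The same rule can be expressed with an
 * opposite-direction lookup table, which avoids the hand-written swap per
 * pair. The helper names buildOppositeTable and bounceBackNode are
 * hypothetical and only assume that the DIR_* constants are valid indices
 * into Node::adj[9], as the surrounding code already does.
 */
static void buildOppositeTable(int opposite[9]) {
    opposite[DIR_MIDDLE]       = DIR_MIDDLE;
    opposite[DIR_RIGHT]        = DIR_LEFT;
    opposite[DIR_LEFT]         = DIR_RIGHT;
    opposite[DIR_TOP]          = DIR_BOTTOM;
    opposite[DIR_BOTTOM]       = DIR_TOP;
    opposite[DIR_TOP_RIGHT]    = DIR_BOTTOM_LEFT;
    opposite[DIR_BOTTOM_LEFT]  = DIR_TOP_RIGHT;
    opposite[DIR_TOP_LEFT]     = DIR_BOTTOM_RIGHT;
    opposite[DIR_BOTTOM_RIGHT] = DIR_TOP_LEFT;
}

static void bounceBackNode(Node &node, const int opposite[9]) {
    float reflected[9];
    for (int i = 0; i < 9; i++) {
        reflected[i] = node.adj[opposite[i]];
    }
    for (int i = 0; i < 9; i++) {
        node.adj[i] = reflected[i];
    }
}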
void LBM2D_1D_indices::updateColliders() { for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(x, y); if (tCol->area[idx]) { float right = backLattice[idx].adj[DIR_RIGHT]; float top = backLattice[idx].adj[DIR_TOP]; float left = backLattice[idx].adj[DIR_LEFT]; float bottom = backLattice[idx].adj[DIR_BOTTOM]; float topRight = backLattice[idx].adj[DIR_TOP_RIGHT]; float topLeft = backLattice[idx].adj[DIR_TOP_LEFT]; float bottomLeft = backLattice[idx].adj[DIR_BOTTOM_LEFT]; float bottomRight = backLattice[idx].adj[DIR_BOTTOM_RIGHT]; backLattice[idx].adj[DIR_RIGHT] = left; backLattice[idx].adj[DIR_TOP] = bottom; backLattice[idx].adj[DIR_LEFT] = right; backLattice[idx].adj[DIR_BOTTOM] = top; backLattice[idx].adj[DIR_TOP_RIGHT] = bottomLeft; backLattice[idx].adj[DIR_TOP_LEFT] = bottomRight; backLattice[idx].adj[DIR_BOTTOM_LEFT] = topRight; backLattice[idx].adj[DIR_BOTTOM_RIGHT] = topLeft; } } } } void LBM2D_1D_indices::initBuffers() { glGenVertexArrays(1, &vao); glBindVertexArray(vao); glGenBuffers(1, &vbo); glBindBuffer(GL_ARRAY_BUFFER, vbo); vector<glm::vec3> bData; for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { bData.push_back(glm::vec3(x, y, 0.0f)); } } glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * bData.size(), &bData[0], GL_STATIC_DRAW); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #ifdef DRAW_VELOCITY_ARROWS // Velocity arrows glGenVertexArrays(1, &velocityVAO); glBindVertexArray(velocityVAO); glGenBuffers(1, &velocityVBO); glBindBuffer(GL_ARRAY_BUFFER, velocityVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #endif #ifdef DRAW_PARTICLE_VELOCITY_ARROWS // Particle arrows glGenVertexArrays(1, &particleArrowsVAO); glBindVertexArray(particleArrowsVAO); glGenBuffers(1, &particleArrowsVBO); glBindBuffer(GL_ARRAY_BUFFER, particleArrowsVBO); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), (void *)0); glBindVertexArray(0); #endif } void LBM2D_1D_indices::initLattice() { float weightMiddle = 4.0f / 9.0f; float weightAxis = 1.0f / 9.0f; float weightDiagonal = 1.0f / 36.0f; for (int x = 0; x < latticeWidth; x++) { for (int y = 0; y < latticeHeight; y++) { int idx = getIdx(x, y); frontLattice[idx].adj[DIR_MIDDLE] = weightMiddle; for (int dir = 1; dir <= 4; dir++) { frontLattice[idx].adj[dir] = weightAxis; } for (int dir = 5; dir <= 8; dir++) { frontLattice[idx].adj[dir] = weightDiagonal; } } } } void LBM2D_1D_indices::precomputeRespawnRange() { respawnMinY = 0; respawnMaxY = latticeHeight; bool minSet = false; bool maxSet = false; for (int y = 0; y < latticeHeight; y++) { if (!minSet && !tCol->area[latticeWidth * y]) { respawnMinY = y; minSet = true; } if (minSet && tCol->area[latticeWidth * y]) { respawnMaxY = y - 1; maxSet = true; break; } } if (!minSet && !maxSet) { cerr << "The left wall of the scene is completely blocked off! 
Inlet incorrect" << endl; exit(-1); } if (!maxSet) { respawnMaxY = latticeHeight - 1; } cout << " || min respawn y = " << respawnMinY << ", max respawn y = " << respawnMaxY << endl; respawnIndex = respawnMinY; cudaMemcpyToSymbol(d_respawnIndex, &respawnIndex, sizeof(int)); cudaMemcpyToSymbol(d_respawnMinY, &respawnMinY, sizeof(int)); cudaMemcpyToSymbol(d_respawnMaxY, &respawnMaxY, sizeof(int)); } void LBM2D_1D_indices::swapLattices() { // CPU Node *tmp = frontLattice; frontLattice = backLattice; backLattice = tmp; // GPU tmp = d_frontLattice; d_frontLattice = d_backLattice; d_backLattice = tmp; } float LBM2D_1D_indices::calculateMacroscopicDensity(int x, int y) { float macroDensity = 0.0f; int idx = getIdx(x, y); for (int i = 0; i < 9; i++) { macroDensity += backLattice[idx].adj[i]; } return macroDensity; } glm::vec3 LBM2D_1D_indices::calculateMacroscopicVelocity(int x, int y, float macroDensity) { glm::vec3 macroVelocity = glm::vec3(0.0f, 0.0f, 0.0f); int idx = getIdx(x, y); macroVelocity += vRight * backLattice[idx].adj[DIR_RIGHT]; macroVelocity += vTop * backLattice[idx].adj[DIR_TOP]; macroVelocity += vLeft * backLattice[idx].adj[DIR_LEFT]; macroVelocity += vBottom * backLattice[idx].adj[DIR_BOTTOM]; macroVelocity += vTopRight * backLattice[idx].adj[DIR_TOP_RIGHT]; macroVelocity += vTopLeft * backLattice[idx].adj[DIR_TOP_LEFT]; macroVelocity += vBottomLeft * backLattice[idx].adj[DIR_BOTTOM_LEFT]; macroVelocity += vBottomRight * backLattice[idx].adj[DIR_BOTTOM_RIGHT]; macroVelocity /= macroDensity; return macroVelocity; }
8502b6b4ea9f0a630830a9db235b61d785b26ecb.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <hip/hip_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" int main(int argc, char *argv[]) { hipsolverDnHandle_t cusolverH = NULL; hipStream_t stream = NULL; hipsolverSyevjInfo_t syevj_params = NULL; const int m = 3; const int lda = m; /* * | 3.5 0.5 0 | * A = | 0.5 3.5 0 | * | 0 0 2 | */ const std::vector<double> A = {3.5, 0.5, 0, 0.5, 3.5, 0.0, 0.0, 0.0, 2.0}; const std::vector<double> lambda = {2.0, 3.0, 4.0}; std::vector<double> V(lda * m); // eigenvectors std::vector<double> W(m); // eigenvalues double *d_A = nullptr; double *d_W = nullptr; int *devInfo = nullptr; double *d_work = nullptr; int lwork = 0; int info_gpu = 0; /* configuration of syevj */ const double tol = 1.e-7; const int max_sweeps = 15; const hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR; // compute eigenvectors. 
const hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER; /* numerical results of syevj */ double residual = 0; int executed_sweeps = 0; printf("tol = %E, default value is machine zero \n", tol); printf("max. sweeps = %d, default value is 100\n", max_sweeps); printf("A = (matlab base-1)\n"); print_matrix(m, m, A.data(), lda); printf("=====\n"); /* step 1: create cusolver handle, bind a stream */ CUSOLVER_CHECK(hipsolverDnCreate(&cusolverH)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUSOLVER_CHECK(hipsolverDnSetStream(cusolverH, stream)); /* step 2: configuration of syevj */ CUSOLVER_CHECK(hipsolverDnCreateSyevjInfo(&syevj_params)); /* default value of tolerance is machine zero */ CUSOLVER_CHECK(hipsolverDnXsyevjSetTolerance(syevj_params, tol)); /* default value of max. sweeps is 100 */ CUSOLVER_CHECK(hipsolverDnXsyevjSetMaxSweeps(syevj_params, max_sweeps)); /* step 3: copy A to device */ CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(double) * lda * m)); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_W), sizeof(double) * m)); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&devInfo), sizeof(int))); CUDA_CHECK( hipMemcpyAsync(d_A, A.data(), sizeof(double) * lda * m, hipMemcpyHostToDevice, stream)); /* step 4: query working space of syevj */ CUSOLVER_CHECK( hipsolverDnDsyevj_bufferSize(cusolverH, jobz, uplo, m, d_A, lda, d_W, &lwork, syevj_params)); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_work), sizeof(double) * lwork)); /* step 5: compute eigen-pair */ CUSOLVER_CHECK(hipsolverDnDsyevj(cusolverH, jobz, uplo, m, d_A, lda, d_W, d_work, lwork, devInfo, syevj_params)); CUDA_CHECK( hipMemcpyAsync(V.data(), d_A, sizeof(double) * lda * m, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(W.data(), d_W, sizeof(double) * m, hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipMemcpyAsync(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost, stream)); CUDA_CHECK(hipStreamSynchronize(stream)); if (0 == info_gpu) { printf("syevj converges \n"); } else if (0 > info_gpu) { printf("%d-th parameter is wrong \n", -info_gpu); exit(1); } else { printf("WARNING: info = %d : syevj does not converge \n", info_gpu); } printf("Eigenvalue = (matlab base-1), ascending order\n"); for (int i = 0; i < m; i++) { printf("W[%d] = %E\n", i + 1, W[i]); } printf("V = (matlab base-1)\n"); print_matrix(m, m, V.data(), lda); printf("=====\n"); /* step 6: check eigenvalues */ double lambda_sup = 0; for (int i = 0; i < m; i++) { double error = fabs(lambda[i] - W[i]); lambda_sup = (lambda_sup > error) ? lambda_sup : error; } printf("|lambda - W| = %E\n", lambda_sup); CUSOLVER_CHECK(hipsolverDnXsyevjGetSweeps(cusolverH, syevj_params, &executed_sweeps)); CUSOLVER_CHECK(hipsolverDnXsyevjGetResidual(cusolverH, syevj_params, &residual)); printf("residual |A - V*W*V**H|_F = %E \n", residual); printf("number of executed sweeps = %d \n", executed_sweeps); /* free resources */ CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_W)); CUDA_CHECK(hipFree(devInfo)); CUDA_CHECK(hipFree(d_work)); CUSOLVER_CHECK(hipsolverDnDestroySyevjInfo(syevj_params)); CUSOLVER_CHECK(hipsolverDnDestroy(cusolverH)); CUDA_CHECK(hipStreamDestroy(stream)); CUDA_CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
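/*
 * Sketch (not from the original sample): besides the |lambda - W| check and
 * the residual reported by hipsolverDnXsyevjGetResidual above, the
 * eigenpairs copied back into V and W can be verified directly on the host
 * by measuring max |A*v_j - w_j*v_j|. The helper below assumes the
 * column-major layout used by the solver (eigenvector j lives in column j of
 * V); the function name checkEigenpairs is hypothetical.
 */
#include <cmath>
#include <vector>

static double checkEigenpairs(const std::vector<double> &A,
                              const std::vector<double> &V,
                              const std::vector<double> &W,
                              int m, int lda) {
    double maxErr = 0.0;
    for (int j = 0; j < m; j++) {        // eigenpair (W[j], V(:,j))
        for (int i = 0; i < m; i++) {    // row i of A * v_j
            double av = 0.0;
            for (int k = 0; k < m; k++) {
                av += A[i + k * lda] * V[k + j * lda];
            }
            const double err = std::fabs(av - W[j] * V[i + j * lda]);
            maxErr = (err > maxErr) ? err : maxErr;
        }
    }
    return maxErr;  // close to machine precision when syevj has converged
}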
8502b6b4ea9f0a630830a9db235b61d785b26ecb.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <vector> #include <cuda_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" int main(int argc, char *argv[]) { cusolverDnHandle_t cusolverH = NULL; cudaStream_t stream = NULL; syevjInfo_t syevj_params = NULL; const int m = 3; const int lda = m; /* * | 3.5 0.5 0 | * A = | 0.5 3.5 0 | * | 0 0 2 | */ const std::vector<double> A = {3.5, 0.5, 0, 0.5, 3.5, 0.0, 0.0, 0.0, 2.0}; const std::vector<double> lambda = {2.0, 3.0, 4.0}; std::vector<double> V(lda * m); // eigenvectors std::vector<double> W(m); // eigenvalues double *d_A = nullptr; double *d_W = nullptr; int *devInfo = nullptr; double *d_work = nullptr; int lwork = 0; int info_gpu = 0; /* configuration of syevj */ const double tol = 1.e-7; const int max_sweeps = 15; const cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; // compute eigenvectors. const cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER; /* numerical results of syevj */ double residual = 0; int executed_sweeps = 0; printf("tol = %E, default value is machine zero \n", tol); printf("max. 
sweeps = %d, default value is 100\n", max_sweeps); printf("A = (matlab base-1)\n"); print_matrix(m, m, A.data(), lda); printf("=====\n"); /* step 1: create cusolver handle, bind a stream */ CUSOLVER_CHECK(cusolverDnCreate(&cusolverH)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUSOLVER_CHECK(cusolverDnSetStream(cusolverH, stream)); /* step 2: configuration of syevj */ CUSOLVER_CHECK(cusolverDnCreateSyevjInfo(&syevj_params)); /* default value of tolerance is machine zero */ CUSOLVER_CHECK(cusolverDnXsyevjSetTolerance(syevj_params, tol)); /* default value of max. sweeps is 100 */ CUSOLVER_CHECK(cusolverDnXsyevjSetMaxSweeps(syevj_params, max_sweeps)); /* step 3: copy A to device */ CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(double) * lda * m)); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_W), sizeof(double) * m)); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&devInfo), sizeof(int))); CUDA_CHECK( cudaMemcpyAsync(d_A, A.data(), sizeof(double) * lda * m, cudaMemcpyHostToDevice, stream)); /* step 4: query working space of syevj */ CUSOLVER_CHECK( cusolverDnDsyevj_bufferSize(cusolverH, jobz, uplo, m, d_A, lda, d_W, &lwork, syevj_params)); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_work), sizeof(double) * lwork)); /* step 5: compute eigen-pair */ CUSOLVER_CHECK(cusolverDnDsyevj(cusolverH, jobz, uplo, m, d_A, lda, d_W, d_work, lwork, devInfo, syevj_params)); CUDA_CHECK( cudaMemcpyAsync(V.data(), d_A, sizeof(double) * lda * m, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(W.data(), d_W, sizeof(double) * m, cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaMemcpyAsync(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost, stream)); CUDA_CHECK(cudaStreamSynchronize(stream)); if (0 == info_gpu) { printf("syevj converges \n"); } else if (0 > info_gpu) { printf("%d-th parameter is wrong \n", -info_gpu); exit(1); } else { printf("WARNING: info = %d : syevj does not converge \n", info_gpu); } printf("Eigenvalue = (matlab base-1), ascending order\n"); for (int i = 0; i < m; i++) { printf("W[%d] = %E\n", i + 1, W[i]); } printf("V = (matlab base-1)\n"); print_matrix(m, m, V.data(), lda); printf("=====\n"); /* step 6: check eigenvalues */ double lambda_sup = 0; for (int i = 0; i < m; i++) { double error = fabs(lambda[i] - W[i]); lambda_sup = (lambda_sup > error) ? lambda_sup : error; } printf("|lambda - W| = %E\n", lambda_sup); CUSOLVER_CHECK(cusolverDnXsyevjGetSweeps(cusolverH, syevj_params, &executed_sweeps)); CUSOLVER_CHECK(cusolverDnXsyevjGetResidual(cusolverH, syevj_params, &residual)); printf("residual |A - V*W*V**H|_F = %E \n", residual); printf("number of executed sweeps = %d \n", executed_sweeps); /* free resources */ CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_W)); CUDA_CHECK(cudaFree(devInfo)); CUDA_CHECK(cudaFree(d_work)); CUSOLVER_CHECK(cusolverDnDestroySyevjInfo(syevj_params)); CUSOLVER_CHECK(cusolverDnDestroy(cusolverH)); CUDA_CHECK(cudaStreamDestroy(stream)); CUDA_CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
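// The sample above only checks the eigenvalues against the known spectrum. A small
// host-side helper (a sketch, not part of the original sample; 'eigenpair_residual'
// is a name introduced here) can also confirm the returned eigenvectors by measuring
// max_{i,j} |(A*v_j)_i - W[j]*v_j[i]| on the column-major A, V, W already on the host.
#include <cmath>
#include <vector>

static double eigenpair_residual(const std::vector<double> &A, const std::vector<double> &V,
                                 const std::vector<double> &W, int m, int lda) {
    double max_err = 0.0;
    for (int j = 0; j < m; j++) {        // eigenpair j: column j of V, eigenvalue W[j]
        for (int i = 0; i < m; i++) {    // row i of A * v_j
            double Av_i = 0.0;
            for (int k = 0; k < m; k++)
                Av_i += A[i + k * lda] * V[k + j * lda];   // column-major indexing
            max_err = std::fmax(max_err, std::fabs(Av_i - W[j] * V[i + j * lda]));
        }
    }
    return max_err;
}
// e.g. printf("max |A*v - lambda*v| = %E\n", eigenpair_residual(A, V, W, m, lda));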
5c7f0e40c32433e0b967626b06356c5eaa8e0ad0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/***********************************************************
By Huahua Wang, the University of Minnesota, twin cities
***********************************************************/

// Element-wise X[i] -= Y[i] over 'size' entries; the grid-stride loop lets any
// launch configuration cover the whole array.
__global__ void matsub(float* X, float* Y, unsigned int size)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;

    for (unsigned int i = idx; i < size; i += stride) {
        X[i] -= Y[i];
    }
}
5c7f0e40c32433e0b967626b06356c5eaa8e0ad0.cu
#include "includes.h" /*********************************************************** By Huahua Wang, the University of Minnesota, twin cities ***********************************************************/ __global__ void matsub( float* X, float* Y, unsigned int size) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = idx; i < size; i += stride) { X[i] -= Y[i]; } }
3e95ff202e663ab591717a388ac9dd6269839395.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "julia.hpp" #include <assert.h> #include <complex> #include <iostream> __constant__ float c_dev[2]; __constant__ float edges[4]; __constant__ float size[2]; __constant__ int n_pixels[2]; __constant__ int max_value_dev; namespace julia { // Declare this here because we don't want to export it __global__ void julia_gpu_kernel(int *res); void julia(int x_pixels, int y_pixels, cfloat c, float left_edge, float right_edge, float bottom_edge, float top_edge, int max_value, int *res) { auto width = right_edge - left_edge; auto height = top_edge - bottom_edge; for (int x = 0; x < x_pixels; x++) { for (int y = 0; y < y_pixels; y++) { // This is not quite right - at bottom left of pixel cfloat pos = {(float)x / x_pixels * width + left_edge, (float)y / y_pixels * height + bottom_edge}; while (res[x + y * x_pixels] < max_value) { pos = julia::iter_julia(pos, c); if (std::abs(pos) >= 2) { break; } res[x+y * x_pixels] += 1; } } } } cfloat iter_julia(cfloat z_old, cfloat c) { return ::pow(z_old, 2) + c; } void julia_gpu(int x_pixels, int y_pixels, cfloat c, float left_edge, float right_edge, float bottom_edge, float top_edge, int max_value, int *res) { // INSTRUMENT! hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); // Space on device int *dev_res; assert(hipMalloc(&dev_res, x_pixels * y_pixels * sizeof(int)) == hipSuccess); float tmp_c[2] = {c.real(), c.imag()}; float tmp_edges[4] = {left_edge, right_edge, bottom_edge, top_edge}; float tmp_size[2] = {right_edge - left_edge, top_edge - bottom_edge}; int tmp_n_pixels[2] = {x_pixels, y_pixels}; assert(hipMemcpyToSymbol(c_dev, tmp_c, sizeof(tmp_c)) == hipSuccess); assert(hipMemcpyToSymbol(edges, tmp_edges, sizeof(tmp_edges)) == hipSuccess); assert(hipMemcpyToSymbol(max_value_dev, &max_value, sizeof(max_value)) == hipSuccess); assert(hipMemcpyToSymbol(n_pixels, tmp_n_pixels, sizeof(tmp_n_pixels)) == hipSuccess); assert(hipMemcpyToSymbol(size, tmp_size, sizeof(tmp_size)) == hipSuccess); // When working in more than 1d we don't do the loop. We just spin up enough blocks to cover the image. dim3 threadsPerBlock(16, 16); dim3 blocks(x_pixels / 16, y_pixels / 16); hipLaunchKernelGGL(( julia_gpu_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, dev_res); // Copy results back to the host assert(hipMemcpy(res, dev_res, x_pixels * y_pixels * sizeof(int), hipMemcpyDeviceToHost) == hipSuccess); assert(hipFree(dev_res) == hipSuccess); hipEventRecord(stop); // ^ tells us to record an event when we get here. But we can't read the time off it until we've got there // So, we synchronize on that event. hipEventSynchronize(stop); float t; hipEventElapsedTime(&t, start, stop); std::cout << "Time taken: " << t << "ms" << std::endl; } __global__ void julia_gpu_kernel(int *res) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * n_pixels[0]; float pos_x, pos_y, pos_x_tmp; if ((x < n_pixels[0]) && (y <= n_pixels[1])) { int count = 0; // This is real/imag in the normal version // The first part is the fraction along the image pos_x = (float)x / n_pixels[0] * size[0] + edges[0]; pos_y = (float)y / n_pixels[1] * size[1] + edges[2]; while(count < max_value_dev) { pos_x_tmp = pos_x * pos_x - pos_y * pos_y + c_dev[0]; pos_y = 2 * pos_x * pos_y + c_dev[1]; pos_x = pos_x_tmp; if (pos_x * pos_x + pos_y * pos_y >= 4) { break; } count++; } res[offset] = count; } } } // namespace julia
3e95ff202e663ab591717a388ac9dd6269839395.cu
#include "julia.hpp" #include <assert.h> #include <complex> #include <iostream> __constant__ float c_dev[2]; __constant__ float edges[4]; __constant__ float size[2]; __constant__ int n_pixels[2]; __constant__ int max_value_dev; namespace julia { // Declare this here because we don't want to export it __global__ void julia_gpu_kernel(int *res); void julia(int x_pixels, int y_pixels, cfloat c, float left_edge, float right_edge, float bottom_edge, float top_edge, int max_value, int *res) { auto width = right_edge - left_edge; auto height = top_edge - bottom_edge; for (int x = 0; x < x_pixels; x++) { for (int y = 0; y < y_pixels; y++) { // This is not quite right - at bottom left of pixel cfloat pos = {(float)x / x_pixels * width + left_edge, (float)y / y_pixels * height + bottom_edge}; while (res[x + y * x_pixels] < max_value) { pos = julia::iter_julia(pos, c); if (std::abs(pos) >= 2) { break; } res[x+y * x_pixels] += 1; } } } } cfloat iter_julia(cfloat z_old, cfloat c) { return std::pow(z_old, 2) + c; } void julia_gpu(int x_pixels, int y_pixels, cfloat c, float left_edge, float right_edge, float bottom_edge, float top_edge, int max_value, int *res) { // INSTRUMENT! cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // Space on device int *dev_res; assert(cudaMalloc(&dev_res, x_pixels * y_pixels * sizeof(int)) == cudaSuccess); float tmp_c[2] = {c.real(), c.imag()}; float tmp_edges[4] = {left_edge, right_edge, bottom_edge, top_edge}; float tmp_size[2] = {right_edge - left_edge, top_edge - bottom_edge}; int tmp_n_pixels[2] = {x_pixels, y_pixels}; assert(cudaMemcpyToSymbol(c_dev, tmp_c, sizeof(tmp_c)) == cudaSuccess); assert(cudaMemcpyToSymbol(edges, tmp_edges, sizeof(tmp_edges)) == cudaSuccess); assert(cudaMemcpyToSymbol(max_value_dev, &max_value, sizeof(max_value)) == cudaSuccess); assert(cudaMemcpyToSymbol(n_pixels, tmp_n_pixels, sizeof(tmp_n_pixels)) == cudaSuccess); assert(cudaMemcpyToSymbol(size, tmp_size, sizeof(tmp_size)) == cudaSuccess); // When working in more than 1d we don't do the loop. We just spin up enough blocks to cover the image. dim3 threadsPerBlock(16, 16); dim3 blocks(x_pixels / 16, y_pixels / 16); julia_gpu_kernel<<<blocks, threadsPerBlock>>>(dev_res); // Copy results back to the host assert(cudaMemcpy(res, dev_res, x_pixels * y_pixels * sizeof(int), cudaMemcpyDeviceToHost) == cudaSuccess); assert(cudaFree(dev_res) == cudaSuccess); cudaEventRecord(stop); // ^ tells us to record an event when we get here. But we can't read the time off it until we've got there // So, we synchronize on that event. cudaEventSynchronize(stop); float t; cudaEventElapsedTime(&t, start, stop); std::cout << "Time taken: " << t << "ms" << std::endl; } __global__ void julia_gpu_kernel(int *res) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * n_pixels[0]; float pos_x, pos_y, pos_x_tmp; if ((x < n_pixels[0]) && (y <= n_pixels[1])) { int count = 0; // This is real/imag in the normal version // The first part is the fraction along the image pos_x = (float)x / n_pixels[0] * size[0] + edges[0]; pos_y = (float)y / n_pixels[1] * size[1] + edges[2]; while(count < max_value_dev) { pos_x_tmp = pos_x * pos_x - pos_y * pos_y + c_dev[0]; pos_y = 2 * pos_x * pos_y + c_dev[1]; pos_x = pos_x_tmp; if (pos_x * pos_x + pos_y * pos_y >= 4) { break; } count++; } res[offset] = count; } } } // namespace julia
f2cdefc4c9ef840a4b9b83c50eac96feb9c4d954.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <cstdlib> class Unified { public: void *operator new(size_t len) { void *ptr; hipMallocManaged(&ptr, len); return ptr; } void operator delete(void *ptr) { hipFree(ptr); } void* operator new[] (std::size_t size) { void *ptr; hipMallocManaged(&ptr,size); return ptr; } void operator delete[] (void* ptr) { hipFree(ptr); } }; class publisher : public Unified { public: float value; __device__ void setValue(float v) { value=v; } }; __global__ void publish_msg(publisher *topic,float num) { int i=threadIdx.x + blockIdx.x*blockDim.x; topic[i].setValue(num); } class subscriber : public Unified { public: float value; __device__ void getValue(float v) { value=v; } }; /* GPU kernel: set an array of topic to a value */ __host__ void sub_msg(publisher *topic,int i) { std::cout<<"Topic["<<i<<"] = "<<topic[i].value<<"\n"; } int main(int argc,char *argv[]) { int n=1; int i=0; publisher *topic=new publisher[n]; hipLaunchKernelGGL(( publish_msg), dim3(1),dim3(1), 0, 0, topic,6.9); /* GPU */ //std::cout<<"Topic["<<i<<"] = "<<topic[i].value<<"\n"; hipDeviceSynchronize(); sub_msg(topic,i); i++; publisher *topic1=new publisher[n]; hipLaunchKernelGGL(( publish_msg), dim3(1),dim3(2), 0, 0, topic1,7.7); //std::cout<<"Topic["<<i<<"] = "<<topic1[i].value<<"\n"; hipDeviceSynchronize(); //sub_msg(topic1,i); return 0; }
f2cdefc4c9ef840a4b9b83c50eac96feb9c4d954.cu
#include <iostream> #include <cuda.h> #include <cstdlib> class Unified { public: void *operator new(size_t len) { void *ptr; cudaMallocManaged(&ptr, len); return ptr; } void operator delete(void *ptr) { cudaFree(ptr); } void* operator new[] (std::size_t size) { void *ptr; cudaMallocManaged(&ptr,size); return ptr; } void operator delete[] (void* ptr) { cudaFree(ptr); } }; class publisher : public Unified { public: float value; __device__ void setValue(float v) { value=v; } }; __global__ void publish_msg(publisher *topic,float num) { int i=threadIdx.x + blockIdx.x*blockDim.x; topic[i].setValue(num); } class subscriber : public Unified { public: float value; __device__ void getValue(float v) { value=v; } }; /* GPU kernel: set an array of topic to a value */ __host__ void sub_msg(publisher *topic,int i) { std::cout<<"Topic["<<i<<"] = "<<topic[i].value<<"\n"; } int main(int argc,char *argv[]) { int n=1; int i=0; publisher *topic=new publisher[n]; publish_msg<<<1,1>>>(topic,6.9); /* GPU */ //std::cout<<"Topic["<<i<<"] = "<<topic[i].value<<"\n"; cudaDeviceSynchronize(); sub_msg(topic,i); i++; publisher *topic1=new publisher[n]; publish_msg<<<1,2>>>(topic1,7.7); //std::cout<<"Topic["<<i<<"] = "<<topic1[i].value<<"\n"; cudaDeviceSynchronize(); //sub_msg(topic1,i); return 0; }
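// A hedged variant (not in the original file): the second launch above runs two
// threads against a one-element 'topic1' array, so thread 1 writes past the end of
// the allocation. Passing the element count and guarding the index keeps any launch
// geometry safe. 'publish_msg_bounded' is a name introduced only for this sketch and
// reuses the 'publisher' class defined above.
__global__ void publish_msg_bounded(publisher *topic, float num, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {               // only touch elements that were actually allocated
        topic[i].setValue(num);
    }
}
// Usage: publish_msg_bounded<<<1, 2>>>(topic1, 7.7f, n);  // surplus threads just exit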
708d85eaa2df31291d726964142d2f44ed99fc79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/softmax_kernel_util.h" #include "oneflow/core/kernel/kernel_util.cuh" #include "oneflow/core/ndarray/ndarray_util.h" #include "oneflow/user/kernels/math_unary_elementwise_func.h" namespace oneflow { namespace { constexpr int64_t kSoftmaxGpuBlockSize = 128; int64_t GetSoftmaxBlockSize() { return kSoftmaxGpuBlockSize; } int64_t GetSoftmaxNumBlocks(const int64_t num_instances) { return ::min(static_cast<int32_t>(num_instances), kCudaMaxBlocksNum); } template<typename T> struct ComputeDataType { using type = T; }; template<> struct ComputeDataType<half> { using type = float; }; template<typename T> __global__ void BroadcastSubExpGpuImpl(const int64_t num_instances, const int64_t num_classes, const T* x, const T* y, T* z) { using ComputeType = typename ComputeDataType<T>::type; const int64_t tid = threadIdx.x; __shared__ ComputeType row_sub; for (int64_t row = blockIdx.x; row < num_instances; row += gridDim.x) { const int64_t row_offset = row * num_classes; const T* x_row = x + row_offset; T* z_row = z + row_offset; if (tid == 0) { row_sub = static_cast<ComputeType>(y[row]); } __syncthreads(); const ComputeType row_sub_t = row_sub; for (int64_t col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) { z_row[col] = static_cast<T>( ExpFunctor<ComputeType>::Forward(static_cast<ComputeType>(x_row[col]) - row_sub_t)); } } } template<typename T> int64_t GetMinNumClasses() { return 32; } template<typename T> void BroadcastSubExpGpu(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const T* x, const T* y, T* z) { hipLaunchKernelGGL(( BroadcastSubExpGpuImpl), dim3(GetSoftmaxNumBlocks(num_instances)), dim3(GetSoftmaxBlockSize()), 0, ctx->cuda_stream(), num_instances, num_classes, x, y, z); } template<> void BroadcastSubExpGpu<float16>(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const float16* x, const float16* y, float16* z) { BroadcastSubExpGpu<half>(ctx, num_instances, num_classes, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(y), reinterpret_cast<half*>(z)); } } // namespace template<SoftmaxAlgorithm softmax_algorithm, typename T> struct SoftmaxKernelUtil<DeviceType::kGPU, softmax_algorithm, T> { static void ComputeProb(DeviceCtx* ctx, const int64_t n, const int64_t w, const T* in, T* prob, void* temp_storage, const size_t temp_storage_bytes) { auto Val = NdarrayUtil<DeviceType::kGPU, T>::GetValNdarrayBuilder(); auto Var = NdarrayUtil<DeviceType::kGPU, T>::GetVarNdarrayBuilder(); const size_t min_temp_storage_bytes = SoftmaxComputeProbTempStorageSize<T, softmax_algorithm>(n, w); CHECK_GE(temp_storage_bytes, min_temp_storage_bytes); const size_t reduce_temp_storage_bytes = SoftmaxReduceOperationStorageSize<T>(n, w); T* reduce_storage = reinterpret_cast<T*>(temp_storage); auto reduce_storage_var = 
Var({static_cast<int64_t>(reduce_temp_storage_bytes / sizeof(T))}, reduce_storage); T* tmp = reinterpret_cast<T*>(reinterpret_cast<unsigned char*>(temp_storage) + reduce_temp_storage_bytes); // max | tmp[i] = Max_j(in[i][j]) NdarrayUtil<DeviceType::kGPU, T>::ReduceMax(ctx, Var({n, 1}, tmp), Val({n, w}, in), reduce_storage_var); // sub | prob[i][j] = in[i][j] - tmp[i] // exp | prob[i][j] = exp(prob[i][j]) if (w >= GetMinNumClasses<T>()) { BroadcastSubExpGpu(ctx, n, w, in, tmp, prob); } else { NdarrayUtil<DeviceType::kGPU, T>::BroadcastSub(ctx, Var({n, w}, prob), Val({n, w}, in), Val({n, 1}, tmp)); NdarrayUtil<DeviceType::kGPU, T>::InplaceExp(ctx, Var({n, w}, prob)); } // sum | tmp[i] = Sum_j(prob[i][j]) NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(ctx, Var({n, 1}, tmp), Val({n, w}, prob), reduce_storage_var); // div | prob[i][j] /= tmp[i] NdarrayUtil<DeviceType::kGPU, T>::InplaceBroadcastDiv(ctx, Var({n, w}, prob), Val({n, 1}, tmp)); } static void ComputeDiff(DeviceCtx* ctx, const int64_t n, const int64_t w, const T* dy, const T* out, T* dx, void* temp_storage, const size_t temp_storage_bytes) { auto Val = NdarrayUtil<DeviceType::kGPU, T>::GetValNdarrayBuilder(); auto Var = NdarrayUtil<DeviceType::kGPU, T>::GetVarNdarrayBuilder(); const size_t min_temp_storage_bytes = SoftmaxComputeProbTempStorageSize<T, softmax_algorithm>(n, w); CHECK_GE(temp_storage_bytes, min_temp_storage_bytes); const size_t reduce_temp_storage_bytes = SoftmaxReduceOperationStorageSize<T>(n, w); T* reduce_storage = reinterpret_cast<T*>(temp_storage); auto reduce_storage_var = Var({static_cast<int64_t>(reduce_temp_storage_bytes / sizeof(T))}, reduce_storage); T* sum_vec = reinterpret_cast<T*>(reinterpret_cast<unsigned char*>(temp_storage) + reduce_temp_storage_bytes); // it's safe to use dx as tmp // dot product | get dot product sum_vec[i] from out[i] * dy[i] T* tmp = dx; NdarrayUtil<DeviceType::kGPU, T>::Mul(ctx, Var({n * w}, tmp), Val({n * w}, out), Val({n * w}, dy)); NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(ctx, Var({n, 1}, sum_vec), Val({n, w}, tmp), reduce_storage_var); // sub | dx[i][j] = dy[i][j] - sum_vec[i] NdarrayUtil<DeviceType::kGPU, T>::BroadcastSub(ctx, Var({n, w}, dx), Val({n, w}, dy), Val({n, 1}, sum_vec)); // elementwise multiplication | dx[i][j] *= out[i][j] NdarrayUtil<DeviceType::kGPU, T>::InplaceMul(ctx, Var({n * w}, dx), Val({n * w}, out)); } }; #define INSTANTIATE_SOFTMAX_KERNEL_UTIL(data_type) \ template struct SoftmaxKernelUtil<DeviceType::kGPU, SoftmaxAlgorithm::kSoftmax, data_type>; INSTANTIATE_SOFTMAX_KERNEL_UTIL(float16) INSTANTIATE_SOFTMAX_KERNEL_UTIL(float) INSTANTIATE_SOFTMAX_KERNEL_UTIL(double) #undef INSTANTIATE_SOFTMAX_KERNEL_UTIL } // namespace oneflow
708d85eaa2df31291d726964142d2f44ed99fc79.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/user/kernels/softmax_kernel_util.h" #include "oneflow/core/kernel/kernel_util.cuh" #include "oneflow/core/ndarray/ndarray_util.h" #include "oneflow/user/kernels/math_unary_elementwise_func.h" namespace oneflow { namespace { constexpr int64_t kSoftmaxGpuBlockSize = 128; int64_t GetSoftmaxBlockSize() { return kSoftmaxGpuBlockSize; } int64_t GetSoftmaxNumBlocks(const int64_t num_instances) { return std::min(static_cast<int32_t>(num_instances), kCudaMaxBlocksNum); } template<typename T> struct ComputeDataType { using type = T; }; template<> struct ComputeDataType<half> { using type = float; }; template<typename T> __global__ void BroadcastSubExpGpuImpl(const int64_t num_instances, const int64_t num_classes, const T* x, const T* y, T* z) { using ComputeType = typename ComputeDataType<T>::type; const int64_t tid = threadIdx.x; __shared__ ComputeType row_sub; for (int64_t row = blockIdx.x; row < num_instances; row += gridDim.x) { const int64_t row_offset = row * num_classes; const T* x_row = x + row_offset; T* z_row = z + row_offset; if (tid == 0) { row_sub = static_cast<ComputeType>(y[row]); } __syncthreads(); const ComputeType row_sub_t = row_sub; for (int64_t col = tid; col < num_classes; col += kSoftmaxGpuBlockSize) { z_row[col] = static_cast<T>( ExpFunctor<ComputeType>::Forward(static_cast<ComputeType>(x_row[col]) - row_sub_t)); } } } template<typename T> int64_t GetMinNumClasses() { return 32; } template<typename T> void BroadcastSubExpGpu(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const T* x, const T* y, T* z) { BroadcastSubExpGpuImpl<<<GetSoftmaxNumBlocks(num_instances), GetSoftmaxBlockSize(), 0, ctx->cuda_stream()>>>(num_instances, num_classes, x, y, z); } template<> void BroadcastSubExpGpu<float16>(DeviceCtx* ctx, const int64_t num_instances, const int64_t num_classes, const float16* x, const float16* y, float16* z) { BroadcastSubExpGpu<half>(ctx, num_instances, num_classes, reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(y), reinterpret_cast<half*>(z)); } } // namespace template<SoftmaxAlgorithm softmax_algorithm, typename T> struct SoftmaxKernelUtil<DeviceType::kGPU, softmax_algorithm, T> { static void ComputeProb(DeviceCtx* ctx, const int64_t n, const int64_t w, const T* in, T* prob, void* temp_storage, const size_t temp_storage_bytes) { auto Val = NdarrayUtil<DeviceType::kGPU, T>::GetValNdarrayBuilder(); auto Var = NdarrayUtil<DeviceType::kGPU, T>::GetVarNdarrayBuilder(); const size_t min_temp_storage_bytes = SoftmaxComputeProbTempStorageSize<T, softmax_algorithm>(n, w); CHECK_GE(temp_storage_bytes, min_temp_storage_bytes); const size_t reduce_temp_storage_bytes = SoftmaxReduceOperationStorageSize<T>(n, w); T* reduce_storage = reinterpret_cast<T*>(temp_storage); auto reduce_storage_var = Var({static_cast<int64_t>(reduce_temp_storage_bytes / sizeof(T))}, reduce_storage); T* tmp = reinterpret_cast<T*>(reinterpret_cast<unsigned char*>(temp_storage) + 
reduce_temp_storage_bytes); // max | tmp[i] = Max_j(in[i][j]) NdarrayUtil<DeviceType::kGPU, T>::ReduceMax(ctx, Var({n, 1}, tmp), Val({n, w}, in), reduce_storage_var); // sub | prob[i][j] = in[i][j] - tmp[i] // exp | prob[i][j] = exp(prob[i][j]) if (w >= GetMinNumClasses<T>()) { BroadcastSubExpGpu(ctx, n, w, in, tmp, prob); } else { NdarrayUtil<DeviceType::kGPU, T>::BroadcastSub(ctx, Var({n, w}, prob), Val({n, w}, in), Val({n, 1}, tmp)); NdarrayUtil<DeviceType::kGPU, T>::InplaceExp(ctx, Var({n, w}, prob)); } // sum | tmp[i] = Sum_j(prob[i][j]) NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(ctx, Var({n, 1}, tmp), Val({n, w}, prob), reduce_storage_var); // div | prob[i][j] /= tmp[i] NdarrayUtil<DeviceType::kGPU, T>::InplaceBroadcastDiv(ctx, Var({n, w}, prob), Val({n, 1}, tmp)); } static void ComputeDiff(DeviceCtx* ctx, const int64_t n, const int64_t w, const T* dy, const T* out, T* dx, void* temp_storage, const size_t temp_storage_bytes) { auto Val = NdarrayUtil<DeviceType::kGPU, T>::GetValNdarrayBuilder(); auto Var = NdarrayUtil<DeviceType::kGPU, T>::GetVarNdarrayBuilder(); const size_t min_temp_storage_bytes = SoftmaxComputeProbTempStorageSize<T, softmax_algorithm>(n, w); CHECK_GE(temp_storage_bytes, min_temp_storage_bytes); const size_t reduce_temp_storage_bytes = SoftmaxReduceOperationStorageSize<T>(n, w); T* reduce_storage = reinterpret_cast<T*>(temp_storage); auto reduce_storage_var = Var({static_cast<int64_t>(reduce_temp_storage_bytes / sizeof(T))}, reduce_storage); T* sum_vec = reinterpret_cast<T*>(reinterpret_cast<unsigned char*>(temp_storage) + reduce_temp_storage_bytes); // it's safe to use dx as tmp // dot product | get dot product sum_vec[i] from out[i] * dy[i] T* tmp = dx; NdarrayUtil<DeviceType::kGPU, T>::Mul(ctx, Var({n * w}, tmp), Val({n * w}, out), Val({n * w}, dy)); NdarrayUtil<DeviceType::kGPU, T>::ReduceSum(ctx, Var({n, 1}, sum_vec), Val({n, w}, tmp), reduce_storage_var); // sub | dx[i][j] = dy[i][j] - sum_vec[i] NdarrayUtil<DeviceType::kGPU, T>::BroadcastSub(ctx, Var({n, w}, dx), Val({n, w}, dy), Val({n, 1}, sum_vec)); // elementwise multiplication | dx[i][j] *= out[i][j] NdarrayUtil<DeviceType::kGPU, T>::InplaceMul(ctx, Var({n * w}, dx), Val({n * w}, out)); } }; #define INSTANTIATE_SOFTMAX_KERNEL_UTIL(data_type) \ template struct SoftmaxKernelUtil<DeviceType::kGPU, SoftmaxAlgorithm::kSoftmax, data_type>; INSTANTIATE_SOFTMAX_KERNEL_UTIL(float16) INSTANTIATE_SOFTMAX_KERNEL_UTIL(float) INSTANTIATE_SOFTMAX_KERNEL_UTIL(double) #undef INSTANTIATE_SOFTMAX_KERNEL_UTIL } // namespace oneflow
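// An illustrative reference (not part of OneFlow) for what ComputeProb above does per
// row: shift by the row maximum, exponentiate, and normalize by the row sum. The real
// path goes through NdarrayUtil reductions plus BroadcastSubExpGpu for performance and
// numerical stability; 'softmax_rows_naive' is a name introduced only for this sketch.
__global__ void softmax_rows_naive(const float* in, float* prob, int n, int w) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n) return;
    const float* x = in + (size_t)row * w;
    float* y = prob + (size_t)row * w;
    float m = x[0];
    for (int j = 1; j < w; ++j) m = fmaxf(m, x[j]);                    // tmp[i] = Max_j(in[i][j])
    float s = 0.f;
    for (int j = 0; j < w; ++j) { y[j] = expf(x[j] - m); s += y[j]; }  // exp(in - max); row sum
    for (int j = 0; j < w; ++j) y[j] /= s;                             // prob[i][j] /= sum
}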
810ab844b30af4997384d47a6dd1ec11d5a1ccdd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> // includes cuda.h and hip/hip_runtime_api.h #include "spray_k.cuh" #include<helper_cuda.h> #include<helper_math.h> #include "utility.h" #include "tables.h" __constant__ FlipConstant dparam; __constant__ int NX; __constant__ int NY; __constant__ int NZ; __constant__ int NXMC; __constant__ int NYMC; __constant__ int NZMC; texture<uint, 1, hipReadModeElementType> edgeTex; texture<uint, 1, hipReadModeElementType> triTex; texture<uint, 1, hipReadModeElementType> numVertsTex; __device__ float racc = 0.; __device__ float wacc = 0.; __device__ float3 pacc; __device__ float sradiusInv; void copyparamtoGPU(FlipConstant hparam) { checkCudaErrors(hipMemcpyToSymbol(dparam, &hparam, sizeof(FlipConstant))); } void copyNXNYNZtoGPU(int nx, int ny, int nz) { checkCudaErrors(hipMemcpyToSymbol(NX, &nx, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NY, &ny, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NZ, &nz, sizeof(int))); } void copyNXNYNZtoGPU_MC(int nx, int ny, int nz) { checkCudaErrors(hipMemcpyToSymbol(NXMC, &nx, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NYMC, &ny, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(NZMC, &nz, sizeof(int))); } __device__ inline void getijk(int &i, int &j, int &k, int &idx) { i = idx / (NZ*NY); j = idx / NZ%NY; k = idx%NZ; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos) { pos = (pos - dparam.gmin) / dparam.cellsize; i = (pos.x >= 0 && pos.x<NX) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<NY) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<NZ) ? ((int)pos.z) : 0; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos, int w, int h, int d, float dx) { pos = (pos - dparam.gmin) / dx; i = (pos.x >= 0 && pos.x<w) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<h) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<d) ? 
((int)pos.z) : 0; } __device__ inline int getidx(int i, int j, int k) { return (i*NZ*NY + j*NZ + k); } __device__ inline int getidx(int i, int j, int k, int w, int h, int d) { return (i*h*d + j*d + k); } __device__ inline float getRfromMass(float m) { return pow(m*0.75f / M_PI / dparam.waterrho, 0.333333); } __device__ inline float getMassfromR(float r) { return dparam.waterrho*M_PI*4.0 / 3 * r*r*r; } // __global__ void cptdivergence(farray outdiv, farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); if (mark[idx] == TYPEFLUID) div = (ux(i + 1, j, k) - ux(i, j, k) + uy(i, j + 1, k) - uy(i, j, k) + uz(i, j, k + 1) - uz(i, j, k)) / h; outdiv[idx] = div; } } __device__ inline int clampidx(int i, int j, int k) { i = max(0, min(i, NX - 1)); j = max(0, min(j, NY - 1)); k = max(0, min(k, NZ - 1)); return (i*NZ*NY + j*NZ + k); } __device__ inline float trilinear(farray u, float x, float y, float z, int w, int h, int d) { x = fmaxf(0.0f, fminf(x, w)); y = fmaxf(0.0f, fminf(y, h)); z = fmaxf(0.0f, fminf(z, d)); int i = fminf(x, w - 2); int j = fminf(y, h - 2); int k = fminf(z, d - 2); return (k + 1 - z)*((j + 1 - y)*((i + 1 - x)*u(i, j, k) + (x - i)*u(i + 1, j, k)) + (y - j)*((i + 1 - x)*u(i, j + 1, k) + (x - i)*u(i + 1, j + 1, k))) + (z - k)*((j + 1 - y)*((i + 1 - x)*u(i, j, k + 1) + (x - i)*u(i + 1, j, k + 1)) + (y - j)*((i + 1 - x)*u(i, j + 1, k + 1) + (x - i)*u(i + 1, j + 1, k + 1))); } __device__ float3 getVectorFromGrid(float3 pos, farray phigrax, farray phigray, farray phigraz) { float3 res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //ux,uy,uz(staggered grid) res.x = trilinear(phigrax, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.y = trilinear(phigray, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.z = trilinear(phigraz, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } __device__ float getScaleFromFrid(float3 pos, farray phi) { float res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //ux,uy,uz(staggered grid) res = trilinear(phi, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } //Jacobi iteration: Ax=b //todo: check this function and maybe get another solver. __global__ void JacobiIter(farray outp, farray p, farray b, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float resp = 0, h = dparam.cellsize.x; float p1, p2, p3, p4, p5, p6; float p0 = p[idx]; int i, j, k; if (mark[idx] == TYPEFLUID) { getijk(i, j, k, idx); p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? p0 : p(i + 1, j, k); p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : p(i, j + 1, k); p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : p(i, j, k + 1); p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : p(i - 1, j, k); p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : p(i, j - 1, k); p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? 
p0 : p(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 - h*h*b(i, j, k)) / 6.0f; } outp[idx] = resp; } } __global__ void setPressBoundary(farray press) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0) press[idx] = press(i + 1, j, k); if (j == 0) press[idx] = press(i, j + 1, k); if (k == 0) press[idx] = press(i, j, k + 1); if (i == NX - 1) press[idx] = press(i - 1, j, k); if (j == NY - 1) press[idx] = press(i, j - 1, k); if (k == NZ - 1) press[idx] = press(i, j, k - 1); } } // __global__ void subGradPress(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } __device__ float3 getParticleVelFromGrid(float3 pos, farray ux, farray uy, farray uz) { float3 vel; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //ux,uy,uz(staggered grid) vel.x = trilinear(ux, x, y - 0.5f, z - 0.5f, NX + 1, NY, NZ); vel.y = trilinear(uy, x - 0.5f, y, z - 0.5f, NX, NY + 1, NZ); vel.z = trilinear(uz, x - 0.5f, y - 0.5f, z, NX, NY, NZ + 1); return vel; } __global__ void mapvelg2p_flip(float3 *ppos, float3 *vel, char* parflag, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float3 gvel = getParticleVelFromGrid(ipos, ux, uy, uz); vel[idx] += gvel; } } __device__ inline float sharp_kernel(float r2, float h) { return fmax(h*h / fmax(r2, 0.0001f) - 1.0f, 0.0f); } __global__ void mapvelp2g_slow(float3 *pos, float3 *vel, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float w, weight, RE = 1.4, dis2, usum; float3 gpos; float scale = 1 / dparam.cellsize.x; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = 0; getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].x; } usum = (weight>0) ? (usum / weight) : 0.0f; ux(i, j, k) = usum; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot((pos[p] * scale) - gpos, (pos[p] * scale) - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].y; } usum = (weight>0) ? (usum / weight) : 0.0f; uy(i, j, k) = usum; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].z; } usum = (weight>0.00001) ? 
(usum / weight) : 0.0f; uz(i, j, k) = usum; } } __device__ inline bool verifycellidx(int i, int j, int k) { if (i<0 || i>NX - 1 || j<0 || j>NY - 1 || k<0 || k>NZ - 1) return false; return true; } __device__ inline bool verifycellidx(int i, int j, int k, int w, int h, int d) { if (i<0 || i>w - 1 || j<0 || j>h - 1 || k<0 || k>d - 1) return false; return true; } __global__ void addgravityforce_k(float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEFLUID || parflag[idx] == TYPESOLID) vel[idx] += dt*dparam.gravity; } } __global__ void addbuoyancyforce_k(float dheight, float3 *pos, float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEAIR) vel[idx] -= dt*dparam.gravity * 1.1f; //todo: else if (parflag[idx] == TYPEAIRSOLO) vel[idx] -= dt*dparam.gravity * 1.1f; else if (parflag[idx] == TYPESOLID) vel[idx] -= dt*dparam.gravity * 0.55f; // else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void addbuoyancyforce_vel(float velMax, float3 *pos, float3 *vel, char* parflag, int pnum, float dt, float buoyanceRateAir, float buoyanceRateSolo) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { float rate = fmax(velMax - vel[idx].z, 0.0f) / velMax; if (parflag[idx] == TYPEAIR) vel[idx].z -= dt*dparam.gravity.z * rate * buoyanceRateAir; //todo: else if (parflag[idx] == TYPEAIRSOLO) vel[idx].z -= dt*dparam.gravity.z *rate* buoyanceRateSolo; else if (parflag[idx] == TYPESOLID) vel[idx].z += dt*dparam.gravity.z * 0.1f;//0.55f; // else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void advectparticle(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); //vel[idx] += dt*dparam.gravity; ipos += gvel*dt; if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //check boundary ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, ipos.z); if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void advectparticle_RK2(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //mid point: x(n+1/2) = x(n) + 0.5*dt*u(xn) float3 midpoint = ipos + gvel * dt * 0.5; float3 gvelmidpoint = getParticleVelFromGrid(midpoint, ux, uy, uz); // 
x(n+1) = x(n) + dt*u(x+1/2) ipos += gvelmidpoint * dt; //check boundary if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back if (parflag[idx] != TYPESOLID) { pvel[idx] = ivel; ppos[idx] = ipos; } else pvel[idx] = ivel; } } __global__ void flipAirVacuum(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == TYPEVACUUM) mark[idx] = TYPEAIR; } } __global__ void markair(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { mark[idx] = TYPEAIR; } } __global__ void markforsmoke(charray mark, farray spraydense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { /* if(spraydense[idx]>0 )*/ mark[idx] = TYPEFLUID; } } __global__ void markfluid(charray mark, float3 *pos, char *parflag, int pnum) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { int i, j, k; //todo: ???? Should spray particle count??? or should we have a more accurate mark method. // if( parflag[idx]==TYPEFLUID) { getijkfrompos(i, j, k, pos[idx]); mark(i, j, k) = TYPEFLUID; // } } } //fluid particle __global__ void markfluid_dense(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int fluidParCntPerGridThres) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) cntair++; } } if (cntfluidsolid == 0 && cntair == 0) mark[idx] = TYPEVACUUM; else if (cntfluidsolid>cntair) mark[idx] = TYPEFLUID; else mark[idx] = TYPEAIR; } } __global__ void markBoundaryCell(charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY; } } __global__ void setgridcolor_k(float* color, ECOLORMODE mode, farray p, farray ux, farray uy, farray uz, farray div, farray phi, charray mark, farray ls, farray tp, float sigma, float temperatureMax, float temperatureMin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float3 rescolor = make_float3(0.0); int cellindex = NY / 2; if (mode == COLOR_PRESS) { if (j != cellindex || p[idx] == 0) rescolor = make_float3(0, 0, 1); else if (p[idx]>0) rescolor = make_float3(0, 1, 0); else if (p[idx]<0) rescolor = make_float3(1, 0, 0); //rescolor = mapColorBlue2Red( 30000*abs(p[idx]) ); } else if (mode == COLOR_UX) { if (j != cellindex || ux(i + 1, j, k) + ux(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(ux(i + 1, j, k) + ux(i, j, k))); } else if (mode == COLOR_UY) { if (j != cellindex || uy(i, j + 1, k) + uy(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(uy(i, j + 1, k) + uy(i, j, k))); } else if (mode == COLOR_UZ) { if (j != cellindex/*||uz(i,j,k+1)+uz(i,j,k)<0*/) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(5 * abs(uz(i, j, k))); } else if (mode == 
COLOR_DIV) { if (j != cellindex || div[idx] == 0) rescolor = make_float3(0, 0, 1); else if (div[idx]>0) rescolor = make_float3(0, 1, 0); else if (div[idx]<0) rescolor = make_float3(1, 1, 0); } else if (mode == COLOR_PHI) { if (phi[idx]>3 * NX - 1 || j != cellindex) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5f + phi[idx]); } else if (mode == COLOR_MARK) { if (j != cellindex) rescolor = make_float3(0, 0, 1); else { if (mark[idx] == TYPEAIR) rescolor = make_float3(0, 1, 0); else if (mark[idx] == TYPEFLUID) rescolor = make_float3(1, 0, 0); else if (mark[idx] == TYPEVACUUM) rescolor = make_float3(1, 1, 0); else if (mark[idx] == TYPEBOUNDARY) rescolor = make_float3(0, 1, 1); else rescolor = make_float3(0, 0, 1); //rescolor = mapColorBlue2Red( (int)(mark[idx])+1.0f ) ; } } else if (mode == COLOR_LS) { if (j == cellindex && ls[idx]>0) rescolor = mapColorBlue2Red(abs(ls[idx] / dparam.cellsize.x)); else rescolor = make_float3(0, 0, 1); } else if (mode == COLOR_TP) { if (j != cellindex || i == 0 || i == NX - 1 || k == 0 || k == NZ - 1) rescolor = make_float3(0, 0, 1); else // rescolor = mapColorBlue2Red( abs(tp[idx]*dparam.cellsize.x*5/sigma) ); //rescolor = mapColorBlue2Red( abs(tp[idx]-353)/5.0f ); rescolor = mapColorBlue2Red((tp[idx] - temperatureMin) / (temperatureMax - temperatureMin)*6.0f); } color[idx * 3] = rescolor.x; color[idx * 3 + 1] = rescolor.y; color[idx * 3 + 2] = rescolor.z; } } __host__ __device__ inline float3 mapColorBlue2Red(float v) { float3 color; if (v<0) return make_float3(0.0f, 0.0f, 1.0f); int ic = (int)v; float f = v - ic; switch (ic) { case 0: { color.x = 0; color.y = f / 2; color.z = 1; } break; case 1: { color.x = 0; color.y = f / 2 + 0.5f; color.z = 1; } break; case 2: { color.x = f / 2; color.y = 1; color.z = 1 - f / 2; } break; case 3: { color.x = f / 2 + 0.5f; color.y = 1; color.z = 0.5f - f / 2; } break; case 4: { color.x = 1; color.y = 1.0f - f / 2; color.z = 0; } break; case 5: { color.x = 1; color.y = 0.5f - f / 2; color.z = 0; } break; default: { color.x = 1; color.y = 0; color.z = 0; } break; } return color; } __global__ void initphi(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) phi[idx] = -0.5; else phi[idx] = NX * 3; } } __global__ void initSolidPhi(farray phi, uint *gridstart, uint *gridend, char *pflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { bool flag = false; uint start = gridstart[idx]; if (start != CELL_UNDEF) { for (; start<gridend[idx]; start++) { if (pflag[start] == TYPESOLID) flag = true; } } if (flag) phi[idx] = -0.5f; else phi[idx] = 3 * NX; } } __device__ void solvedistance(float a, float b, float c, float &x) { float d = fmin(a, fmin(b, c)) + 1; if (d>fmax(a, fmax(b, c))) { d = (a + b + c + sqrt(3 - (a - b)*(a - b) - (a - c)*(a - c) - (b - c)*(b - c))) / 3; } if (d<x) x = d; } __global__ void sweepphi(farray phi) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), resphi); } phi[idx] = resphi; } } __global__ void sweepphibytype(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + 
threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) return; int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), resphi); } phi[idx] = resphi; } } __global__ void sweepu(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray phi, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; // if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ((mark(i, j, k) == TYPEAIR && mark(i - 1, j, k) == TYPEAIR) || (mark(i, j, k) == TYPEBOUNDARY && mark(i - 1, j, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ - 1) continue; wx = -di*(phi(i, j, k) - phi(i - 1, j, k)); if (wx<0) continue; wy = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j + dj, k) - phi(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j, k + dk) - phi(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ((mark(i, j, k) == TYPEAIR && mark(i, j - 1, k) == TYPEAIR) || (mark(i, j, k) == TYPEBOUNDARY && mark(i, j - 1, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di>NX - 1 || k + dk<0 || k + dk >NZ - 1) continue; wy = -dj*(phi(i, j, k) - phi(i, j - 1, k)); if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j - 1, k) - phi(i + di, j, k) - phi(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (phi(i, j, k) + phi(i, j - 1, k) - phi(i, j, k + dk) - phi(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ((mark(i, j, k) == TYPEAIR && mark(i, j, k - 1) == TYPEAIR) || (mark(i, j, k) == TYPEBOUNDARY && mark(i, j, k - 1) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(phi(i, j, k) - phi(i, j, k - 1)); if (wz<0) continue; wy = (phi(i, j, k) + phi(i, j, k - 1) - phi(i, j + dj, k) - phi(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j, k - 1) - phi(i + di, j, k) - phi(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } __global__ void 
setSmokeBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (j == 0) ux(i, j, k) = ux(i, j + 1, k); else if (j == NY - 1) ux(i, j, k) = ux(i, j - 1, k); else if (k == 0) ux(i, j, k) = ux(i, j, k + 1); else if (k == NZ - 1) ux(i, j, k) = ux(i, j, k - 1); else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (i == 0) uy(i, j, k) = uy(i + 1, j, k); else if (i == NX - 1) uy(i, j, k) = uy(i - 1, j, k); else if (k == 0) uy(i, j, k) = uy(i, j, k + 1); else if (k == NZ - 1) uy(i, j, k) = uy(i, j, k - 1); else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 2) uz(i, j, k) = 0.0f; else if (i == 0) uz(i, j, k) = uz(i + 1, j, k); else if (i == NX - 1) uz(i, j, k) = uz(i - 1, j, k); else if (j == 0) uz(i, j, k) = uz(i, j + 1, k); else if (j == NY - 1) uz(i, j, k) = uz(i, j - 1, k); else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void setWaterBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 1) //ceiling uz(i, j, k) = 0.0f; else if (k == uz.zn - 2) //ceiling. uz(i, j, k) = (uz(i, j, k - 1)<0) ? 
(uz(i, j, k - 1)) : 0; else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void computeDeltaU(farray ux, farray uy, farray uz, farray uxold, farray uyold, farray uzold) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) uxold[idx] = ux[idx] - uxold[idx]; if (idx < dparam.gvnum.y) uyold[idx] = uy[idx] - uyold[idx]; if (idx < dparam.gvnum.z) uzold[idx] = uz[idx] - uzold[idx]; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p); int gridindex = getidx(i, j, k); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD_MC(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p, NXMC, NYMC, NZMC, dparam.cellsize.x / NXMC*NX); int gridindex = getidx(i, j, k, NXMC, NYMC, NZMC); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStartD(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index float3* sortedPos, // output: sorted positions float3* sortedVel, // output: sorted velocities char* sortedflag, float* sortedmass, float* sortedTemperature, float* sortedheat, float* sortedsolubility, float* sortedgascontain, uint * gridParticleHash, // input: sorted grid hashes uint * gridParticleIndex,// input: sorted particle indices float3* oldPos, // input: sorted position array float3* oldVel, // input: sorted velocity array char* oldflag, float* oldmass, float* oldtemperature, float* oldheat, float* oldsolubility, float* oldgascontain, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1]; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleIndex[index]; float3 pos = oldPos[sortedIndex]; // macro does either global read or texture fetch float3 vel = oldVel[sortedIndex]; // see particles_kernel.cuh sortedPos[index] = pos; sortedVel[index] = vel; sortedflag[index] = oldflag[sortedIndex]; sortedmass[index] = oldmass[sortedIndex]; sortedTemperature[index] = oldtemperature[sortedIndex]; sortedheat[index] = oldheat[sortedIndex]; sortedsolubility[index] = oldsolubility[sortedIndex]; sortedgascontain[index] = oldgascontain[sortedIndex]; } } __global__ void advectux(farray outux, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); float3 pos = make_float3(i, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX || j == NY - 1 || k == NZ - 1) outux[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = ux[idx]; vel.y = (uy(i - 1, j, k) + uy(i - 1, j + 1, k) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = (uz(i - 1, j, k) + uz(i - 1, j, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(ux, oldpos.x, oldpos.y - 0.5f, oldpos.z - 0.5f, ux.xn, ux.yn, ux.zn); outux[idx] = oldu * velocitydissipation; } } } __global__ void advectuy(farray outuy, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.y) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uy.xn, uy.yn, uy.zn); float3 pos = make_float3(i + 0.5, j, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY || k == NZ - 1) outuy[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j - 1, k) + ux(i + 1, j - 1, k) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = uy[idx]; vel.z = (uz(i, j - 1, k) + uz(i, j - 1, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(uy, oldpos.x - 0.5f, oldpos.y, oldpos.z - 0.5f, uy.xn, uy.yn, uy.zn); outuy[idx] = oldu * velocitydissipation; } } } __global__ void advectuz(farray outuz, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); float3 pos = make_float3(i + 0.5, j + 0.5, k); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ) outuz[idx] = 0; else { //get this point's vel, for tracing back. 
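// Semi-Lagrangian advection of the w component: assemble the full velocity at this
// w-face by averaging the four neighbouring u and v faces (plus the external wind),
// trace the position back by -vel*dt scaled into grid units by 1/cellsize, tri-linearly
// sample the old w field at that backtraced point, and damp it by velocitydissipation.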
float3 vel; vel.x = (ux(i, j, k - 1) + ux(i + 1, j, k - 1) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = (uy(i, j, k - 1) + uy(i, j + 1, k - 1) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = uz[idx]; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(uz, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z, uz.xn, uz.yn, uz.zn); //float oldu = -dparam.dt*3.8f; outuz[idx] = oldu * velocitydissipation; } } } __global__ void advectscaler(farray outscalar, farray scalar, farray ux, farray uy, farray uz, float densedissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { //get pos of ux point int i, j, k; getijk(i, j, k, idx); float3 pos = make_float3(i + 0.5, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ - 1) outscalar[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j, k) + ux(i + 1, j, k))*0.5f; vel.y = (uy(i, j, k) + uy(i, j + 1, k))*0.5f; vel.z = (uz(i, j, k) + uz(i, j, k + 1))*0.5f; //enforce wind as an external velocity field. vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float olds = trilinear(scalar, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z - 0.5f, NX, NY, NZ); outscalar[idx] = olds * densedissipation; } } } __global__ void setsmokedense(farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, dense.xn, dense.yn, dense.zn); if (i>28 && i<36 && j>28 && j<36 && k<6) dense[idx] = dparam.m0*6.0f; } } __global__ void setsmokevel(farray uz, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; // if( k>1 && k<NZ-1 ) // if( dense(i,j,k-1)>0 ) // uz[idx] = 4.0f; if (k>1 && k<NZ - 1) { float alpha = 1000.0f; uz(i, j, k) += alpha * dense(i, j, k - 1); } } } __global__ void setsmokevel_nozzle(farray ux, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; //float alpha = 10000.0f; if (i>1 && i<NX - 1) if (dense(i - 1, j, k)>0) ux[idx] = 8.0f; //uz(i,j,k) += alpha * dense(i,j,k-1); } } surface<void, cudaSurfaceType3D> surfaceWrite; __global__ void writedens2surface_k(farray dens) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); // float4 idens = make_float4( 0.0f ); // if(i>10&&i<50 &&j>10&&j<50&&k>10&&k<50 ) // idens = make_float4( 1.0f ); float4 idens = make_float4(dens[idx] * 10000); surf3Dwrite(idens, surfaceWrite, i*sizeof(float4), j, k); //why *sizeof(float4)? 
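		// Answering the question above: surf3Dwrite takes the x coordinate as a byte
		// offset while y and z are element indices, so the column index i must be
		// scaled by sizeof(float4) to address the correct texel.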
} } void writedens2surface(hipArray* cudaarray, int blocknum, int threadnum, farray dense) { hipBindSurfaceToArray(surfaceWrite, cudaarray); //kernel writedens2surface_k << <blocknum, threadnum >> >(dense); } __device__ float smooth_kernel(float r2, float h) { return fmax(1.0f - r2 / (h*h), 0.0f); } __device__ float3 sumcellspring(float3 ipos, float3 *pos, float* pmass, char* parflag, uint *gridstart, uint *gridend, int gidx, float idiameter) { if (gridstart[gidx] == CELL_UNDEF) return make_float3(0.0f); uint start = gridstart[gidx]; uint end = gridend[gidx]; float dist, w; float3 spring = make_float3(0.0f); float r = 0; for (uint p = start; p<end; ++p) { //if( parflag[p]!=TYPESOLID ) //solid { dist = length(pos[p] - ipos); r = idiameter;//+getRfromMass( pmass[p] ); w = pmass[p] * smooth_kernel(dist*dist, r); if (dist>0.1f*idiameter) // spring += w*(ipos - pos[p]) / dist; } } return spring; } __global__ void correctparticlepos(float3* outpos, float3* ppos, float *pmass, char* parflag, int pnum, uint* gridstart, uint *gridend, float correctionspring, float correctionradius, float3 *pepos, float *peradius, int penum) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID/* || parflag[idx]==TYPEAIR*/ || parflag[idx] == TYPEAIRSOLO) { outpos[idx] = ppos[idx]; return; } float3 ipos = ppos[idx]; int i, j, k; getijkfrompos(i, j, k, ipos); float3 spring = make_float3(0.0f); float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float re = correctionradius*dparam.cellsize.x; // float re= getRfromMass( pmass[idx] ); int lv = 1; // float idiameter = 2*pow(0.75*pmass[idx]/dparam.waterrho/M_PI, 1.0/3); //SPH for (int di = -lv; di <= lv; di++) for (int dj = -lv; dj <= lv; dj++) for (int dk = -lv; dk <= lv; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { spring += sumcellspring(ipos, ppos, pmass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk), re); } } // //emptyempty // float w, dist; // for( int p=0; p<penum; p++ ) // { // if( peradius[p]>0.5f*dparam.cellsize.x ) // // { // dist=length(pepos[p]-ipos); // w = pmass[idx]*smooth_kernel(dist*dist, peradius[p]); // // if( dist>0.1f*peradius[p] ) // // spring += w*(ipos-pepos[p]) / dist; // } // } spring *= correctionspring*re; if (length(dparam.dt*spring)>0.3f*dparam.cellsize.x) ipos += dparam.cellsize.x * 0.3f * spring / length(spring); else ipos += dparam.dt*spring; ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, fmin(tmax.z, ipos.z)); outpos[idx] = ipos; } } __device__ void sumcelldens(float &phi, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis = length(pos[p] - gpos); if (phi>dis) phi = dis; } } } //MC //[2012][TVCG]Preserving Fluid Sheets with Adaptively Sampled Anisotropic Particles __global__ void genWaterDensfield(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NX + 1)*(NY + 1)*(NZ + 1)) { float h = dparam.cellsize.x; float phi = 8 * fMCDensity*h; //from flip3d_vs //get position int i, j, k; getijk(i, j, k, idx, NX + 1, NY + 1, NZ + 1); float3 p = 
make_float3(i, j, k)*h; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk)) { sumcelldens(phi, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); } } phi = fMCDensity*h - phi; if (i*j*k == 0 || i == NX || j == NY || k == NZ) phi = fmin(phi, -0.1f); outdens[idx] = phi; } } __device__ float3 sumcelldens2(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]CGFParallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield2(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. float r = 1.0f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens2(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, MCParType); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_Gas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, SCENE scene) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || (parflag[p] == TYPEAIRSOLO && scene != SCENE_INTERACTION)) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]CGFParallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_Gas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. 
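		// Reconstruction note: each nearby gas particle within radius R contributes
		// weight w = (R^2 - d^2)^3; "center" accumulates the weighted mean position over
		// the surrounding cells, and the level set becomes phi = r - |p - center|,
		// falling back to phi = -r (outside) when no particle is in range.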
float r = 0.8f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_Gas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, scene); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_liquidAndGas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, float sradiusInv, float radius, float racc,float wacc, float3 pacc) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; //float r = R / 2.; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO || parflag[p] == TYPEFLUID) { dis = length(pos[p] - gpos); // { // float s = dot(pos[p] - gpos, pos[p] - gpos)*sradiusInv;//mantaflow // w = max(0., (1. - s)); // wacc += w; // racc += radius * w; // pacc += pos[p] * w; // // } if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]CGFParallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_liquidAndGas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. //float r = 2.5f*sqrt(3.)*1.01*0.5*h; //mantaFlow flip03_gen float r = 0.55*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); //mantaflow //float racc, wacc; //float3 pacc = make_float3(0.); // float phiv = r; // sradiusInv = 1. / (4. *r * r); // int radius = int(1. * r) + 1; // float3 gridPos = make_float3(i + 0.5, j + 0.5, k + 0.5)* h; float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_liquidAndGas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, sradiusInv, r,racc,wacc,pacc); // printf("%f !!!!", pacc.x); ///////////////////////// // racc /= wacc; // pacc /= wacc; // phiv = fabs(length(gridPos-pacc)); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. 
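		// Samples on the MC-grid border are pushed far outside the surface below so the
		// extracted mesh stays closed against the domain walls.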
// phi = phiv; //mantaflow if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens3(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float h, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { //GY:CFG2012Parallel Surface Reconstruction for Particle-Based Fluids // [2007CAVW]A Unified Particle Model for Fluid-Solid Interactions // 2012 VRIPHYSAn Efficient Surface Reconstruction Pipeline for Particle-Based Fluids dis = length(pos[p] - gpos); //v-xi if (dis<h) { // w = h*h -dis*dis; // // w = w*w*w; // res += pos[p] * w; // wsum += w; w = dis / (4 * h); // |v-xi|/R [2007 CAVW] R=2h=4r w = 1 - w*w; // 1-s~2 w = max(w*w*w, 0.0); // k(s) res += pos[p] * w; wsum += w; } } } return res; } //MC //[2012]VRIPHYSAn Efficient Surface Reconstruction Pipeline for Particle-Based Fluids __global__ void genWaterDensfield_GY(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType, float3 centertmp) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. float r = 0.75f*h; float thigh = 0.51; float tlow = 0.49; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; // float3 center = make_float3(0.0f); float wsum = 0.0f; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens3(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h, MCParType); } } if (wsum>0) { center /= wsum; //~v float3 delta = center - centertmp; float Ev = max(delta.x, max(delta.y, delta.z)) / (4 * h); // // float Ev = 3.8; centertmp = center; // centertmp:center Evdelta float gamma = (thigh - Ev) / (thigh - tlow); float f = (Ev<tlow) ? 1 : gamma*gamma*gamma - 3 * gamma*gamma + 3 * gamma; // phi = r - length( p - center ); phi = (length(p - center) - r*f); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = fmin(phi, -10.0f); outdens[idx] = phi; } } __global__ void markSolid_sphere(float3 spherepos, float sphereradius, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if ((i>NX/2-2) &&i<2.5*NX/3 && j>3.5*NY/9 && j< 6*NY/9 && k<NZ/5) mark[idx] = TYPEBOUNDARY; } } __global__ void markSolid_waterfall(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z) mark[idx] = TYPEBOUNDARY; } } //a trick part. 
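// Illustrative host-side sketch (not part of the original pipeline): how one of the
// solid-marking kernels above could be launched. The helper name, the block size of
// 256 and passing the cell count in from the host are assumptions for illustration.
inline void markSolidSphereExample(charray mark, float3 spherepos, float sphereradius, int gnum)
{
	int threadnum = 256;
	int blocknum = (gnum + threadnum - 1) / threadnum;	// one thread per grid cell
	markSolid_sphere << <blocknum, threadnum >> >(spherepos, sphereradius, mark);
	hipDeviceSynchronize();	// simple sketch: wait until the mark field is ready
}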
__global__ void markSolid_waterfall_liquid(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z*0.7f) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z*0.7f) mark[idx] = TYPEBOUNDARY; } } //a trick part. __global__ void markSolid_terrain(charray mark, charray mark_terrain) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark_terrain[idx] == TYPEBOUNDARY) mark[idx] = TYPEBOUNDARY; } } //MC __global__ void genSphereDensfield(farray outdens, float3 center, float radius) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { //float3 center = make_float3(0.5f); float phi; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -0.1; else { float3 p = make_float3(i, j, k)*dparam.cellsize.x / (NXMC / NX); phi = radius - length(p - center); } outdens[idx] = phi; } } //-----MC from cuda sdk 4.2 // classify voxel based on number of vertices it will generate // one thread per voxel (cell) __global__ void classifyVoxel(uint* voxelVerts, uint *voxelOccupied, farray volume, float isoValue) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<NXMC*NYMC*NZMC) { int i, j, k; getijk(i, j, k, idx, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate flag indicating if each vertex is inside or outside isosurface uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // read number of vertices from texture uint numVerts = tex1Dfetch(numVertsTex, cubeindex); voxelVerts[idx] = numVerts; voxelOccupied[idx] = (numVerts > 0); }//endif } // compact voxel array __global__ void compactVoxels(uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint i = __mul24(blockId, blockDim.x) + threadIdx.x; if (voxelOccupied[i] && (i < numVoxels)) { compactedVoxelArray[voxelOccupiedScan[i]] = i; } } // compute interpolated vertex along an edge __device__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1) { float t = (isolevel - f0) / (f1 - f0); return lerp(p0, p1, t); } // calculate triangle normal __device__ float3 calcNormal(float3 *v0, float3 *v1, float3 *v2) { float3 edge0 = *v1 - *v0; float3 edge1 = *v2 - *v0; // note - it's faster to perform normalization in vertex shader rather than here return cross(edge0, edge1); } __device__ int GetVertexID(int i, int j, int k) { return 3 * (i*(NZMC + 1)*(NYMC + 1) + j*(NZMC + 1) + k); } __device__ int GetEdgeID(int nX, int nY, int nZ, int edge) { // return GetVertexID( nX,nY,nZ ); switch (edge) { case 0: return GetVertexID(nX, nY, nZ) + 1; case 1: return GetVertexID(nX + 1, nY, nZ); case 
2: return GetVertexID(nX, nY + 1, nZ) + 1; case 3: return GetVertexID(nX, nY, nZ); case 4: return GetVertexID(nX, nY, nZ + 1) + 1; case 5: return GetVertexID(nX + 1, nY, nZ + 1); case 6: return GetVertexID(nX, nY + 1, nZ + 1) + 1; case 7: return GetVertexID(nX, nY, nZ + 1); case 8: return GetVertexID(nX, nY, nZ) + 2; case 9: return GetVertexID(nX + 1, nY, nZ) + 2; case 10: return GetVertexID(nX + 1, nY + 1, nZ) + 2; case 11: return GetVertexID(nX, nY + 1, nZ) + 2; default: // Invalid edge no. return -1; } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles2(float3 *pos, float3 *norm, uint *compactedVoxelArray, uint *numVertsScanned, farray volume, float isoValue, uint activeVoxels, uint maxVerts) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]); vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]); 
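		// vertlist is laid out edge-major with a stride of NTHREADS: thread t owns slot
		// (edge*NTHREADS + t) for each of the 12 cube edges and only reads its own
		// entries back when emitting triangles below.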
__syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; float3 *v[3]; uint edge; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); v[0] = &vertlist[(edge*NTHREADS) + threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); v[1] = &vertlist[(edge*NTHREADS) + threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); v[2] = &vertlist[(edge*NTHREADS) + threadIdx.x]; // calculate triangle surface normal float3 n = calcNormal(v[0], v[1], v[2]); /*if (index < (maxVerts - 3)) */{ pos[index] = *v[0]; norm[index] = n; pos[index + 1] = *v[1]; norm[index + 1] = n; pos[index + 2] = *v[2]; norm[index + 2] = n; } } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles_indices(float3 *pTriVertex, uint *pTriIndices, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels, uint maxVerts, uint *MCEdgeIdxMapped, uint *numVertsScanned) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]); 
vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]); __syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge, mappededgeidx; for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; //vertex index to write back, sort by each triangle. //triangle edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 1] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 2] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); } } __global__ void markActiveEdge_MC(uint *outmark, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge; for (int idxVert = 0; idxVert<numVerts; idxVert++) { //outmark0 edge = tex1Dfetch(triTex, (cubeindex * 16) + idxVert); outmark[GetEdgeID(i, j, k, edge)] = 1; } //debug // for( int edge=0; edge<12; edge++ ) // outmark[GetEdgeID(i,j,k,edge)] = 1; } // __global__ void calnormal_k(float3 *ppos, float3 *pnor, int pnum, uint *indices, int indicesnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < indicesnum / 3) //face number { int i1 = indices[idx * 3 + 0]; int i2 = indices[idx * 3 + 1]; int i3 = indices[idx * 3 + 2]; float3 p1 = ppos[i1]; float3 p2 = ppos[i2]; float3 p3 = ppos[i3]; //compute float3 nor = cross(p2 - p1, p3 - p1); //write back atomicAdd(&pnor[i1].x, nor.x); atomicAdd(&pnor[i2].x, nor.x); atomicAdd(&pnor[i3].x, nor.x); atomicAdd(&pnor[i1].y, nor.y); atomicAdd(&pnor[i2].y, nor.y); atomicAdd(&pnor[i3].y, nor.y); atomicAdd(&pnor[i1].z, nor.z); atomicAdd(&pnor[i2].z, nor.z); atomicAdd(&pnor[i3].z, nor.z); } } // __global__ void normalizeTriangleNor_k(float3 *pnor, int pnum) { int 
idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < pnum) //vertex number { if (length(pnor[idx])>0) pnor[idx] = normalize(pnor[idx]); } } void allocateTextures(uint **d_edgeTable, uint **d_triTable, uint **d_numVertsTable) { checkCudaErrors(hipMalloc((void**)d_edgeTable, 256 * sizeof(uint))); checkCudaErrors(hipMemcpy((void *)*d_edgeTable, (void *)edgeTable, 256 * sizeof(uint), hipMemcpyHostToDevice)); hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindUnsigned); checkCudaErrors(hipBindTexture(0, edgeTex, *d_edgeTable, channelDesc)); checkCudaErrors(hipMalloc((void**)d_triTable, 256 * 16 * sizeof(uint))); checkCudaErrors(hipMemcpy((void *)*d_triTable, (void *)triTable, 256 * 16 * sizeof(uint), hipMemcpyHostToDevice)); checkCudaErrors(hipBindTexture(0, triTex, *d_triTable, channelDesc)); checkCudaErrors(hipMalloc((void**)d_numVertsTable, 256 * sizeof(uint))); checkCudaErrors(hipMemcpy((void *)*d_numVertsTable, (void *)numVertsTable, 256 * sizeof(uint), hipMemcpyHostToDevice)); checkCudaErrors(hipBindTexture(0, numVertsTex, *d_numVertsTable, channelDesc)); } //1*nout(outCPU) __global__ void arrayproduct_k(float* out, float* x, float *y, int n) { extern __shared__ float sdata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; sdata[tid] = (i >= n) ? 0 : (x[i] * y[i]); __syncthreads(); for (int s = blockDim.x / 2; s>0; s >>= 1) { if (tid<s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = sdata[0]; } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. __global__ void computeAx(farray ans, charray mark, farray x, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (mark[idx] == TYPEFLUID) //todo: should add typesolid or not. { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = -6.0f*center; float h2_rev = dparam.cellsize.x*dparam.cellsize.x; //notice: xAIR0 sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k); sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k); sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1); sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k); sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k); sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ? 
center : x(i, j, k - 1); ans[idx] = sum / h2_rev; } else ans[idx] = 0.0f; } } //Ans = x + a*y __global__ void pcg_op(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (A[idx] == TYPEFLUID) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void buildprecondition_pcg(farray P, charray mark, farray ans, farray input, int n) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<n) { ans[idx] = 1.0f / 6 * input[idx]; } } __global__ void copyParticle2GL_vel_k(float3* ppos, float3 *pvel, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; if (pflag[idx] == TYPEFLUID) { rendercolor[idx * 3] = 1.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 0.0f; } else if (pflag[idx] == TYPEAIR) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 1.0f; } else if (pflag[idx] == TYPESOLID) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 1.0f; rendercolor[idx * 3 + 2] = 0.0f; } } } __global__ void copyParticle2GL_radius_k(float3* ppos, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor, float minmass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; minmass *= 1.2f; //trick float rate = (pmass[idx] - minmass*dparam.m0) / (dparam.m0 - minmass*dparam.m0); rate = fmax(0.0f, fmin(1.0f, rate)); { float3 color = mapColorBlue2Red(powf(rate, 1.0f / 3)*6.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } } __device__ inline void atomicaddfloat3(float3 *a, int idx, float3 b) { atomicAdd(&a[idx].x, b.x); atomicAdd(&a[idx].y, b.y); atomicAdd(&a[idx].z, b.z); } __global__ void smooth_computedisplacement(float3 *displacement, int *weight, float3 *ppos, uint *indices, int trianglenum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<trianglenum) { uint p1 = indices[idx * 3]; uint p2 = indices[idx * 3 + 1]; uint p3 = indices[idx * 3 + 2]; atomicaddfloat3(displacement, p1, ppos[p2] - ppos[p1]); atomicaddfloat3(displacement, p1, ppos[p3] - ppos[p1]); atomicaddfloat3(displacement, p2, ppos[p1] - ppos[p2]); atomicaddfloat3(displacement, p2, ppos[p3] - ppos[p2]); atomicaddfloat3(displacement, p3, ppos[p1] - ppos[p3]); atomicaddfloat3(displacement, p3, ppos[p2] - ppos[p3]); atomicAdd(&weight[p1], 2); atomicAdd(&weight[p2], 2); atomicAdd(&weight[p3], 2); } } __global__ void smooth_addDisplacement(float3 *displacement, int *weight, float3 *ppos, int vertexnum, float param) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<vertexnum) { if (weight[idx]>0) ppos[idx] += param * displacement[idx] / weight[idx]; displacement[idx] = make_float3(0.0f); weight[idx] = 0; } } //diffuse density field. __global__ void diffuse_dense(farray outp, farray inp, charray mark, float alpha, float beta) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < outp.xn * outp.yn * outp.zn) { float resp = 0; float p1, p2, p3, p4, p5, p6; float p0 = inp[idx]; int i, j, k; getijk(i, j, k, idx, outp.xn, outp.yn, outp.zn); if (mark(i, j, k) == TYPEBOUNDARY) outp[idx] = 0.0f; else { p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? 
p0 : inp(i + 1, j, k); p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j + 1, k); p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k + 1); p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : inp(i - 1, j, k); p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j - 1, k); p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta; outp[idx] = resp; } } } //diffuse velocity field. __global__ void diffuse_velocity(farray outv, farray inv, float alpha, float beta) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < outv.xn * outv.yn * outv.zn) { float resp = 0; float p1, p2, p3, p4, p5, p6; float p0 = inv[idx]; int i, j, k; getijk(i, j, k, idx, outv.xn, outv.yn, outv.zn); if (i == 0 || j == 0 || k == 0 || i >= outv.xn - 1 || j >= outv.yn - 1 || k >= outv.zn - 1) outv[idx] = p0; else { p1 = inv(i + 1, j, k); p2 = inv(i, j + 1, k); p3 = inv(i, j, k + 1); p4 = inv(i - 1, j, k); p5 = inv(i, j - 1, k); p6 = inv(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta; outv[idx] = resp; } } } //maxLength, hashPointsblockhash __global__ void createAABB_q(float3* points, int nPoints, uint3* faces, int nFaces, float *maxLength, float3* hashPoints) { int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= nFaces) return; __shared__ float maxArray[256]; uint p1 = faces[index].x; uint p2 = faces[index].y; uint p3 = faces[index].z; // float3 px = points[p1]; float3 py = points[p2]; float3 pz = points[p3]; AABB aabb; aabb.xMin = (px.x>py.x) ? py.x : px.x; aabb.xMin = (aabb.xMin>pz.x) ? pz.x : aabb.xMin; aabb.xMax = (px.x<py.x) ? py.x : px.x; aabb.xMax = (aabb.xMax<pz.x) ? pz.x : aabb.xMax; aabb.yMin = (px.y>py.y) ? py.y : px.y; aabb.yMin = (aabb.yMin>pz.y) ? pz.y : aabb.yMin; aabb.yMax = (px.y<py.y) ? py.y : px.y; aabb.yMax = (aabb.yMax<pz.y) ? pz.y : aabb.yMax; aabb.zMin = (px.z>py.z) ? py.z : px.z; aabb.zMin = (aabb.zMin>pz.z) ? pz.z : aabb.zMin; aabb.zMax = (px.z<py.z) ? py.z : px.z; aabb.zMax = (aabb.zMax<pz.z) ? pz.z : aabb.zMax; float tempMaxLength = aabb.xMax - aabb.xMin; tempMaxLength = (tempMaxLength>aabb.yMax - aabb.yMin) ? (tempMaxLength) : (aabb.yMax - aabb.yMin); tempMaxLength = (tempMaxLength>aabb.zMax - aabb.zMin) ? 
(tempMaxLength) : (aabb.zMax - aabb.zMin); maxArray[threadIdx.x] = tempMaxLength; hashPoints[index] = make_float3((aabb.xMin + aabb.xMax) / 2, (aabb.yMin + aabb.yMax) / 2, (aabb.zMin + aabb.zMax) / 2); __syncthreads(); for (int i = blockDim.x / 2; i>0; i /= 2) { if (threadIdx.x < i) maxArray[threadIdx.x] = max(maxArray[threadIdx.x], maxArray[i + threadIdx.x]); __syncthreads(); } if (threadIdx.x == 0) maxLength[blockIdx.x] = maxArray[0]; } __global__ void calcHash_radix_q( uint2* gridParticleIndex, // output float3* posArray, // input: positions uint numParticles, float3 t_min, float3 t_max) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 pos = posArray[index]; uint hash; int gz = (pos.z - t_min.z) / dparam.triHashSize.z; int gy = (pos.y - t_min.y) / dparam.triHashSize.y; int gx = (pos.x - t_min.x) / dparam.triHashSize.x; if (gx < 0 || gx > dparam.triHashRes.x - 1 || gy < 0 || gy > dparam.triHashRes.y - 1 || gz < 0 || gz > dparam.triHashRes.z - 1) hash = CELL_UNDEF; else hash = __mul24(__mul24(gz, (int)dparam.triHashRes.y) + gy, (int)dparam.triHashRes.x) + gx; // store grid hash and particle index gridParticleIndex[index] = make_uint2(hash, index); } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStart_radix_q(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index uint3* sortedFaces, uint2 * gridParticleHash, // input: sorted grid hashes uint3* oldFaces, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index].x; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1].x; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleHash[index].y; sortedFaces[index] = oldFaces[sortedIndex]; // see particles_kernel.cuh } } __global__ void calculateNormal(float3* points, uint3* faces, float3* normals, int num) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index < num) { uint3 face = faces[index]; float3 v1 = points[face.x]; float3 v2 = points[face.y]; float3 v3 = points[face.z]; float3 tmp; tmp.x = (v1.y - v2.y)*(v1.z - v3.z) - (v1.z - v2.z)*(v1.y - v3.y); tmp.y = (v1.z - v2.z)*(v1.x - v3.x) - (v1.x - v2.x)*(v1.z - v3.z); tmp.z = (v1.x - v2.x)*(v1.y - v3.y) - (v1.y - v2.y)*(v1.x - v3.x); normals[index] = normalize(tmp); } } //temp_yanglp: __device__ float IntersectTriangle_q(float3& pos, float radius, float3& v0, float3& v1, float3& v2, float3 n) { //compute the distance of pos and triangle plane float d = dot(pos - v0, n); if (abs(d)>radius) return -1; float dislimit = radius*radius - d*d; // float3 pTri = pos - d*n; float3 tempcross; float d0 = dot(pTri - v0, pTri - v0); float d1 = dot(pTri - v1, pTri - v1); float d2 = dot(pTri - v2, pTri - v2); // int tt = (dot(cross(pTri - v0, v1 - v0), n)>0) ? 1 : 0; tt += (dot(cross(pTri - v1, v2 - v1), n)>0) ? 2 : 0; tt += (dot(cross(pTri - v2, v0 - v2), n)>0) ? 4 : 0; //cuPrintf("tt=%d\n",tt); if (tt == 7 || tt == 0) { return abs(d); } // float distemp; float dis = (d0<dislimit) ? (d0) : dislimit; //dis dis = (d1<dis) ? (d1) : dis; dis = (d2<dis) ? (d2) : dis; // if (dot(v1 - v0, pTri - v0)*dot(v0 - v1, pTri - v1)>0) { tempcross = cross(v1 - v0, pTri - v0); distemp = dot(tempcross, tempcross) / dot(v1 - v0, v1 - v0); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v2 - v1, pTri - v1)*dot(v1 - v2, pTri - v2)>0) { tempcross = cross(v2 - v1, pTri - v1); distemp = dot(tempcross, tempcross) / dot(v2 - v1, v2 - v1); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v0 - v2, pTri - v2)*dot(v2 - v0, pTri - v0)>0) { tempcross = cross(v0 - v2, pTri - v2); distemp = dot(tempcross, tempcross) / dot(v0 - v2, v0 - v2); dis = (distemp<dis) ? 
(distemp) : dis; } if (dis > dislimit - 0.001) return -1; return sqrt(dis + d*d); } // calculate address in grid from position (clamping to edges) __device__ uint calcGridHash_q(int3 gridPos) { return __umul24(__umul24(gridPos.z, dparam.triHashRes.y), dparam.triHashRes.x) + __umul24(gridPos.y, dparam.triHashRes.x) + gridPos.x; } // collide a particle against all other particles in a given cell __device__ float3 collideCell(int3 gridPos, float3 pos, float radius, float3* surPoints, uint3* surIndex, float3* surfaceNor, uint* cellStart, uint* cellEnd, int scene) { uint gridHash = calcGridHash_q(gridPos); float dis_n, wib = 0; float3 force = make_float3(0.0f); // get start of bucket for this cell uint startIndex = cellStart[gridHash]; if (startIndex != CELL_UNDEF) { // cell is not empty // iterate over particles in this cell uint endIndex = cellEnd[gridHash]; for (uint j = startIndex; j<endIndex; j++) { //cuPrintf("j=%d\n", j); dis_n = IntersectTriangle_q(pos, radius, surPoints[surIndex[j].x], surPoints[surIndex[j].y], surPoints[surIndex[j].z], surfaceNor[j]); wib = 1 - dis_n / radius; if (dis_n >= 0 && wib > 0.00001) { force += (radius - dis_n) * (surfaceNor[j]) * 10; } } } return force; } __device__ void mindis_cell(float& mindisair, float& mindisfluid, float3 gpos, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, int gidx, float radius) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis; for (uint p = start; p<end; ++p) { dis = length(pos[p] - gpos);// // dis = fabs(length(pos[p] - gpos))- radius;// mantaflow if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO)//todo: SOLOls mindisair = (dis<mindisair) ? dis : mindisair; else if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) mindisfluid = (dis<mindisfluid) ? 
dis : mindisfluid; } } //level set //[2012]MultiFLIP for Energetic Two-Phase Fluid Simulation __global__ void genlevelset(farray lsfluid, farray lsair, charray mark, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, float fMCDensity, float offset) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) // { //float ls; float h = dparam.cellsize.x; mark[idx] = TYPEVACUUM; float r = 0.5f*h; //0.36f*h; //float r = 0.5*sqrt(3.)*1.01*2.5; //0.5*1.01 mantaflow //get position int i, j, k; getijk(i, j, k, idx, NX, NY, NZ); float3 gpos = (make_float3(i, j, k) + make_float3(0.5f, 0.5f, 0.5f))*dparam.cellsize.x; // shifted by half cell float mindisair = 2.5f*h, mindisfluid = 2.5f*h; //2.5 cellsize //float mindisair = r, mindisfluid = r; // mindis- r mantaflow int level = 2; for (int di = -level; di <= level; ++di) for (int dj = -level; dj <= level; ++dj) for (int dk = -level; dk <= level; ++dk) //27 { if (verifycellidx(i + di, j + dj, k + dk)) { mindis_cell(mindisair, mindisfluid, gpos, pos, parflag, pmass, gridstart, gridend, getidx(i + di, j + dj, k + dk), r); } } mindisair -= r; // mataflow mindisfluid -= r; lsfluid[idx] = mindisfluid; // lsair[idx] = mindisair - offset*h; //todo: lscorrectposmarkgridmark lsair[idx] = mindisair; } } __device__ void sumcell_fluidSolid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_fluidSolid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 0; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 0; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? 
(usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 0; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? (usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_air(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_air(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? (usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_solid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. 
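			// "scale is necessary": particle positions are stored in world units while
			// gpos is expressed in grid-cell units, so positions are rescaled by
			// 1/cellsize before forming the squared distance fed to sharp_kernel.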
w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_solid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? 
(usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } // __global__ void cptdivergence_bubble(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls, farray sf) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float jx0, jx1, jy0, jy1, jz0, jz1, J; //surface tension, [2005]Discontinuous Fluids float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1), jz1 = 0; else if (mark[idx] == TYPEAIR 
&& mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1), jz1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } J = (jx1 - jx0 + jy1 - jy0 + jz1 - jz0) / h / h; div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; div += J; //surfacetension } outdiv[idx] = div; } } // __global__ void cptdivergence_bubble2(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); //ux1 = airux(i+1,j,k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); //ux1 = airux(i+1,j,k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); //ux0 = airux(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); //ux0 = airux(i,j,k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } else if (mark[idx] == 
TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); // uy0 = airuy(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); //uy0 = airuy(i,j,k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); //uz0 = airuz(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); //uz0 = airuz(i,j,k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } __global__ void cptdivergence_bubble3(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * waterux(i+1,j,k) + (1-theta) * airux(i+1,j,k); ux1 = airux(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * airux(i+1,j,k) + (1-theta) * waterux(i+1,j,k); ux1 = airux(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * waterux(i,j,k) + (1-theta) * airux(i,j,k); ux0 = airux(i, 
j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * airux(i,j,k) + (1-theta) * waterux(i,j,k); ux0 = airux(i, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * wateruy(i,j+1,k) + (1-theta) * airuy(i,j+1,k); uy1 = airuy(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * airuy(i,j+1,k) + (1-theta) * wateruy(i,j+1,k); uy1 = airuy(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * wateruy(i,j,k) + (1-theta) * airuy(i,j,k); uy0 = airuy(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * airuy(i,j,k) + (1-theta) * wateruy(i,j,k); uy0 = airuy(i, j, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * wateruz(i,j,k+1) + (1-theta) * airuz(i,j,k+1); uz1 = airuz(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * airuz(i,j,k+1) + (1-theta) * wateruz(i,j,k+1); uz1 = airuz(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * wateruz(i,j,k) + (1-theta) * airuz(i,j,k); uz0 = airuz(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * airuz(i,j,k) + (1-theta) * wateruz(i,j,k); uz0 = airuz(i, j, k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } // __global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. 
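// ----------------------------------------------------------------------------
// Pressure solve (bubble pass), matrix-free: the kernel below applies the
// 7-point Laplacian A to a grid field x, mirroring the center value across
// TYPEBOUNDARY faces (Neumann condition at solid walls). Note that, despite
// its name, h2_rev holds h*h and the stencil sum is divided by it.
//
// A minimal, hypothetical host-side conjugate-gradient step built on these
// kernels could look like the sketch below. The names dotProduct, gsblocknum,
// threadnum and the fields p, r, Ap, x are placeholders, not part of this file:
//
//   computeAx_bubble<<<gsblocknum, threadnum>>>(Ap, mark, p, gnum);            // Ap = A*p
//   float alpha = rz / dotProduct(p, Ap, gnum);                                // CG step size
//   pcg_op_bubble<<<gsblocknum, threadnum>>>(mark, x, x, p,  alpha, gnum);     // x = x + alpha*p
//   pcg_op_bubble<<<gsblocknum, threadnum>>>(mark, r, r, Ap, -alpha, gnum);    // r = r - alpha*Ap
// ----------------------------------------------------------------------------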
__global__ void computeAx_bubble(farray ans, charray mark, farray x, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = -6.0f*center; float h2_rev = dparam.cellsize.x*dparam.cellsize.x; sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k); sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k); sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1); sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k); sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k); sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ? center : x(i, j, k - 1); ans[idx] = sum / h2_rev; } else ans[idx] = 0.0f; } } //Ans = x + a*y __global__ void pcg_op_bubble(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (A[idx] == TYPEFLUID || A[idx] == TYPEAIR) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } //(TYPEFLUID)AIR(AIRSOLO)CIP. __global__ void advectparticle_RK2_bubble(float3 *ppos, float3 *pvel, int pnum, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEAIRSOLO) //AIRSOLO return; //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x)); char partype = parflag[idx]; //pos-->grid xyz float3 gvel = make_float3(0.0f); if (partype == TYPEFLUID) gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz); else if (partype == TYPEAIR) gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz); else //TYPEAIRSOLO return; if (velmode == CIP /*|| partype==TYPEAIR*/) //todo: cip ivel = gvel; else ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //mid point: x(n+1/2) = x(n) + 0.5*dt*u(xn) float3 midpoint = ipos + gvel * dt * 0.5; float3 gvelmidpoint; if (partype == TYPEFLUID) gvelmidpoint = getParticleVelFromGrid(midpoint, waterux, wateruy, wateruz); else gvelmidpoint = getParticleVelFromGrid(midpoint, airux, airuy, airuz); // x(n+1) = x(n) + dt*u(x+1/2) ipos += gvelmidpoint * dt; //check boundary if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back: TYPEAIR+TYPESOLIDTYPESOLOreturnTYPEFLUID pvel[idx] = ivel; // if( partype==TYPEFLUID ) // ppos[idx] = ipos; } } __global__ void mapvelg2p_flip_bubble(float3 *ppos, float3 *vel, char* parflag, int pnum, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float3 gvel = make_float3(0.0f); if (parflag[idx] == TYPEFLUID || parflag[idx] == TYPESOLID) gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz); else if (parflag[idx] == TYPEAIR) gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz); vel[idx] += gvel; } } __global__ void compsurfacetension_k(farray sf, charray mark, farray phigrax, 
farray phigray, farray phigraz, float sigma) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) { int i, j, k; getijk(i, j, k, idx); float len, h = dparam.cellsize.x; float res, grax1, gray1, graz1, grax0, gray0, graz0; float3 phigracenter = make_float3(phigrax[idx], phigray[idx], phigraz[idx]); len = length(phigracenter); if (len == 0) res = 0; else { phigracenter /= len; if (verifycellidx(i + 1, j, k)) { len = length(make_float3(phigrax(i + 1, j, k), phigray(i + 1, j, k), phigraz(i + 1, j, k))); if (len == 0) grax1 = phigracenter.x; else grax1 = phigrax(i + 1, j, k) / len; } else grax1 = phigracenter.x; if (verifycellidx(i - 1, j, k)) { len = length(make_float3(phigrax(i - 1, j, k), phigray(i - 1, j, k), phigraz(i - 1, j, k))); if (len == 0) grax0 = phigracenter.x; else grax0 = phigrax(i - 1, j, k) / len; } else grax0 = phigracenter.x; if (verifycellidx(i, j + 1, k)) { len = length(make_float3(phigrax(i, j + 1, k), phigray(i, j + 1, k), phigraz(i, j + 1, k))); if (len == 0) gray1 = phigracenter.y; else gray1 = phigray(i, j + 1, k) / len; } else gray1 = phigracenter.y; if (verifycellidx(i, j - 1, k)) { len = length(make_float3(phigrax(i, j - 1, k), phigray(i, j - 1, k), phigraz(i, j - 1, k))); if (len == 0) gray0 = phigracenter.y; else gray0 = phigray(i, j - 1, k) / len; } else gray0 = phigracenter.y; if (verifycellidx(i, j, k + 1)) { len = length(make_float3(phigrax(i, j, k + 1), phigray(i, j, k + 1), phigraz(i, j, k + 1))); if (len == 0) graz1 = phigracenter.z; else graz1 = phigraz(i, j, k + 1) / len; } else graz1 = phigracenter.z; if (verifycellidx(i, j, k - 1)) { len = length(make_float3(phigrax(i, j, k - 1), phigray(i, j, k - 1), phigraz(i, j, k - 1))); if (len == 0) graz0 = phigracenter.z; else graz0 = phigraz(i, j, k - 1) / len; } else graz0 = phigracenter.z; res = (grax1 - grax0 + gray1 - gray0 + graz1 - graz0) / h * 0.5f; //res = (grax1-phigracenter.x + gray1-phigracenter.y + graz1-phigracenter.z) / h ; } sf[idx] = res*sigma; } else sf[idx] = 0; } } __global__ void enforcesurfacetension_p(float3* ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray sf, farray phigrax, farray phigray, farray phigraz, charray mark, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID/* || pflag[idx]==TYPEAIRSOLO*/ || pflag[idx] == TYPEFLUID) return; if( (scene != SCENE_MELTANDBOIL&&scene != SCENE_MELTANDBOIL_HIGHRES && pflag[idx] == TYPEAIRSOLO) || ((scene != SCENE_ALL && pflag[idx] == TYPEAIRSOLO))) return; //1. compute the cell, and get the ls, get sf. float3 ipos = ppos[idx]; float ilsmerge = getScaleFromFrid(ipos, lsmerge); float isf = getScaleFromFrid(ipos, sf); float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); float lendir = length(dir); if (lendir == 0) return; float3 f; dir /= lendir; ilsmerge /= lendir; // int i, j, k; getijkfrompos(i, j, k, ipos); int cnt = (mark(i, j, k) == TYPEAIR) ? 1 : 0; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) if (verifycellidx(i + di, j + dj, k + dk)) if (mark(i + di, j + dj, k + dk) == TYPEAIR) cnt++; if (cnt == 0) return; // if(abs(ls_p)<threshold), enforce a surface tension force, change the velocity. 
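		// What follows applies the surface-tension impulse to gas particles that sit
		// within one cell of the merged interface: the force is taken along the level-set
		// gradient (the interface normal), scaled by the sampled tension value sf, and
		// integrated explicitly over one time step. Particles farther from the interface,
		// or without any TYPEAIR cell in the surrounding cells, were already skipped above.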
if (abs(ilsmerge)<dparam.cellsize.x) { f = -isf*dir; pvel[idx] += f*dparam.dt; } } } //levelset __global__ void markLS_bigpositive(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] / dparam.cellsize.x; if (ls[idx] >1.99f) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //sweep } else mark[idx] = TYPEFLUID; } } __global__ void setLSback_bigpositive(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] * dparam.cellsize.x; } } __global__ void preparels(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] / dparam.cellsize.x; if (ls[idx] >0) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //sweep } else mark[idx] = TYPEFLUID; } } __global__ void setLSback(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] * dparam.cellsize.x; } } __global__ void mergeLSAndMarkGrid(farray lsmerge, charray mark, farray lsfluid, farray lsair) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx< dparam.gnum) { float h = dparam.cellsize.x; if (lsair[idx] >4.99f * h) { lsmerge[idx] = lsfluid[idx]; if (lsfluid[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEFLUID; } else if (lsfluid[idx]>4.99f*h) { lsmerge[idx] = lsair[idx]; if (lsair[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEAIR; } else if (lsair[idx]>0.8f*h && lsfluid[idx]>0.8f*h) { mark[idx] = TYPEVACUUM; lsmerge[idx] = min(lsfluid[idx], lsair[idx]); } else { lsmerge[idx] = (lsfluid[idx] - lsair[idx])*0.5f; if (lsmerge[idx]>0) mark[idx] = TYPEAIR; else mark[idx] = TYPEFLUID; } //todo: ls int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY, lsmerge[idx] = -0.5f*h; //todo: debug: //lsmerge[idx] = -lsmerge[idx]; } } __global__ void sweepu_k_bubble(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray ls, charray mark, char sweepflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; // if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i - 1, j, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ -1) continue; wx = -di*(ls(i, j, k) - ls(i - 1, j, k)); if (wx<0) continue; wy = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j + dj, k) - ls(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j, k + dk) - ls(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i, j - 1, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di>NX - 1 || k + dk<0 || k + dk >NZ - 1) continue; wy = -dj*(ls(i, j, k) - ls(i, j - 1, k)); if (wy<0) continue; 
wx = (ls(i, j, k) + ls(i, j - 1, k) - ls(i + di, j, k) - ls(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (ls(i, j, k) + ls(i, j - 1, k) - ls(i, j, k + dk) - ls(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ((mark(i, j, k) != sweepflag && mark(i, j, k - 1) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(ls(i, j, k) - ls(i, j, k - 1)); if (wz<0) continue; wy = (ls(i, j, k) + ls(i, j, k - 1) - ls(i, j + dj, k) - ls(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (ls(i, j, k) + ls(i, j, k - 1) - ls(i + di, j, k) - ls(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } //"" __global__ void correctbubblepos(farray ls, farray phigrax, farray phigray, farray phigraz, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. pphi[idx] = d; //todo: if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) // ipos = ipos - d*dir; else if (iflag == TYPEFLUID) { ipos = ipos - d*dir; dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; ipos = ipos + s*(rs - s*d)*dir; } // cnt++; } else if (iflag == TYPEFLUID && s*d<rs*0.5f && s*d >= 0) //todo: rs*0.5f0.5 { ipos = ipos + s*(rs - s*d)*dir; } ppos[idx] = ipos; } } //"". //ls __global__ void correctbubblepos_air(farray lsmerge, farray phigrax, farray phigray, farray phigraz, farray lsair, farray phigrax_air, farray phigray_air, farray phigraz_air, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsmerge) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. 
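		// d is the gradient-normalized merged level-set value at the particle and s encodes
		// which side it should be on (+1 for air, -1 for fluid). The block below stores d for
		// inspection and appears to pull wrong-side air particles back toward the interface,
		// while fluid particles are additionally tested against the air level set so they do
		// not end up inside a bubble.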
pphi[idx] = d; //todo: if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) // ipos = ipos - d*dir; // cnt++; } if (iflag == TYPEFLUID) //level setlsmerge { dir = getVectorFromGrid(ipos, phigrax_air, phigray_air, phigraz_air); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsair) / dirlen; if (d<-1.3f*rs) ipos = ipos - (d - rs)*dir; } ppos[idx] = ipos; } } //levelset __global__ void computePhigra(farray phigrax, farray phigray, farray phigraz, farray ls) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float h = dparam.cellsize.x; float lsx1, lsx0, lsy1, lsy0, lsz1, lsz0, lscenter = ls[idx]; lsx1 = (verifycellidx(i + 1, j, k)) ? ls(i + 1, j, k) : lscenter; lsx0 = (verifycellidx(i - 1, j, k)) ? ls(i - 1, j, k) : lscenter; lsy1 = (verifycellidx(i, j + 1, k)) ? ls(i, j + 1, k) : lscenter; lsy0 = (verifycellidx(i, j - 1, k)) ? ls(i, j - 1, k) : lscenter; lsz1 = (verifycellidx(i, j, k + 1)) ? ls(i, j, k + 1) : lscenter; lsz0 = (verifycellidx(i, j, k - 1)) ? ls(i, j, k - 1) : lscenter; //todo: phigrax[idx] = ((lsx1 - lsx0)*0.5f) / h; phigray[idx] = ((lsy1 - lsy0)*0.5f) / h; phigraz[idx] = ((lsz1 - lsz0)*0.5f) / h; //phigrax[idx] = (lsx1-lscenter)/h; //phigray[idx] = (lsy1-lscenter)/h; //phigraz[idx] = (lsz1-lscenter)/h; } } __global__ void copyParticle2GL_phi(float3* ppos, char *pflag, float *pmass, float *pTemperature, int pnum, float *renderpos, float *rendercolor, farray ls, farray phigrax, farray phigray, farray phigraz, char typeflag, float Tmax, float Tmin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //todo: if (pflag[idx] == typeflag/* || ppos[idx].y<NY*0.5f*dparam.cellsize.x */) { renderpos[idx * 3] = -2.0f; renderpos[idx * 3 + 1] = 0.0f; renderpos[idx * 3 + 2] = 0.0f; float3 color = make_float3(0.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; return; } renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; float3 color; if (pflag[idx] == TYPEAIR) color = mapColorBlue2Red(0.0f); else if (pflag[idx] == TYPEFLUID) color = mapColorBlue2Red(2.0f); else if (pflag[idx] == TYPESOLID) color = mapColorBlue2Red(4.0f); else color = mapColorBlue2Red(6.0f); //color=mapColorBlue2Red( (pTemperature[idx]-Tmin)/(Tmax-Tmin)*6.0f ); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } //surface tension. 
// [2005] Discontinuous Fluids
__global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz, farray sf, farray lsmerge, charray mark)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	int i, j, k;
	float h = dparam.cellsize.x;
	float J = 0.0f, theta;
	if (idx<dparam.gvnum.x)
	{
		J = 0.0f;
		//ux
		getijk(i, j, k, idx, NX + 1, NY, NZ);
		if (i>0 && i<NX)	//look out for this condition
		{
			if ((mark(i, j, k) == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR))
			{
				theta = (0.0f - lsmerge(i - 1, j, k)) / (lsmerge(i, j, k) - lsmerge(i - 1, j, k));
				J = theta*sf(i - 1, j, k) + (1.0f - theta)*sf(i, j, k);
			}
			ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k) - J) / h;
		}
	}
	if (idx<dparam.gvnum.y)
	{
		J = 0.0f;
		//uy
		getijk(i, j, k, idx, NX, NY + 1, NZ);
		if (j>0 && j<NY)	//look out for this condition
		{
			if ((mark(i, j, k) == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR))
			{
				theta = (0.0f - lsmerge(i, j - 1, k)) / (lsmerge(i, j, k) - lsmerge(i, j - 1, k));
				J = theta*sf(i, j - 1, k) + (1.0f - theta)*sf(i, j, k);
			}
			uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k) - J) / h;
		}
	}
	if (idx<dparam.gvnum.z)
	{
		J = 0.0f;
		//uz
		getijk(i, j, k, idx, NX, NY, NZ + 1);
		if (k>0 && k<NZ)	//look out for this condition
		{
			if ((mark(i, j, k) == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR))
			{
				theta = (0.0f - lsmerge(i, j, k - 1)) / (lsmerge(i, j, k) - lsmerge(i, j, k - 1));
				J = theta*sf(i, j, k - 1) + (1.0f - theta)*sf(i, j, k);
			}
			uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1) - J) / h;
		}
	}
}

__global__ void sweepVacuum(charray mark)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<dparam.gnum)
	{
		int i, j, k;
		getijk(i, j, k, idx);
		if (mark[idx] != TYPEAIR)
			return;
		//mark
		for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2)
			if (mark(i + di, j + dj, k + dk) == TYPEVACUUM)
				mark[idx] = TYPEVACUUM;
	}
}

__global__ void markDeleteAirParticle(float3* ppos, char* pflag, float *pmass, uint *preservemark, int pnum, charray mark, farray lsmerge, farray lsair, uint *cnt)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<pnum)
	{
		//fluid and solid particles are preserved, air and airsolo particles are verified.
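		// preservemark is consumed by a prefix-sum (scan) followed by the deleteparticles
		// kernel further below, which compacts the particle arrays. Solid particles are always
		// kept; fluid particles are dropped only when they sit deep inside the air level set;
		// air / airsolo particles are dropped when a vacuum cell is adjacent or their mass has
		// been reduced to (almost) zero by updatebubblemass.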
if (pflag[idx] == TYPESOLID) { preservemark[idx] = 1; return; } int i, j, k; getijkfrompos(i, j, k, ppos[idx]); if (pflag[idx] == TYPEFLUID) { float lsm = getScaleFromFrid(ppos[idx], lsmerge); float lsa = getScaleFromFrid(ppos[idx], lsair); if ( /*lsm>1.2f*dparam.cellsize.x || */lsa<-1.0*dparam.cellsize.x) preservemark[idx] = 0, cnt[0]++; else preservemark[idx] = 1; return; } int cnt = 0; for (int di = -1; di <= 1; di += 1) for (int dj = -1; dj <= 1; dj += 1) for (int dk = -1; dk <= 1; dk += 1) if (verifycellidx(i + di, j + dj, k + dk) && mark(i + di, j + dj, k + dk) == TYPEVACUUM) cnt++; if (cnt == 0 && pmass[idx]>0.000001f) //notice: preservemark[idx] = 1; else preservemark[idx] = 0; } } // compact voxel array __global__ void deleteparticles(uint *preserveflag, uint *preserveflagscan, int pnum, float3 *outpos, float3 *pos, float3 *outvel, float3 *vel, float *outmass, float* mass, char *outflag, char *flag, float *outTemperature, float *temperature, float *outheat, float *heat, float *outsolubility, float *solubility, float *outgascontain, float *gascontain) { uint idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (preserveflag[idx] == 1) { //deleteflagscan "". uint outidx = preserveflagscan[idx]; outpos[outidx] = pos[idx]; outvel[outidx] = vel[idx]; outmass[outidx] = mass[idx]; outflag[outidx] = flag[idx]; outTemperature[outidx] = temperature[idx]; outheat[outidx] = heat[idx]; outsolubility[outidx] = solubility[idx]; outgascontain[outidx] = gascontain[idx]; } } } __device__ int cntairparticle(float3 *ppos, char *pflag, int igrid, uint *gridstart, uint *gridend, const float3 &ipos, float r) { uint start = gridstart[igrid]; int res = 0; float dis; if (start == CELL_UNDEF) return res; for (int p = start; p<gridend[igrid]; p++) { dis = length(ppos[p] - ipos); if (dis<r && (pflag[p] == TYPEAIR || pflag[p] == TYPEAIRSOLO)) { ++res; } } return res; } __device__ inline bool isInBoundaryCell(int x, int y, int z) { int level = 2; if (x <= level || x >= NX - 1 - level || y <= level || y >= NY - 1 - level) return true; else return false; } __global__ void verifySoloAirParticle(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray airux, farray airuy, farray airuz, uint *gridstart, uint *gridend, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; if (iflag == TYPEFLUID || iflag == TYPESOLID) //TYPEAIR, TYPEAIRSOLO can go on. return; float3 ipos = ppos[idx]; float ls = getScaleFromFrid(ipos, lsmerge); float h = dparam.cellsize.x; int i, j, k; getijkfrompos(i, j, k, ipos); //a key adjustment, the tolerent will affect the result directly. int cnt = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) cnt += cntairparticle(ppos, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, h); float tol1 = -1.45f, tol2 = -0.5f; if (scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene==SCENE_ALL) tol1 = 0.05f, tol2 = -0.8f; else if (scene == SCENE_INTERACTION) tol1 = 0.2f, tol2 = -0.5f; if ((cnt >= 10 || ls>tol1*h) && pflag[idx] == TYPEAIRSOLO && !isInBoundaryCell(i, j, k)) //decide whether the air solo particle should be transfered to air particle. { if (cnt >= 3) pflag[idx] = TYPEAIR; } else if (iflag == TYPEAIR && (isInBoundaryCell(i, j, k) || ls<tol2*h || cnt <= 1)) { //todo: or not??? 
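			// Demotion path: a grid-coupled TYPEAIR particle that has drifted into a boundary
			// cell, sits too deep inside the liquid (ls < tol2*h), or has (almost) no gas
			// neighbours left is turned back into a solo SPH bubble. Its velocity is re-sampled
			// from the air grid so the change of integration scheme does not inject momentum.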
//pvel[idx]= pvel[idx]*0.8f + 0.2f*getParticleVelFromGrid(ipos,airux,airuy,airuz); pvel[idx] = getParticleVelFromGrid(ipos, airux, airuy, airuz); pflag[idx] = TYPEAIRSOLO; } } } __device__ float sumdensity(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { // notice: should include liquid particle, not just spray particle. if (pflag[p] != TYPEAIR && pflag[p] != TYPEAIRSOLO) continue; dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); //todo: m0 or pmass[p]? } return res; } __global__ void calcDensPress_Air(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIR && pflag[idx] != TYPEAIRSOLO) return; float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.airm0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho*0.5f); } } __device__ float3 sumforce(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h && (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR)) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSoloAirP(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH, float maxVelForBubble) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIRSOLO && pflag[idx] != TYPEAIR) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); //todo: ?? force *= dparam.airm0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; //restrict the vel below a threshold. // if( length(ivel) > maxVelForBubble ) // ivel = normalize(ivel) * maxVelForBubble; // // advect particle, using rho!!!! 
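		// Only the velocity is written back here; the position update computed above is left
		// commented out, so advection of solo bubbles is handled by the advection path
		// elsewhere. The force itself is a standard SPH pair force: a symmetrized pressure
		// term using the spiky-kernel gradient plus a viscosity term using the Laplacian
		// kernel, restricted to TYPEAIR / TYPEAIRSOLO neighbours and scaled by dparam.airm0.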
// ppos[idx]=ipos; pvel[idx] = ivel; } } __device__ float sumdensity_SLCouple(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); } return res; } //solid-liquid coupling, in SPH framework __global__ void calcDensPressSPH_SLCouple(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity_SLCouple(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.m0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho); } } __device__ float3 sumforce_SLCouple(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, kvis=0.0f; if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSPH_SLCouple(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEFLUID) //fluidsolid return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce_SLCouple(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); // force=make_float3(0.0f); //todo: ?? //add gravity here? or external force part; force *= dparam.m0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; // advect particle, using rho!!!! 
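		// Unlike the solo-air variant above, the solid-liquid coupling pass writes back both
		// position and velocity for the fluid particles it processes. The pair force sums over
		// all neighbouring particles regardless of flag (solid particles included), which is
		// what couples the two phases, and it uses the liquid particle mass dparam.m0.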
		ppos[idx] = ipos;
		pvel[idx] = ivel;
	}
}

__global__ void updateFixedHeat(farray fixedHeat, int frame)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<dparam.gnum)
	{
		int i, j, k;
		getijk(i, j, k, idx);
		if (i >= NX / 4 && i<NX*0.75 && j >= NY / 4 && j<NY*0.75 && k <= 3 /*k<=20 && k>=19*/)
			fixedHeat[idx] = 273.0f + 100.0f * min(frame / 40.f, 1.0f);
		else
			fixedHeat[idx] = UNDEF_TEMPERATURE;
	}
}

__global__ void addHeatAtBottom(farray Tp, int frame, float heatIncreaseBottom)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<dparam.gnum)
	{
		int i, j, k;
		getijk(i, j, k, idx);
		if (i >= 1 && i<NX - 1 && j >= 1 && j<NY - 1 && k <= 3 /*k<=20 && k>=19*/)
			Tp[idx] += heatIncreaseBottom;	//1.5f;
		//Tp[idx] = 350.0f;	//273.0f + 100.0f * min(frame/40.f, 1.0f );
		Tp[idx] = min(378.0f, Tp[idx]);
	}
}

//
__global__ void compb_heat(farray Tp_old, farray Tp, farray fixedheat, charray mark, float *heatAlphaArray)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx <dparam.gnum)
	{
		int i, j, k;
		getijk(i, j, k, idx);
		float alpha = heatAlphaArray[mark[idx]];
		//tpbfixedheat
		// if( fixedheat[idx]!=UNDEF_TEMPERATURE )
		//	Tp[idx]=fixedheat[idx], Tp_old[idx] = fixedheat[idx]*dparam.cellsize.x*dparam.cellsize.x/alpha/dparam.dt;
		// else
		Tp_old[idx] = Tp[idx] * dparam.cellsize.x*dparam.cellsize.x / alpha / dparam.dt;
	}
}

//z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation.
__global__ void computeAx_heat(farray ans, charray mark, farray x, int n, float *heatAlphaArray, farray fixedHeat, SCENE scene)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<n)
	{
		float h = dparam.cellsize.x;
		float dt = dparam.dt;
		float alpha = heatAlphaArray[mark[idx]];
		if (mark[idx] != TYPEBOUNDARY/* && mark[idx]!=TYPEVACUUM*/)
		{
			int i, j, k;
			getijk(i, j, k, idx);
			float center = x[idx];
			float sum = (h*h / alpha / dt + 6.0f)*center;
			//trick: freeair
			if (scene == SCENE_BOILING || scene == SCENE_BOILING_HIGHRES || scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene == SCENE_ALL)
			{
				sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY || mark(i + 1, j, k) == TYPEVACUUM) ? center : x(i + 1, j, k));
				sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY || mark(i, j + 1, k) == TYPEVACUUM) ? center : x(i, j + 1, k));
				sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY || mark(i, j, k + 1) == TYPEVACUUM) ? center : x(i, j, k + 1));
				sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY || mark(i - 1, j, k) == TYPEVACUUM) ? center : x(i - 1, j, k));
				sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY || mark(i, j - 1, k) == TYPEVACUUM) ? center : x(i, j - 1, k));
				sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY || mark(i, j, k - 1) == TYPEVACUUM) ? center : x(i, j, k - 1));
			}
			else
			{
				sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k));
				sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k));
				sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1));
				sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k));
				sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k));
				sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY) ?
center : x(i, j, k - 1)); } ans[idx] = sum; } } } //Ans = x + a*y __global__ void pcg_op_heat(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { // if( A[idx]==TYPEFLUID || A[idx]==TYPEAIR ) if (A[idx] != TYPEBOUNDARY) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void setBoundaryHeat(farray tp) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == NX - 1) tp[idx] = tp(i - 1, j, k); else if (i == 0) tp[idx] = tp(i + 1, j, k); else if (j == NY - 1) tp[idx] = tp(i, j - 1, k); else if (j == 0) tp[idx] = tp(i, j + 1, k); else if (k == NZ - 1) tp[idx] = tp(i, j, k - 1); else if (k == 0) tp[idx] = tp(i, j, k + 1); } } __global__ void compTpChange(farray tp, farray tpsave, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) tpsave[idx] = tp[idx] - tpsave[idx]; else tpsave[idx] = 0; } } __device__ void sumHeat(float &heatsum, float &weight, float3 gpos, float3 *pos, float *pTemperature, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = sharp_kernel(dis2, RE); weight += w; heatsum += w*pTemperature[p]; } } __global__ void mapHeatp2g_hash(float3 *ppos, float *pTemperature, int pnum, farray heat, uint* gridstart, uint *gridend, float defaulttemperature) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; float weight = 0.0f, heatsum = 0; float3 gpos; getijk(i, j, k, idx); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumHeat(heatsum, weight, gpos, ppos, pTemperature, gridstart, gridend, getidx(i + di, j + dj, k + dk)); heatsum = (weight>0) ? (heatsum / weight) : defaulttemperature; heat(i, j, k) = heatsum; } } __global__ void mapHeatg2p(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; pTemperature[idx] = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. } } __global__ void mapHeatg2p_MeltAndBoil(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float newtemp = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. 
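		// Solid particles only take a small fraction (5%) of the freshly interpolated
		// temperature per step, which slows heat uptake by the solid and keeps melting
		// gradual; fluid and gas particles adopt the FLIP-blended value directly.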
if (parflag[idx] == TYPESOLID) pTemperature[idx] = 0.95f*(pTemperature[idx]) + 0.05f*newtemp; else pTemperature[idx] = newtemp; } } __global__ void initHeatParticle(float *pTemperature, float *pHeat, float defaultSolidT, float defaultLiquidT, float LiquidHeatTh, char *pflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) { pTemperature[idx] = defaultSolidT; pHeat[idx] = 0; } else { pTemperature[idx] = defaultLiquidT; pHeat[idx] = LiquidHeatTh; } } } //Temperature0=273.15K, Solubility0=1.0f (1) __global__ void initsolubility_k(float *psolubility, float* pgascontain, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate, float initgasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID || pflag[idx] == TYPESOLID) { psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. pgascontain[idx] = initgasrate*psolubility[idx]; } else { psolubility[idx] = 0; pgascontain[idx] = 0; } } } //Temperature0=273.15K, Solubility0=1.0f (1) __global__ void updatesolubility(float *psolubility, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID) psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. } } //addparnums0 __global__ void GenerateGasParticle_k(float *psolubility, float *paircontain, float3 *ppos, float3 *pvel, float *pmass, char *pflag, float *pTemperature, float *pLHeat, int pnum, uint *gridstart, uint *gridend, int *addparnums, float *randfloat, int randcnts, int frame, farray gTemperature, float LiquidHeatTh, int *seedcell, int seednum, float vaporGenRate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { float gcontain = 0, gsolubility = 0, gairexist = 0; int liquidParCnt = 0, gasParCnt = 0; float airparticlemass0 = dparam.airm0; //todo float vaporsum = 0;//, vaporrate = 0.01f; float3 gaspos = make_float3(0), gasvel = make_float3(0); int i, j, k; getijk(i, j, k, idx); if (k <= 1 || isInBoundaryCell(i, j, k)) return; // float3 gpos = make_float3(i, j, k)*dparam.cellsize.x; uint start = gridstart[idx]; if (start == CELL_UNDEF) return; //1. 
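		// Pass 1 over the particles of this cell: accumulate the dissolved-gas content and
		// solubility of the liquid particles, convert latent heat above LiquidHeatTh into
		// vapour mass at rate vaporGenRate, and record the total mass, centroid and velocity
		// of any gas particles already present. A new TYPEAIRSOLO particle is emitted further
		// below once (content - solubility + vapour) exceeds one air-particle mass, provided
		// the cell is a nucleation seed or already contains gas.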
for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; vaporsum += max(0.0f, pLHeat[p] - LiquidHeatTh) * vaporGenRate * airparticlemass0; liquidParCnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; gaspos += ppos[p]; gasvel += pvel[p]; gasParCnt++; } } bool hasseed = false; for (int i = 0; i<seednum; i++) if (seedcell[i] == idx) hasseed = true; // int addcnt = 0; int randbase = (idx*frame) % (randcnts - 200); //randpos and randfloat are in [0,1] float3 randpos = make_float3(randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts]); float randnum = randfloat[(randbase + addcnt++) % randcnts]; float r = dparam.cellsize.x * 0.25f; if (gcontain - gsolubility + vaporsum > airparticlemass0 && (hasseed || gasParCnt>0)) { int addindex = atomicAdd(&addparnums[0], 1) + pnum; pmass[addindex] = airparticlemass0;//dparam.m0; //todo: if (gasParCnt>0) { ppos[addindex] = gaspos / gasParCnt + (max(0.5f, randnum)*r) * (randpos - make_float3(0.5f)) * 2; // pvel[addindex] = make_float3(0.0f);//gasvel/gasParCnt; // } else { ppos[addindex] = gpos + dparam.cellsize.x*randpos; pvel[addindex] = make_float3(0.0f); } pflag[addindex] = TYPEAIRSOLO; pTemperature[addindex] = gTemperature[idx]; // pLHeat[addindex] = 0; //heat paircontain[addindex] = 0.0f; psolubility[addindex] = 0.0f; // for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { paircontain[p] = min(paircontain[p], psolubility[p]); pLHeat[p] = min(pLHeat[p], LiquidHeatTh); //todo: decrease the liquids mass. } } } } } //addparnums0 __global__ void updatebubblemass(float *psolubility, float *paircontain, float3 *ppos, float *pmass, char *pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum / 8) //8 { float gcontain = 0, gsolubility = 0, gairexist = 0; int fpcnt = 0, apcnt = 0; float airparticlemass0 = dparam.airm0; //todo int i, j, k; getijk(i, j, k, idx, NX / 2, NY / 2, NZ / 2); i *= 2, j *= 2, k *= 2; // float3 gpos; int gidx; for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { gidx = getidx(i + di, j + dj, k + dk); // gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x; if (gridstart[gidx] == CELL_UNDEF) continue; //1. for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; fpcnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; apcnt++; } } } //2. 
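		// Pass 2 (per 2x2x2 block): redistribute the gas excess or deficit. If the dissolved
		// content exceeds the solubility, the surplus is added to existing bubble particles
		// (capped per particle and by a maximum bubble radius) and removed from the liquid
		// particles' dissolved content. If the block is undersaturated and bubbles exist,
		// bubble mass is dissolved back into the liquid; particles whose mass reaches zero
		// are deleted by a later kernel.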
float maxradius = 1.5f*dparam.cellsize.x; float maxmass = getMassfromR(maxradius); float massaddlimit = 3.0f*dparam.airm0; //3 float addmass; if (gcontain>gsolubility) { //todo: if (abs(gcontain - gsolubility) < 2.5*airparticlemass0/*1.3f*gsolubility*/) // return; //2.1: float needadd = gcontain - gsolubility; if (apcnt>0) { for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (needadd <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; // gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x; for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { addmass = min(massaddlimit, maxmass - pmass[p]); addmass = max(0.0f, min(needadd, addmass)); needadd -= addmass; // pmass[p] += addmass; if (needadd <= 0) break; } } } } //2.3: float actualadd = gcontain - gsolubility - needadd, eachchange; for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (actualadd <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (actualadd <= 0) break; if (pflag[p] == TYPEFLUID) { if (paircontain[p] - psolubility[p]>0) { eachchange = min(actualadd, paircontain[p] - psolubility[p]); paircontain[p] -= eachchange; actualadd -= eachchange; } } } } } //end if( gcontain>gsolubility ) else if (gairexist>0) //3: { //todo: if (abs(gcontain - gsolubility) < 3.6f*airparticlemass0/*1.3f*gsolubility*/) // return; //3.1: float needminus = gsolubility - gcontain; // float masschangesum = 0; // if (gairexist<needminus) needminus = gairexist; if (needminus>0)//minus some of them to 0 mass, use another kernel to delete it. { for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (needminus <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx] && needminus>0; p++) { if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { float masschange = min(pmass[p], needminus); // pmass[p] -= masschange; needminus -= masschange; masschangesum += masschange; } } } } //3.2: . change the fluid particls. for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { if (masschangesum <= 0) break; gidx = getidx(i + di, j + dj, k + dk); if (gridstart[gidx] == CELL_UNDEF) continue; for (int p = gridstart[gidx]; p<gridend[gidx] && masschangesum>0; p++) { if (pflag[p] == TYPEFLUID) { float containchange = min(max(0.0f, psolubility[p] - paircontain[p]), masschangesum); // paircontain[p] += containchange; masschangesum -= containchange; } } } } } } //emptyAIR //markgrid, correctpos, heattransfer. 
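// Seeded "empty" bubbles: each seed absorbs gas released by nearby supersaturated liquid
// particles and grows in radius along its stored direction. Once the radius reaches about one
// grid cell (rthresholdleave) the seed is converted into a ball of TYPEAIR particles sampled at
// dparam.samplespace spacing, and the seed is moved back near the floor to start the next bubble.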
__global__ void updateEmptyBubbles(float3 *pepos, float3 *pedir, float *peradius, int penum, float3 *parpos, float3 *parvel, float *parmass, float* parTemperature, char *parflag, float *parsolubility, float *paraircontain, int parnum, int *addparnums, uint *gridstart, uint *gridend, farray gTemperature) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<penum) { int airRscale = 2; float3 ipos = pepos[idx], idir = pedir[idx]; float iradius = peradius[idx]; float rthresholdleave = 1.0f*dparam.cellsize.x; //todo: // float rthreshold = max(0.0f, iradius + 0.1f*dparam.cellsize.x); // rthreshold = min(rthreshold, rthresholdleave); int i, j, k; getijkfrompos(i, j, k, ipos); // float massorigin = dparam.waterrho * 4 / 3 * M_PI*(pow(iradius, 3))*0.5; float masscantake = dparam.waterrho * 4 / 3 * M_PI*(pow(rthreshold, 3) - pow(iradius, 3))*0.5, massadd = 0; //todo int range = 2; for (int di = -range; di <= range &&masscantake>0; di++) for (int dj = -range; dj <= range&&masscantake>0; dj++) for (int dk = -range; dk <= range&&masscantake>0; dk++) if (verifycellidx(i + di, j + dj, k + dk)) { int grididx = getidx(i, j, k); for (uint p = gridstart[grididx]; p<gridend[grididx] && masscantake>0; p++) // { if (parflag[p] != TYPEFLUID) continue; float gasreslease = max(0.0f, paraircontain[p] - parsolubility[p]); if (gasreslease <= 0) continue; gasreslease = min(gasreslease, masscantake); massadd += gasreslease; masscantake -= gasreslease; //paraircontain[p] -= gasreslease; } } float newiradius = pow((massadd + massorigin) / dparam.waterrho / 4 * 3 / M_PI, 1.0 / 3); ipos += (newiradius - iradius)*idir; float ss = dparam.samplespace; if (newiradius + 1e-5 >= rthresholdleave) // { int num = ceil(newiradius / ss); for (float x = -num*ss; x <= newiradius; x += ss)for (float y = -num*ss; y <= newiradius; y += ss)for (float z = -num*ss; z <= newiradius; z += ss) { if (x*x + y*y + z*z>newiradius*newiradius) continue; int addindex = atomicAdd(&addparnums[0], 1) + parnum; parmass[addindex] = dparam.airm0; //todo: parpos[addindex] = ipos + make_float3(x, y, z); parflag[addindex] = TYPEAIR; parvel[addindex] = make_float3(0.0f); parTemperature[addindex] = gTemperature[getidx(i, j, 1)]; //todo: paraircontain[addindex] = 0.0f; parsolubility[addindex] = 0.0f; } ipos.z = 1.1f*dparam.cellsize.x; // newiradius = 0; } peradius[idx] = newiradius; pepos[idx] = ipos; } } __device__ void mat4_mul(matrix4* dst, const matrix4* m0, const matrix4* m1) { int row; int col; int i; for (row = 0; row < 4; row++) for (col = 0; col < 4; col++) for (i = 0; i < 4; i++) dst->m[row * 4 + col] += m0->m[row * 4 + i] * m1->m[i * 4 + col]; } __device__ void mat4_mulvec3_as_mat3(float3* dst, const matrix4* m, const float3* v) { float new_x; float new_y; float new_z; new_x = v->x*m->m[0 + 4 * 0] + v->y*m->m[0 + 4 * 1] + v->z*m->m[0 + 4 * 2]; new_y = v->x*m->m[1 + 4 * 0] + v->y*m->m[1 + 4 * 1] + v->z*m->m[1 + 4 * 2]; new_z = v->x*m->m[2 + 4 * 0] + v->y*m->m[2 + 4 * 1] + v->z*m->m[2 + 4 * 2]; dst->x = new_x; dst->y = new_y; dst->z = new_z; } __global__ void MeltingSolidByHeat(float *pTemperature, float *pLHeat, char *pflag, int pnum, float LiquidHeatTh, float meltTemperature, int *numchange) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { //if( pTemperature[idx]>meltTemperature ) if (pLHeat[idx]>LiquidHeatTh) { pflag[idx] = TYPEFLUID; pLHeat[idx] = LiquidHeatTh; atomicAdd(&numchange[0], 1); } } } __global__ void FreezingSolidByHeat(float3* ppos, float *pLHeat, char *pflag, int pnum, 
int *numchange, uint *gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPEFLUID) { //if( pTemperature[idx]>meltTemperature ) if (pLHeat[idx]<0) { //determine a new position which is appropriate for solid. // int i, j, k; float3 ipos = ppos[idx]; getijkfrompos(i, j, k, ipos); float mindis = 1000; int minidx = -1; int width = 1; int cntsolid = 0; float h = dparam.cellsize.x; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) { int gidx = getidx(i + di, j + dj, k + dk); uint start = gridstart[gidx]; if (start == CELL_UNDEF) continue; for (int p = start; p<gridend[gidx]; p++) { if (pflag[p] == TYPESOLID) { float dis = length(ppos[p] - ipos); if (dis< h) cntsolid++; if (length(ppos[p] - ipos)<mindis) mindis = length(ppos[p] - ipos), minidx = p; } } } if (minidx != -1 && mindis<dparam.cellsize.x && cntsolid>2)// { pflag[idx] = TYPESOLID; pLHeat[idx] = 0; atomicAdd(&numchange[0], 1); if (mindis > dparam.samplespace) { ipos = normalize(ipos - ppos[minidx])*dparam.samplespace + ppos[minidx]; ppos[idx] = ipos; } } } } } //air solo particledrag forcedragparam __global__ void calDragForce(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray ux, farray uy, farray uz, float dragparamsolo, float dragparamgrid, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIRSOLO) return; float3 ipos = ppos[idx], ivel = pvel[idx]; //compute the grid index int i, j, k; getijkfrompos(i, j, k, ipos); //compute drag "force" (actually not "force", is velocity change, tuning alpha is very important) float3 gridvel = getParticleVelFromGrid(ipos, ux, uy, uz); float3 gridpos = make_float3(i, j, k); float3 dragf_b = dragparamsolo * length(gridvel - ivel) * (gridvel - ivel); //grid's velocitybubble1 /* float alpha = 0.5f;*/ float3 velChange_g = -dragf_b*dragparamgrid*dparam.dt; // //update for grid float ux0, ux1, uy0, uy1, uz0, uz1; float3 weight = ipos / dparam.cellsize.x - gridpos; // in [0-1] ux0 = velChange_g.x*(1 - weight.x), ux1 = velChange_g.x*weight.x; uy0 = velChange_g.y*(1 - weight.y), uy1 = velChange_g.y*weight.y; uz0 = velChange_g.z*(1 - weight.z), uz1 = velChange_g.z*weight.z; atomicAdd(&(ux.data[getidx(i, j, k, NX + 1, NY, NZ)]), ux0); atomicAdd(&(ux.data[getidx(i + 1, j, k, NX + 1, NY, NZ)]), ux1); atomicAdd(&(uy.data[getidx(i, j, k, NX, NY + 1, NZ)]), uy0); atomicAdd(&(uy.data[getidx(i, j + 1, k, NX, NY + 1, NZ)]), uy1); atomicAdd(&(uz.data[getidx(i, j, k, NX, NY, NZ + 1)]), uz0); atomicAdd(&(uz.data[getidx(i, j, k + 1, NX, NY, NZ + 1)]), uz1); //update for particletodoInteraction if (scene == SCENE_INTERACTION || scene == SCENE_INTERACTION_HIGHRES) pvel[idx] += dragf_b*dparam.dt; } } __global__ void accumulate_GPU_k(int num, float3* out, float3* a)//dsum, a.data, flag, n { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s>0; s >>= 1) { if (tid<s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float* b)//dsum, a.data, flag, n { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? 
make_float3(0, 0, 0) : a[i]*b[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float3* b) { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]*b[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k_float(int num, float* out, float* a)//dsum, a.data, flag, n { extern __shared__ float fddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; fddata[tid] = (i >= num) ? 0 : a[i]; //solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) fddata[tid] += fddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = fddata[0]; } __global__ void compute_cI_k(int pnum, char* parflag, float3 *parPos, float3 *parVel, float3* c, float3* weight, float3 rg) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID) { float dis = length(parPos[idx] - rg); if (dis>1e-6) { c[idx] = cross(parPos[idx] - rg, parVel[idx]); weight[idx] = make_float3(dis, 0, 0); } else c[idx] = weight[idx] = make_float3(0); } else { c[idx] = weight[idx] = make_float3(0); //c[idx] = make_float3(0,0,0); } } } __global__ void setVelZeroSolid_k(float3 *parvel, char *parflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) parvel[idx] = make_float3(0); } __global__ void computeVelSolid_k(float3* parPos, char* parflag, float3* parVel, int pnum, float3 rg, float3 R, float3 T) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 v_half = cross(R, parPos[idx] - rg); //` v_half += T; // v_half = 0.5*(parVel[idx] + v_half); parVel[idx] = v_half; // parVel[idx] = make_float3(0); } } __device__ inline float3 transposeParticle(float3 p, matrix3x3 rm) { float3 res; res.x = p.x*rm.x00 + p.y*rm.x10 + p.z*rm.x20; res.y = p.x*rm.x01 + p.y*rm.x11 + p.z*rm.x21; res.z = p.x*rm.x02 + p.y*rm.x12 + p.z*rm.x22; return res; } //rotation matrix "rm" __global__ void computePosSolid_k(float3* parvel, float3* parPos, char* parflag, int pnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 transp = parPos[idx] - rg0; transp = transposeParticle(transp, rm); parPos[idx] = transp + rg; //if (length(parPos[idx])<10.5) //parPos[idx] -= parvel[idx] * 0.00001; } } __global__ void computeSolidVertex_k(float3* vertexpos, int vnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<vnum) { float3 transp = vertexpos[idx] - rg0; transp = transposeParticle(transp, rm); vertexpos[idx] = transp + rg; } } __global__ void set_nonsolid_2_zero(char* pflag, int pnum, float3* Pos, float3* Vel) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] != TYPESOLID) { Pos[idx] = make_float3(0, 0, 0); Vel[idx] = make_float3(0, 0, 0); //Mass[idx] = 0.; } } //fluid, air, airsolosolidsolid __global__ void CollisionWithSolid_k(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray 
phisolid, farray sux, farray suy, farray suz, SCENE scene, float bounceVelParam, float bouncePosParam) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float iphi = getScaleFromFrid(ipos, phisolid); if (iphi <= 0.5f) // { float3 svel = getParticleVelFromGrid(ipos, sux, suy, suz); float3 rvel = ivel - svel; float d = dparam.cellsize.x * 0.5f; float3 phigrad; phigrad.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid); phigrad.y = getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid); phigrad.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid); if (length(phigrad) > 0) { phigrad = normalize(phigrad); // if (dot(rvel, phigrad)<0 || scene == SCENE_FREEZING) // { ivel -= bounceVelParam * dot(rvel, phigrad)*phigrad; // if (scene == SCENE_FREEZING) ivel -= 0.1f* (rvel - dot(rvel, phigrad)*phigrad); // } ipos += bouncePosParam * phigrad * (0.5f - iphi) * dparam.cellsize.x; } } // ipos += ivel*dparam.dt; // float rate = 0.5f, ratevel = -0.5f; if (pflag[idx] == TYPEAIRSOLO) rate = 0.8f, ratevel = -0.5f; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(rate*dparam.cellsize.x)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(rate*dparam.cellsize.x)); // if( ipos.x>tmax.x ) // ivel.x *=ratevel, ipos.x=tmax.x; // if( ipos.x<tmin.x ) // ivel.x *= ratevel, ipos.x=tmin.x; // if( ipos.y>tmax.y ) // ivel.y *=ratevel, ipos.y=tmax.y; // if( ipos.y<tmin.y ) // ivel.y *= ratevel, ipos.y=tmin.y; // if( ipos.z>tmax.z ) // ivel.z *=ratevel, ipos.z=tmax.z; // if( ipos.z<tmin.z ) // ivel.z *= ratevel, ipos.z=tmin.z; if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; // pvel[idx] = ivel; ppos[idx] = ipos; } } //melting and freezingfluid, air, airsolosolidsolid __global__ void CollisionWithSolid_Freezing(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray phisolid, uint* gridstart, uint* gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; int i, j, k; getijkfrompos(i, j, k, ipos); float iphi = getScaleFromFrid(ipos, phisolid); if (iphi <= 1.0f) // { float r = 0.25f*dparam.cellsize.x; float3 collisionpos = make_float3(0), dir; float depth = 0, dis, adhesionDis = 0; int cntcollide = 0, cntadhesion = 0; float h = 4 * r; for (int di = -1; di <= 1; di++)for (int dj = -1; dj <= 1; dj++)for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int grididx = getidx(i + di, j + dj, k + dk); int start = gridstart[grididx]; if (start == CELL_UNDEF) continue; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<2 * r) // { collisionpos += ppos[p]; depth = max(depth, 2 * r - dis); cntcollide++; } else if (dis< h) { adhesionDis += dis; cntadhesion++; } } } } float3 n; float d = dparam.cellsize.x * 0.5f; n.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid); n.y = 
getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid); n.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid); float3 originalvel = ivel; if (length(n) > 0) { n = normalize(n); // if (cntcollide>0) // { collisionpos /= cntcollide; if (length(n) > 0) { //correct vel and pos; ivel -= dot(originalvel, n)*n; // //ivel *= 1.1f; ipos += depth * n; } } else if (cntadhesion>0) // { float alpha = 0.1f; ivel -= n * alpha * length(ivel); } } } // // float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); if (ipos.x>tmax.x) ivel.x *= -0.5f, ipos.x = tmax.x; if (ipos.x<tmin.x) ivel.x *= -0.5f, ipos.x = tmin.x; if (ipos.y>tmax.y) ivel.y *= -0.5f, ipos.y = tmax.y; if (ipos.y<tmin.y) ivel.y *= -0.5f, ipos.y = tmin.y; if (ipos.z>tmax.z) ivel.z *= -0.5f, ipos.z = tmax.z; if (ipos.z<tmin.z) ivel.z *= -0.5f, ipos.z = tmin.z; ipos += ivel*dparam.dt; // pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void buoyancyForSolid(float3 *ppos, float3 *pvel, char *pflag, int pnum, uint *gridstart, uint *gridend, float SolidBuoyanceParam) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { int cnt = 0; int i, j, k; float3 ipos = ppos[idx]; getijkfrompos(i, j, k, ipos); float r = dparam.cellsize.x; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int gidx = getidx(i + di, j + dj, k + dk); uint start = gridstart[gidx]; if (start != CELL_UNDEF) { for (uint p = start; p<gridend[gidx]; p++) if (pflag[p] == TYPEFLUID && length(ppos[p] - ipos)<r) cnt++; } } } if (cnt>2) pvel[idx].z += (dparam.waterrho - dparam.solidrho) * SolidBuoyanceParam * dparam.dt; } } __global__ void solidCollisionWithBound(float3 *ppos, float3 *pvel, char *pflag, int pnum, float SolidbounceParam, int nSolPoint) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { //check position float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; //float eps=1e-6; // //() if (ipos.x<tmin.x) ivel.x += (tmin.x - ipos.x) * SolidbounceParam * nSolPoint; if (ipos.x>tmax.x) ivel.x -= (ipos.x - tmax.x) * SolidbounceParam * nSolPoint; if (ipos.y<tmin.y) ivel.y += (tmin.y - ipos.y) * SolidbounceParam * nSolPoint; if (ipos.y>tmax.y) ivel.y -= (ipos.y - tmax.y) * SolidbounceParam * nSolPoint; if (ipos.z<tmin.z) ivel.z += (tmin.z - ipos.z) * SolidbounceParam * nSolPoint; if (ipos.z>tmax.z) ivel.z -= (ipos.z - tmax.z) * SolidbounceParam * nSolPoint; pvel[idx] = ivel; //ppos[idx]=ipos; // } } //there is a problem here, remember to solve it. 
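/*
 * Review note (added): solidCollisionWithBound implements a penalty response.
 * The velocity correction grows linearly with the penetration depth
 * (tmin.x - ipos.x, etc.), scaled by SolidbounceParam and by the number of
 * solid sample points nSolPoint; the nSolPoint factor presumably compensates
 * for the later averaging of per-particle velocities into a single rigid-body
 * velocity, so the net bounce does not depend on how densely the solid is
 * sampled. Only the velocity is corrected here; the position update is left
 * to the rigid-body integration (note the commented-out ppos[idx] = ipos).
 */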
// __global__ void genAirFromSolid_k( float3 *ppos, float3 *pvel, char *pflag, float *psolubility, float *paircontain, float *pmass, float *pTemperature,int pnum, // charray lsmark, farray phisolid, farray Tgrid, int *addnum, float *randfloat, int nrandnum, int frame ) // { // int idx=__mul24( blockIdx.x, blockDim.x )+threadIdx.x; // if( idx<dparam.gnum &&lsmark[idx]==TYPEFLUID && phisolid[idx]>0 ) // // { // int i,j,k; // getijk( i,j,k,idx); // bool flag=false; // for( int di=-1; di<=1; di++ ) for( int dj=-1; dj<=1; dj++ ) for( int dk=-1; dk<=1; dk++ ) // { // if(verifycellidx(i+di,j+dj,k+dk) && phisolid( i+di,j+dj,k+dk)<0 ) // flag=true; // } // if( !flag ) // return; // // int cnt= (idx*frame) % ( nrandnum-100 ); // if( randfloat[cnt++]>0.95 ) //if randnum>thresold, generate a airsolo bubble // { // int addidx=atomicAdd( addnum, 1 ); // float3 addpos= (make_float3(randfloat[cnt], randfloat[cnt], randfloat[cnt]) + make_float3(i,j,k) ) * dparam.cellsize.x; // ppos[pnum+addidx] = addpos; // pvel[pnum+addidx]=make_float3(0); // pflag[pnum+addidx]=TYPEAIRSOLO; // psolubility[pnum+addidx]=0; // paircontain[pnum+addidx]=0; // pmass[pnum+addidx]=dparam.airm0; // pTemperature[pnum+addidx]=getScaleFromFrid( addpos, Tgrid ); // } // } // } //latent heat()latent heatlatent heatphase change. __global__ void updateLatentHeat_k(float *parTemperature, float *parLHeat, char *partype, int pnum, float meltingpoint, float boilingpoint, float LiquidHeatTh) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (partype[idx] == TYPESOLID && parTemperature[idx]>meltingpoint) { parLHeat[idx] += parTemperature[idx] - meltingpoint; parTemperature[idx] = meltingpoint; } if (partype[idx] == TYPEFLUID) { if (parTemperature[idx]<meltingpoint) { parLHeat[idx] -= meltingpoint - parTemperature[idx]; parTemperature[idx] = meltingpoint; } else if (parTemperature[idx]>boilingpoint) { parLHeat[idx] += parTemperature[idx] - boilingpoint; // parLHeat[idx] = min( parLHeat[idx], LiquidHeatTh+5 ); parTemperature[idx] = boilingpoint; } else parLHeat[idx] = LiquidHeatTh; } } } __global__ void pouringwater(float3* pos, float3* vel, float* parmass, char* parflag, float *ptemperature, float *pLHeat, float *pGasContain, int parnum, float3 *ppourpos, float3 *ppourvel, char pourflag, int pournum, float *randfloat, int randnum, int frame, float posrandparam, float velrandparam, float defaultLiquidT, float LiquidHeatTh) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pournum) { // int randbase = (frame + idx) % (randnum - 6); float3 randvel = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; randbase += 3; float3 randpos = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; pos[parnum + idx] = ppourpos[idx] + randpos * posrandparam*dparam.samplespace; vel[parnum + idx] = ppourvel[idx] + randvel * velrandparam; parmass[parnum + idx] = dparam.m0; parflag[parnum + idx] = pourflag; ptemperature[parnum + idx] = defaultLiquidT; pLHeat[parnum + idx] = LiquidHeatTh; pGasContain[parnum + idx] = 0; } } inline __device__ float getlen(float x, float y) { return sqrt(x*x + y*y); } __global__ void initheat_grid_k(farray tp, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float x = i, z = k; float r = NX*0.15; if (getlen(x - NX / 4, z - NZ / 4) <= r) tp[idx] = 100, mark[idx] = TYPESOLID; else if (getlen(x - NX / 4 * 3, z - NZ / 4 * 3) <= 
r)
			tp[idx] = 0, mark[idx] = TYPEFLUID;
		else if (z < NZ / 2)
			tp[idx] = 20, mark[idx] = TYPEVACUUM;
		else
			tp[idx] = 80, mark[idx] = TYPEAIR;
	}
}

// Copy the solid particles' positions back into the main particle array and
// blend the solid FLIP velocity with the current particle velocity.
__global__ void set_softparticle_position(float3* solidParPos, float3* mParPos, float3* solidParVelFLIP, float3* mParVel, char* partype)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx < dparam.gnum)	// note: bounded by the grid-cell count; assumes the particle arrays are at least this long
		if (partype[idx] == TYPESOLID)
		{
			mParPos[idx] = solidParPos[idx];
			mParVel[idx] = (solidParVelFLIP[idx] + mParVel[idx]) / 2.0f;
			// mParVel[idx] = solidParVelFLIP[idx];
		}
}
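/*
 * (added, illustrative) A typical launch of the kernel above, assuming the same
 * (blocknum, threadnum) pair used by the other wrappers in this project and
 * hypothetical device-pointer names:
 *
 *     set_softparticle_position<<<blocknum, threadnum>>>(dSolidPos, dParPos, dSolidVelFLIP, dParVel, dParFlag);
 */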
810ab844b30af4997384d47a6dd1ec11d5a1ccdd.cu
#include <cuda_runtime.h> // includes cuda.h and cuda_runtime_api.h #include "spray_k.cuh" #include<helper_cuda.h> #include<helper_math.h> #include "utility.h" #include "tables.h" __constant__ FlipConstant dparam; __constant__ int NX; __constant__ int NY; __constant__ int NZ; __constant__ int NXMC; __constant__ int NYMC; __constant__ int NZMC; texture<uint, 1, cudaReadModeElementType> edgeTex; texture<uint, 1, cudaReadModeElementType> triTex; texture<uint, 1, cudaReadModeElementType> numVertsTex; __device__ float racc = 0.; __device__ float wacc = 0.; __device__ float3 pacc; __device__ float sradiusInv; void copyparamtoGPU(FlipConstant hparam) { checkCudaErrors(cudaMemcpyToSymbol(dparam, &hparam, sizeof(FlipConstant))); } void copyNXNYNZtoGPU(int nx, int ny, int nz) { checkCudaErrors(cudaMemcpyToSymbol(NX, &nx, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NY, &ny, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NZ, &nz, sizeof(int))); } void copyNXNYNZtoGPU_MC(int nx, int ny, int nz) { checkCudaErrors(cudaMemcpyToSymbol(NXMC, &nx, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NYMC, &ny, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(NZMC, &nz, sizeof(int))); } __device__ inline void getijk(int &i, int &j, int &k, int &idx) { i = idx / (NZ*NY); j = idx / NZ%NY; k = idx%NZ; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos) { pos = (pos - dparam.gmin) / dparam.cellsize; i = (pos.x >= 0 && pos.x<NX) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<NY) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<NZ) ? ((int)pos.z) : 0; } __device__ inline void getijkfrompos(int &i, int &j, int &k, float3 pos, int w, int h, int d, float dx) { pos = (pos - dparam.gmin) / dx; i = (pos.x >= 0 && pos.x<w) ? ((int)pos.x) : 0; j = (pos.y >= 0 && pos.y<h) ? ((int)pos.y) : 0; k = (pos.z >= 0 && pos.z<d) ? 
((int)pos.z) : 0; } __device__ inline int getidx(int i, int j, int k) { return (i*NZ*NY + j*NZ + k); } __device__ inline int getidx(int i, int j, int k, int w, int h, int d) { return (i*h*d + j*d + k); } __device__ inline float getRfromMass(float m) { return pow(m*0.75f / M_PI / dparam.waterrho, 0.333333); } __device__ inline float getMassfromR(float r) { return dparam.waterrho*M_PI*4.0 / 3 * r*r*r; } //计算散度 __global__ void cptdivergence(farray outdiv, farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); if (mark[idx] == TYPEFLUID) div = (ux(i + 1, j, k) - ux(i, j, k) + uy(i, j + 1, k) - uy(i, j, k) + uz(i, j, k + 1) - uz(i, j, k)) / h; outdiv[idx] = div; } } __device__ inline int clampidx(int i, int j, int k) { i = max(0, min(i, NX - 1)); j = max(0, min(j, NY - 1)); k = max(0, min(k, NZ - 1)); return (i*NZ*NY + j*NZ + k); } __device__ inline float trilinear(farray u, float x, float y, float z, int w, int h, int d) { x = fmaxf(0.0f, fminf(x, w)); y = fmaxf(0.0f, fminf(y, h)); z = fmaxf(0.0f, fminf(z, d)); int i = fminf(x, w - 2); int j = fminf(y, h - 2); int k = fminf(z, d - 2); return (k + 1 - z)*((j + 1 - y)*((i + 1 - x)*u(i, j, k) + (x - i)*u(i + 1, j, k)) + (y - j)*((i + 1 - x)*u(i, j + 1, k) + (x - i)*u(i + 1, j + 1, k))) + (z - k)*((j + 1 - y)*((i + 1 - x)*u(i, j, k + 1) + (x - i)*u(i + 1, j, k + 1)) + (y - j)*((i + 1 - x)*u(i, j + 1, k + 1) + (x - i)*u(i + 1, j + 1, k + 1))); } __device__ float3 getVectorFromGrid(float3 pos, farray phigrax, farray phigray, farray phigraz) { float3 res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //注意:ux,uy,uz的存储方式比较特殊(staggered grid),三维线性插值也要比较小心 res.x = trilinear(phigrax, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.y = trilinear(phigray, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); res.z = trilinear(phigraz, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } __device__ float getScaleFromFrid(float3 pos, farray phi) { float res; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //注意:ux,uy,uz的存储方式比较特殊(staggered grid),三维线性插值也要比较小心 res = trilinear(phi, x - 0.5f, y - 0.5f, z - 0.5f, NX, NY, NZ); return res; } //Jacobi iteration: Ax=b //todo: check this function and maybe get another solver. __global__ void JacobiIter(farray outp, farray p, farray b, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float resp = 0, h = dparam.cellsize.x; float p1, p2, p3, p4, p5, p6; float p0 = p[idx]; int i, j, k; if (mark[idx] == TYPEFLUID) { getijk(i, j, k, idx); p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? p0 : p(i + 1, j, k); p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : p(i, j + 1, k); p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : p(i, j, k + 1); p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : p(i - 1, j, k); p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : p(i, j - 1, k); p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? 
p0 : p(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 - h*h*b(i, j, k)) / 6.0f; } outp[idx] = resp; } } __global__ void setPressBoundary(farray press) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0) press[idx] = press(i + 1, j, k); if (j == 0) press[idx] = press(i, j + 1, k); if (k == 0) press[idx] = press(i, j, k + 1); if (i == NX - 1) press[idx] = press(i - 1, j, k); if (j == NY - 1) press[idx] = press(i, j - 1, k); if (k == NZ - 1) press[idx] = press(i, j, k - 1); } } //压强与速度的计算 __global__ void subGradPress(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } __device__ float3 getParticleVelFromGrid(float3 pos, farray ux, farray uy, farray uz) { float3 vel; float x = pos.x, y = pos.y, z = pos.z; x /= dparam.cellsize.x; y /= dparam.cellsize.y; z /= dparam.cellsize.z; //注意:ux,uy,uz的存储方式比较特殊(staggered grid),三维线性插值也要比较小心 vel.x = trilinear(ux, x, y - 0.5f, z - 0.5f, NX + 1, NY, NZ); vel.y = trilinear(uy, x - 0.5f, y, z - 0.5f, NX, NY + 1, NZ); vel.z = trilinear(uz, x - 0.5f, y - 0.5f, z, NX, NY, NZ + 1); return vel; } __global__ void mapvelg2p_flip(float3 *ppos, float3 *vel, char* parflag, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float3 gvel = getParticleVelFromGrid(ipos, ux, uy, uz); vel[idx] += gvel; } } __device__ inline float sharp_kernel(float r2, float h) { return fmax(h*h / fmax(r2, 0.0001f) - 1.0f, 0.0f); } __global__ void mapvelp2g_slow(float3 *pos, float3 *vel, int pnum, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float w, weight, RE = 1.4, dis2, usum; float3 gpos; float scale = 1 / dparam.cellsize.x; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = 0; getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].x; } usum = (weight>0) ? (usum / weight) : 0.0f; ux(i, j, k) = usum; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int p = 0; p<pnum; p++) { dis2 = dot((pos[p] * scale) - gpos, (pos[p] * scale) - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].y; } usum = (weight>0) ? (usum / weight) : 0.0f; uy(i, j, k) = usum; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = 0; getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int p = 0; p<pnum; p++) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); w = sharp_kernel(dis2, RE); weight += w; usum += w*vel[p].z; } usum = (weight>0.00001) ? 
(usum / weight) : 0.0f; uz(i, j, k) = usum; } } __device__ inline bool verifycellidx(int i, int j, int k) { if (i<0 || i>NX - 1 || j<0 || j>NY - 1 || k<0 || k>NZ - 1) return false; return true; } __device__ inline bool verifycellidx(int i, int j, int k, int w, int h, int d) { if (i<0 || i>w - 1 || j<0 || j>h - 1 || k<0 || k>d - 1) return false; return true; } __global__ void addgravityforce_k(float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEFLUID || parflag[idx] == TYPESOLID) vel[idx] += dt*dparam.gravity; } } __global__ void addbuoyancyforce_k(float dheight, float3 *pos, float3 *vel, char* parflag, int pnum, float dt) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPEAIR) vel[idx] -= dt*dparam.gravity * 1.1f; //todo:这里的浮力可以小一些,让气泡上的慢一些,视频快一些,水看起来就不太粘了。 else if (parflag[idx] == TYPEAIRSOLO) vel[idx] -= dt*dparam.gravity * 1.1f; else if (parflag[idx] == TYPESOLID) vel[idx] -= dt*dparam.gravity * 0.55f; // else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // 液面下固体粒子受浮力 // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void addbuoyancyforce_vel(float velMax, float3 *pos, float3 *vel, char* parflag, int pnum, float dt, float buoyanceRateAir, float buoyanceRateSolo) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { float rate = fmax(velMax - vel[idx].z, 0.0f) / velMax; if (parflag[idx] == TYPEAIR) vel[idx].z -= dt*dparam.gravity.z * rate * buoyanceRateAir; //todo:这里的浮力可以小一些,让气泡上的慢一些,视频快一些,水看起来就不太粘了。 else if (parflag[idx] == TYPEAIRSOLO) vel[idx].z -= dt*dparam.gravity.z *rate* buoyanceRateSolo; else if (parflag[idx] == TYPESOLID) vel[idx].z += dt*dparam.gravity.z * 0.1f;//0.55f; // else if(parflag[idx] == TYPESOLID && pos[idx].z <= dheight) // 液面下固体粒子受浮力 // vel[idx] -= dt*dparam.gravity * 0.2f; } } __global__ void advectparticle(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); //vel[idx] += dt*dparam.gravity; ipos += gvel*dt; if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //check boundary ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, ipos.z); if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void advectparticle_RK2(float3 *ppos, float3 *pvel, int pnum, farray ux, farray uy, farray uz, float dt, char *parflag, VELOCITYMODEL velmode) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //read in float3 ipos = ppos[idx], ivel = pvel[idx]; float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); //pos-->grid xyz float3 gvel; gvel = getParticleVelFromGrid(ipos, ux, uy, uz); if (velmode == CIP) ivel = gvel; else if (velmode == FLIP) ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx]; //mid point: x(n+1/2) = x(n) + 0.5*dt*u(xn) float3 midpoint = 
ipos + gvel * dt * 0.5; float3 gvelmidpoint = getParticleVelFromGrid(midpoint, ux, uy, uz); // x(n+1) = x(n) + dt*u(x+1/2) ipos += gvelmidpoint * dt; //check boundary if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f; if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f; if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f; if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f; if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f; if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f; //write back if (parflag[idx] != TYPESOLID) { pvel[idx] = ivel; ppos[idx] = ipos; } else pvel[idx] = ivel; } } __global__ void flipAirVacuum(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == TYPEVACUUM) mark[idx] = TYPEAIR; } } __global__ void markair(charray mark) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { mark[idx] = TYPEAIR; } } __global__ void markforsmoke(charray mark, farray spraydense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { /* if(spraydense[idx]>0 )*/ mark[idx] = TYPEFLUID; } } __global__ void markfluid(charray mark, float3 *pos, char *parflag, int pnum) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { int i, j, k; //todo: ???? Should spray particle count??? or should we have a more accurate mark method. // if( parflag[idx]==TYPEFLUID) { getijkfrompos(i, j, k, pos[idx]); mark(i, j, k) = TYPEFLUID; //应该是不需要原子操作的,重复写不会有问题 } } } //判断一下格子里含有的fluid particle的数量,再决定格子的属性 __global__ void markfluid_dense(charray mark, float *parmass, char *parflag, int pnum, uint *gridstart, uint *gridend, int fluidParCntPerGridThres) { uint idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int cntfluidsolid = 0, cntair = 0; uint start = gridstart[idx]; uint end = gridend[idx]; if (start != CELL_UNDEF) { for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) cntfluidsolid++; else if (parflag[p] == TYPEAIR) cntair++; } } if (cntfluidsolid == 0 && cntair == 0) mark[idx] = TYPEVACUUM; else if (cntfluidsolid>cntair) mark[idx] = TYPEFLUID; else mark[idx] = TYPEAIR; } } __global__ void markBoundaryCell(charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY; } } __global__ void setgridcolor_k(float* color, ECOLORMODE mode, farray p, farray ux, farray uy, farray uz, farray div, farray phi, charray mark, farray ls, farray tp, float sigma, float temperatureMax, float temperatureMin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float3 rescolor = make_float3(0.0); int cellindex = NY / 2; if (mode == COLOR_PRESS) { if (j != cellindex || p[idx] == 0) rescolor = make_float3(0, 0, 1); else if (p[idx]>0) rescolor = make_float3(0, 1, 0); else if (p[idx]<0) rescolor = make_float3(1, 0, 0); //rescolor = mapColorBlue2Red( 30000*abs(p[idx]) ); } else if (mode == COLOR_UX) { if (j != cellindex || ux(i + 1, j, k) + ux(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(ux(i + 1, j, k) + ux(i, j, k))); } else if (mode == COLOR_UY) { if (j != cellindex || uy(i, j + 1, k) + uy(i, j, k)<0) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5*abs(uy(i, j + 1, k) + uy(i, j, k))); } else if (mode == COLOR_UZ) { if (j != 
cellindex/*||uz(i,j,k+1)+uz(i,j,k)<0*/) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(5 * abs(uz(i, j, k))); } else if (mode == COLOR_DIV) { if (j != cellindex || div[idx] == 0) rescolor = make_float3(0, 0, 1); else if (div[idx]>0) rescolor = make_float3(0, 1, 0); else if (div[idx]<0) rescolor = make_float3(1, 1, 0); } else if (mode == COLOR_PHI) { if (phi[idx]>3 * NX - 1 || j != cellindex) rescolor = make_float3(0, 0, 1); else rescolor = mapColorBlue2Red(0.5f + phi[idx]); } else if (mode == COLOR_MARK) { if (j != cellindex) rescolor = make_float3(0, 0, 1); else { if (mark[idx] == TYPEAIR) rescolor = make_float3(0, 1, 0); else if (mark[idx] == TYPEFLUID) rescolor = make_float3(1, 0, 0); else if (mark[idx] == TYPEVACUUM) rescolor = make_float3(1, 1, 0); else if (mark[idx] == TYPEBOUNDARY) rescolor = make_float3(0, 1, 1); else rescolor = make_float3(0, 0, 1); //rescolor = mapColorBlue2Red( (int)(mark[idx])+1.0f ) ; } } else if (mode == COLOR_LS) { if (j == cellindex && ls[idx]>0) rescolor = mapColorBlue2Red(abs(ls[idx] / dparam.cellsize.x)); else rescolor = make_float3(0, 0, 1); } else if (mode == COLOR_TP) { if (j != cellindex || i == 0 || i == NX - 1 || k == 0 || k == NZ - 1) rescolor = make_float3(0, 0, 1); else // rescolor = mapColorBlue2Red( abs(tp[idx]*dparam.cellsize.x*5/sigma) ); //rescolor = mapColorBlue2Red( abs(tp[idx]-353)/5.0f ); rescolor = mapColorBlue2Red((tp[idx] - temperatureMin) / (temperatureMax - temperatureMin)*6.0f); } color[idx * 3] = rescolor.x; color[idx * 3 + 1] = rescolor.y; color[idx * 3 + 2] = rescolor.z; } } __host__ __device__ inline float3 mapColorBlue2Red(float v) { float3 color; if (v<0) return make_float3(0.0f, 0.0f, 1.0f); int ic = (int)v; float f = v - ic; switch (ic) { case 0: { color.x = 0; color.y = f / 2; color.z = 1; } break; case 1: { color.x = 0; color.y = f / 2 + 0.5f; color.z = 1; } break; case 2: { color.x = f / 2; color.y = 1; color.z = 1 - f / 2; } break; case 3: { color.x = f / 2 + 0.5f; color.y = 1; color.z = 0.5f - f / 2; } break; case 4: { color.x = 1; color.y = 1.0f - f / 2; color.z = 0; } break; case 5: { color.x = 1; color.y = 0.5f - f / 2; color.z = 0; } break; default: { color.x = 1; color.y = 0; color.z = 0; } break; } return color; } __global__ void initphi(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) phi[idx] = -0.5; else phi[idx] = NX * 3; } } __global__ void initSolidPhi(farray phi, uint *gridstart, uint *gridend, char *pflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { bool flag = false; uint start = gridstart[idx]; if (start != CELL_UNDEF) { for (; start<gridend[idx]; start++) { if (pflag[start] == TYPESOLID) flag = true; } } if (flag) phi[idx] = -0.5f; else phi[idx] = 3 * NX; } } __device__ void solvedistance(float a, float b, float c, float &x) { float d = fmin(a, fmin(b, c)) + 1; if (d>fmax(a, fmax(b, c))) { d = (a + b + c + sqrt(3 - (a - b)*(a - b) - (a - c)*(a - c) - (b - c)*(b - c))) / 3; } if (d<x) x = d; } __global__ void sweepphi(farray phi) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), 
resphi); } phi[idx] = resphi; } } __global__ void sweepphibytype(farray phi, charray mark, char typeflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] == typeflag) return; int i, j, k; getijk(i, j, k, idx); float resphi = phi[idx]; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (verifycellidx(i + di, j, k) && verifycellidx(i, j + dj, k) && verifycellidx(i, j, k + dk)) solvedistance(phi(i + di, j, k), phi(i, j + dj, k), phi(i, j, k + dk), resphi); } phi[idx] = resphi; } } __global__ void sweepu(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray phi, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; //三个方向上的权重 if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ((mark(i, j, k) == TYPEAIR && mark(i - 1, j, k) == TYPEAIR) || (mark(i, j, k) == TYPEBOUNDARY && mark(i - 1, j, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ - 1) continue; wx = -di*(phi(i, j, k) - phi(i - 1, j, k)); if (wx<0) continue; wy = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j + dj, k) - phi(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (phi(i, j, k) + phi(i - 1, j, k) - phi(i, j, k + dk) - phi(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ((mark(i, j, k) == TYPEAIR && mark(i, j - 1, k) == TYPEAIR) || (mark(i, j, k) == TYPEBOUNDARY && mark(i, j - 1, k) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di>NX - 1 || k + dk<0 || k + dk >NZ - 1) continue; wy = -dj*(phi(i, j, k) - phi(i, j - 1, k)); if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j - 1, k) - phi(i + di, j, k) - phi(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (phi(i, j, k) + phi(i, j - 1, k) - phi(i, j, k + dk) - phi(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ((mark(i, j, k) == TYPEAIR && mark(i, j, k - 1) == TYPEAIR) || (mark(i, j, k) == TYPEBOUNDARY && mark(i, j, k - 1) == TYPEBOUNDARY)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(phi(i, j, k) - phi(i, j, k - 1)); if (wz<0) continue; wy = (phi(i, j, k) + phi(i, j, k - 1) - phi(i, j + dj, k) - phi(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (phi(i, j, k) + phi(i, j, k - 1) - phi(i + di, j, k) - phi(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else 
wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } __global__ void setSmokeBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (j == 0) ux(i, j, k) = ux(i, j + 1, k); else if (j == NY - 1) ux(i, j, k) = ux(i, j - 1, k); else if (k == 0) ux(i, j, k) = ux(i, j, k + 1); else if (k == NZ - 1) ux(i, j, k) = ux(i, j, k - 1); else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (i == 0) uy(i, j, k) = uy(i + 1, j, k); else if (i == NX - 1) uy(i, j, k) = uy(i - 1, j, k); else if (k == 0) uy(i, j, k) = uy(i, j, k + 1); else if (k == NZ - 1) uy(i, j, k) = uy(i, j, k - 1); else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 2) uz(i, j, k) = 0.0f; else if (i == 0) uz(i, j, k) = uz(i + 1, j, k); else if (i == NX - 1) uz(i, j, k) = uz(i - 1, j, k); else if (j == 0) uz(i, j, k) = uz(i, j + 1, k); else if (j == NY - 1) uz(i, j, k) = uz(i, j - 1, k); else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void setWaterBoundaryU_k(farray ux, farray uy, farray uz, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; if (idx < dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); { if (i <= 1 || i >= ux.xn - 2) ux(i, j, k) = 0.0f; else if (i>1 && i<NX - 1 && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i - 1, j, k) == TYPEBOUNDARY))) ux(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); { if (j <= 1 || j >= uy.yn - 2) uy(i, j, k) = 0.0f; else if (j>0 && j<NY && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j - 1, k) == TYPEBOUNDARY))) uy(i, j, k) = 0.0f; } } if (idx < dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); { if (k <= 1 || k >= uz.zn - 1) //特殊处理ceiling uz(i, j, k) = 0.0f; else if (k == uz.zn - 2) //ceiling. uz(i, j, k) = (uz(i, j, k - 1)<0) ? 
(uz(i, j, k - 1)) : 0; else if (k>0 && k<NZ && ((mark(i, j, k) == TYPEBOUNDARY) != (mark(i, j, k - 1) == TYPEBOUNDARY))) uz(i, j, k) = 0.0f; } } } __global__ void computeDeltaU(farray ux, farray uy, farray uz, farray uxold, farray uyold, farray uzold) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) uxold[idx] = ux[idx] - uxold[idx]; if (idx < dparam.gvnum.y) uyold[idx] = uy[idx] - uyold[idx]; if (idx < dparam.gvnum.z) uzold[idx] = uz[idx] - uzold[idx]; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p); int gridindex = getidx(i, j, k); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // From CUDA SDK: calculate grid hash value for each particle __global__ void calcHashD_MC(uint* gridParticleHash, // output uint* gridParticleIndex, // output float3* pos, // input: positions uint numParticles) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 p = pos[index]; // get address in grid int i, j, k; getijkfrompos(i, j, k, p, NXMC, NYMC, NZMC, dparam.cellsize.x / NXMC*NX); int gridindex = getidx(i, j, k, NXMC, NYMC, NZMC); // store grid hash and particle index gridParticleHash[index] = gridindex; gridParticleIndex[index] = index; } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStartD(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index float3* sortedPos, // output: sorted positions float3* sortedVel, // output: sorted velocities char* sortedflag, float* sortedmass, float* sortedTemperature, float* sortedheat, float* sortedsolubility, float* sortedgascontain, uint * gridParticleHash, // input: sorted grid hashes uint * gridParticleIndex,// input: sorted particle indices float3* oldPos, // input: sorted position array float3* oldVel, // input: sorted velocity array char* oldflag, float* oldmass, float* oldtemperature, float* oldheat, float* oldsolubility, float* oldgascontain, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1]; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleIndex[index]; float3 pos = oldPos[sortedIndex]; // macro does either global read or texture fetch float3 vel = oldVel[sortedIndex]; // see particles_kernel.cuh sortedPos[index] = pos; sortedVel[index] = vel; sortedflag[index] = oldflag[sortedIndex]; sortedmass[index] = oldmass[sortedIndex]; sortedTemperature[index] = oldtemperature[sortedIndex]; sortedheat[index] = oldheat[sortedIndex]; sortedsolubility[index] = oldsolubility[sortedIndex]; sortedgascontain[index] = oldgascontain[sortedIndex]; } } __global__ void advectux(farray outux, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); float3 pos = make_float3(i, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX || j == NY - 1 || k == NZ - 1) outux[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = ux[idx]; vel.y = (uy(i - 1, j, k) + uy(i - 1, j + 1, k) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = (uz(i - 1, j, k) + uz(i - 1, j, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(ux, oldpos.x, oldpos.y - 0.5f, oldpos.z - 0.5f, ux.xn, ux.yn, ux.zn); outux[idx] = oldu * velocitydissipation; } } } __global__ void advectuy(farray outuy, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.y) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uy.xn, uy.yn, uy.zn); float3 pos = make_float3(i + 0.5, j, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY || k == NZ - 1) outuy[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j - 1, k) + ux(i + 1, j - 1, k) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = uy[idx]; vel.z = (uz(i, j - 1, k) + uz(i, j - 1, k + 1) + uz(i, j, k) + uz(i, j, k + 1))*0.25f; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(uy, oldpos.x - 0.5f, oldpos.y, oldpos.z - 0.5f, uy.xn, uy.yn, uy.zn); outuy[idx] = oldu * velocitydissipation; } } } __global__ void advectuz(farray outuz, farray ux, farray uy, farray uz, float velocitydissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { //get pos of ux point int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); float3 pos = make_float3(i + 0.5, j + 0.5, k); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ) outuz[idx] = 0; else { //get this point's vel, for tracing back. 
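// (added note) uz lives on the z-faces of the staggered grid, so its own component
// can be read directly, while ux and uy below are averaged from the four surrounding
// faces to form a collocated velocity for the semi-Lagrangian backtrace.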
float3 vel; vel.x = (ux(i, j, k - 1) + ux(i + 1, j, k - 1) + ux(i, j, k) + ux(i + 1, j, k))*0.25f; vel.y = (uy(i, j, k - 1) + uy(i, j + 1, k - 1) + uy(i, j, k) + uy(i, j + 1, k))*0.25f; vel.z = uz[idx]; //wind vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float oldu = trilinear(uz, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z, uz.xn, uz.yn, uz.zn); //float oldu = -dparam.dt*3.8f; outuz[idx] = oldu * velocitydissipation; } } } __global__ void advectscaler(farray outscalar, farray scalar, farray ux, farray uy, farray uz, float densedissipation, float3 wind) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { //get pos of ux point int i, j, k; getijk(i, j, k, idx); float3 pos = make_float3(i + 0.5, j + 0.5, k + 0.5); //get rid of boundary if (i*j*k == 0 || i == NX - 1 || j == NY - 1 || k == NZ - 1) outscalar[idx] = 0; else { //get this point's vel, for tracing back. float3 vel; vel.x = (ux(i, j, k) + ux(i + 1, j, k))*0.5f; vel.y = (uy(i, j, k) + uy(i, j + 1, k))*0.5f; vel.z = (uz(i, j, k) + uz(i, j, k + 1))*0.5f; //enforce wind as an external velocity field. vel += wind; //get oldpos float3 oldpos = pos - dparam.dt*vel / dparam.cellsize.x; //notice: scale velocity by N, from 0-1 world to 0-N world. //get ux float olds = trilinear(scalar, oldpos.x - 0.5f, oldpos.y - 0.5f, oldpos.z - 0.5f, NX, NY, NZ); outscalar[idx] = olds * densedissipation; } } } __global__ void setsmokedense(farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, dense.xn, dense.yn, dense.zn); if (i>28 && i<36 && j>28 && j<36 && k<6) dense[idx] = dparam.m0*6.0f; } } __global__ void setsmokevel(farray uz, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.z) { int i, j, k; getijk(i, j, k, idx, uz.xn, uz.yn, uz.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; // if( k>1 && k<NZ-1 ) // if( dense(i,j,k-1)>0 ) // uz[idx] = 4.0f; if (k>1 && k<NZ - 1) { float alpha = 1000.0f; uz(i, j, k) += alpha * dense(i, j, k - 1); } } } __global__ void setsmokevel_nozzle(farray ux, farray dense) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gvnum.x) { int i, j, k; getijk(i, j, k, idx, ux.xn, ux.yn, ux.zn); // if( i>20 && i<40 && j>20 && j<40 && k<10 ) // uz[idx] = 4.0f; //float alpha = 10000.0f; if (i>1 && i<NX - 1) if (dense(i - 1, j, k)>0) ux[idx] = 8.0f; //uz(i,j,k) += alpha * dense(i,j,k-1); } } surface<void, cudaSurfaceType3D> surfaceWrite; __global__ void writedens2surface_k(farray dens) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); // float4 idens = make_float4( 0.0f ); // if(i>10&&i<50 &&j>10&&j<50&&k>10&&k<50 ) // idens = make_float4( 1.0f ); float4 idens = make_float4(dens[idx] * 10000); surf3Dwrite(idens, surfaceWrite, i*sizeof(float4), j, k); //why *sizeof(float4)? 
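// (added note) surf3Dwrite takes its x coordinate in bytes rather than elements,
// so i must be scaled by sizeof(float4); the y and z coordinates remain in elements.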
} } void writedens2surface(cudaArray* cudaarray, int blocknum, int threadnum, farray dense) { cudaBindSurfaceToArray(surfaceWrite, cudaarray); //kernel writedens2surface_k << <blocknum, threadnum >> >(dense); } __device__ float smooth_kernel(float r2, float h) { return fmax(1.0f - r2 / (h*h), 0.0f); } __device__ float3 sumcellspring(float3 ipos, float3 *pos, float* pmass, char* parflag, uint *gridstart, uint *gridend, int gidx, float idiameter) { if (gridstart[gidx] == CELL_UNDEF) return make_float3(0.0f); uint start = gridstart[gidx]; uint end = gridend[gidx]; float dist, w; float3 spring = make_float3(0.0f); float r = 0; for (uint p = start; p<end; ++p) { //if( parflag[p]!=TYPESOLID ) //solid粒子也应该对别的粒子产生作用才对 { dist = length(pos[p] - ipos); r = idiameter;//+getRfromMass( pmass[p] ); w = pmass[p] * smooth_kernel(dist*dist, r); if (dist>0.1f*idiameter) //太近会产生非常大的弹力 spring += w*(ipos - pos[p]) / dist; } } return spring; } __global__ void correctparticlepos(float3* outpos, float3* ppos, float *pmass, char* parflag, int pnum, uint* gridstart, uint *gridend, float correctionspring, float correctionradius, float3 *pepos, float *peradius, int penum) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID/* || parflag[idx]==TYPEAIR*/ || parflag[idx] == TYPEAIRSOLO) { outpos[idx] = ppos[idx]; return; } float3 ipos = ppos[idx]; int i, j, k; getijkfrompos(i, j, k, ipos); float3 spring = make_float3(0.0f); float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.samplespace)); float re = correctionradius*dparam.cellsize.x; // float re= getRfromMass( pmass[idx] ); int lv = 1; // float idiameter = 2*pow(0.75*pmass[idx]/dparam.waterrho/M_PI, 1.0/3); //注意,应该比实际的半径大,相当于SPH中的核函数半径 for (int di = -lv; di <= lv; di++) for (int dj = -lv; dj <= lv; dj++) for (int dk = -lv; dk <= lv; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { spring += sumcellspring(ipos, ppos, pmass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk), re); } } // //增加empty气泡的作用,遍历所有的empty粒子 // float w, dist; // for( int p=0; p<penum; p++ ) // { // if( peradius[p]>0.5f*dparam.cellsize.x ) //太小不处理 // { // dist=length(pepos[p]-ipos); // w = pmass[idx]*smooth_kernel(dist*dist, peradius[p]); //质量用被弹开粒子的质量 // if( dist>0.1f*peradius[p] ) //太近会产生非常大的弹力 // spring += w*(ipos-pepos[p]) / dist; // } // } spring *= correctionspring*re; if (length(dparam.dt*spring)>0.3f*dparam.cellsize.x) ipos += dparam.cellsize.x * 0.3f * spring / length(spring); else ipos += dparam.dt*spring; ipos.x = fmax(tmin.x, fmin(tmax.x, ipos.x)); ipos.y = fmax(tmin.y, fmin(tmax.y, ipos.y)); ipos.z = fmax(tmin.z, fmin(tmax.z, ipos.z)); outpos[idx] = ipos; } } __device__ void sumcelldens(float &phi, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis = length(pos[p] - gpos); if (phi>dis) phi = dis; } } } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012][TVCG]Preserving Fluid Sheets with Adaptively Sampled Anisotropic Particles __global__ void genWaterDensfield(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NX + 1)*(NY + 1)*(NZ + 1)) { float h = dparam.cellsize.x; float phi = 8 * 
fMCDensity*h; //from flip3d_vs //get position int i, j, k; getijk(i, j, k, idx, NX + 1, NY + 1, NZ + 1); float3 p = make_float3(i, j, k)*h; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk)) { sumcelldens(phi, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); } } phi = fMCDensity*h - phi; if (i*j*k == 0 || i == NX || j == NY || k == NZ) phi = fmin(phi, -0.1f); outdens[idx] = phi; } } __device__ float3 sumcelldens2(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【CGF】Parallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield2(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. float r = 1.0f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens2(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, MCParType); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_Gas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, SCENE scene) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || (parflag[p] == TYPEAIRSOLO && scene != SCENE_INTERACTION)) { dis = length(pos[p] - gpos); if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【CGF】Parallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_Gas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. 
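// (added note) Same construction as the liquid version above: accumulate a
// kernel-weighted average position ("center") of nearby gas particles and define
// the implicit surface as phi = r - |gridpoint - center|, so the zero level set
// is a sphere of radius r around the locally averaged particle position.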
float r = 0.8f*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_Gas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, scene); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens_liquidAndGas(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float R, float sradiusInv, float radius, float racc,float wacc, float3 pacc) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; //float r = R / 2.; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO || parflag[p] == TYPEFLUID) { dis = length(pos[p] - gpos); // { // float s = dot(pos[p] - gpos, pos[p] - gpos)*sradiusInv;//mantaflow // w = max(0., (1. - s)); // wacc += w; // racc += radius * w; // pacc += pos[p] * w; // // } if (dis<R) { w = R*R - dis*dis; w = w*w*w; res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【CGF】Parallel Surface Reconstruction for Particle-Based Fluids __global__ void genWaterDensfield_liquidAndGas(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. //float r = 2.5f*sqrt(3.)*1.01*0.5*h; //mantaFlow flip03_gen float r = 0.55*h; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); //mantaflow 里面的算法 //float racc, wacc; //float3 pacc = make_float3(0.); // float phiv = r; // sradiusInv = 1. / (4. *r * r); // int radius = int(1. * r) + 1; // float3 gridPos = make_float3(i + 0.5, j + 0.5, k + 0.5)* h; float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; int rate = 2; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens_liquidAndGas(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h*rate, sradiusInv, r,racc,wacc,pacc); // printf("%f !!!!", pacc.x); ///////////////////////// // racc /= wacc; // pacc /= wacc; // phiv = fabs(length(gridPos-pacc)); } } if (wsum>0) { center /= wsum; phi = r - length(p - center); } else phi = -r; //todo: this may change corresponding to grid resolution. 
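		// Note: the boundary clamp below forces the outermost MC nodes far outside
		// the surface (phi = -1000), which caps any liquid/gas region that touches
		// the domain boundary instead of leaving the extracted mesh open there.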
// phi = phiv; //mantaflow if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -1000.0f; //phi = fmin( phi, -10.0f); outdens[idx] = phi; } } __device__ float3 sumcelldens3(float& wsum, float3 gpos, float3 *pos, char *parflag, uint *gridstart, uint *gridend, int gidx, float h, char MCParType) { float3 res = make_float3(0.0f); if (gridstart[gidx] == CELL_UNDEF) return res; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis, w; for (uint p = start; p<end; ++p) { if (parflag[p] == MCParType) { //GY:参照论文【CFG2012】Parallel Surface Reconstruction for Particle-Based Fluids // [2007CAVW]A Unified Particle Model for Fluid-Solid Interactions // 【2012 VRIPHYS】An Efficient Surface Reconstruction Pipeline for Particle-Based Fluids dis = length(pos[p] - gpos); //v-xi if (dis<h) { // w = h*h -dis*dis; //之前的代码 // w = w*w*w; // res += pos[p] * w; // wsum += w; w = dis / (4 * h); // |v-xi|/R 见[2007 CAVW]下同 R=2h=4r w = 1 - w*w; // 1-s~2 w = max(w*w*w, 0.0); // k(s) res += pos[p] * w; wsum += w; } } } return res; } //得到网格上每一个结点的密度值,为MC算法做准备 //[2012]【VRIPHYS】An Efficient Surface Reconstruction Pipeline for Particle-Based Fluids __global__ void genWaterDensfield_GY(farray outdens, float3 *pos, char *parflag, uint *gridstart, uint *gridend, float fMCDensity, char MCParType, float3 centertmp) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { float phi; float h = dparam.cellsize.x / (NXMC / NX); //todo: this is not quite right, r should be 0.5*samplespace, i.e. 0.25f/gn. float r = 0.75f*h; float thigh = 0.51; float tlow = 0.49; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); float3 p = make_float3(i, j, k)* h; //网格的位置 float3 center = make_float3(0.0f); float wsum = 0.0f; for (int di = -2; di <= 1; ++di) for (int dj = -2; dj <= 1; ++dj) for (int dk = -2; dk <= 1; ++dk) { if (verifycellidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC)) { center += sumcelldens3(wsum, p, pos, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk, NXMC, NYMC, NZMC), h, MCParType); } } if (wsum>0) { center /= wsum; //~v float3 delta = center - centertmp; float Ev = max(delta.x, max(delta.y, delta.z)) / (4 * h); // // float Ev = 3.8; centertmp = center; // centertmp:存储的是上一次的center 求Ev的delta用 float gamma = (thigh - Ev) / (thigh - tlow); float f = (Ev<tlow) ? 1 : gamma*gamma*gamma - 3 * gamma*gamma + 3 * gamma; // phi = r - length( p - center ); phi = (length(p - center) - r*f); } else phi = -r; //todo: this may change corresponding to grid resolution. if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = fmin(phi, -10.0f); outdens[idx] = phi; } } __global__ void markSolid_sphere(float3 spherepos, float sphereradius, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if ((i>NX/2-2) &&i<2.5*NX/3 && j>3.5*NY/9 && j< 6*NY/9 && k<NZ/5) mark[idx] = TYPEBOUNDARY; } } __global__ void markSolid_waterfall(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z) mark[idx] = TYPEBOUNDARY; } } //a trick part. 
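// ---------------------------------------------------------------------------
// Host-side launch sketch (added for illustration; not part of the original
// pipeline). The grid-marking kernels above run one thread per cell, so a
// typical launch rounds the cell count up to a whole number of blocks. The
// helper name, the 256-thread block size, and passing the cell count in as a
// plain int are assumptions, not the original calling convention.
// ---------------------------------------------------------------------------
static void launchMarkSolidWaterfall(int3 minpos, int3 maxpos, charray mark, int cellnum)
{
	const int threadnum = 256;										// assumed block size
	const int blocknum = (cellnum + threadnum - 1) / threadnum;		// ceil(cellnum / threadnum)
	markSolid_waterfall << <blocknum, threadnum >> >(minpos, maxpos, mark);
	cudaDeviceSynchronize();										// optional: surface launch errors early
}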
__global__ void markSolid_waterfall_liquid(int3 minpos, int3 maxpos, charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { int x, y, z; getijk(x, y, z, idx); if (x <= maxpos.x && (y >= maxpos.y || y <= minpos.y) && z <= maxpos.z*0.7f) mark[idx] = TYPEBOUNDARY; else if (x <= maxpos.x && (y>minpos.y || y<maxpos.y) && z <= minpos.z*0.7f) mark[idx] = TYPEBOUNDARY; } } //a trick part. __global__ void markSolid_terrain(charray mark, charray mark_terrain) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) { if (mark_terrain[idx] == TYPEBOUNDARY) mark[idx] = TYPEBOUNDARY; } } //得到网格上每一个结点的密度值,为MC算法做准备 __global__ void genSphereDensfield(farray outdens, float3 center, float radius) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < (NXMC + 1)*(NYMC + 1)*(NZMC + 1)) { //float3 center = make_float3(0.5f); float phi; //get position int i, j, k; getijk(i, j, k, idx, NXMC + 1, NYMC + 1, NZMC + 1); if (i*j*k == 0 || i == NXMC || j == NYMC || k == NZMC) phi = -0.1; else { float3 p = make_float3(i, j, k)*dparam.cellsize.x / (NXMC / NX); phi = radius - length(p - center); } outdens[idx] = phi; } } //-----MC 算法,from cuda sdk 4.2 // classify voxel based on number of vertices it will generate // one thread per voxel (cell) __global__ void classifyVoxel(uint* voxelVerts, uint *voxelOccupied, farray volume, float isoValue) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<NXMC*NYMC*NZMC) { int i, j, k; getijk(i, j, k, idx, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate flag indicating if each vertex is inside or outside isosurface uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // read number of vertices from texture uint numVerts = tex1Dfetch(numVertsTex, cubeindex); voxelVerts[idx] = numVerts; voxelOccupied[idx] = (numVerts > 0); }//endif } // compact voxel array __global__ void compactVoxels(uint *compactedVoxelArray, uint *voxelOccupied, uint *voxelOccupiedScan, uint numVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint i = __mul24(blockId, blockDim.x) + threadIdx.x; if (voxelOccupied[i] && (i < numVoxels)) { compactedVoxelArray[voxelOccupiedScan[i]] = i; } } // compute interpolated vertex along an edge __device__ float3 vertexInterp(float isolevel, float3 p0, float3 p1, float f0, float f1) { float t = (isolevel - f0) / (f1 - f0); return lerp(p0, p1, t); } // calculate triangle normal __device__ float3 calcNormal(float3 *v0, float3 *v1, float3 *v2) { float3 edge0 = *v1 - *v0; float3 edge1 = *v2 - *v0; // note - it's faster to perform normalization in vertex shader rather than here return cross(edge0, edge1); } __device__ int GetVertexID(int i, int j, int k) { return 3 * (i*(NZMC + 1)*(NYMC + 1) + j*(NZMC + 1) + k); } __device__ int GetEdgeID(int nX, int nY, int nZ, int edge) { // return GetVertexID( nX,nY,nZ ); switch (edge) { case 0: return GetVertexID(nX, nY, nZ) + 1; case 1: return 
GetVertexID(nX + 1, nY, nZ); case 2: return GetVertexID(nX, nY + 1, nZ) + 1; case 3: return GetVertexID(nX, nY, nZ); case 4: return GetVertexID(nX, nY, nZ + 1) + 1; case 5: return GetVertexID(nX + 1, nY, nZ + 1); case 6: return GetVertexID(nX, nY + 1, nZ + 1) + 1; case 7: return GetVertexID(nX, nY, nZ + 1); case 8: return GetVertexID(nX, nY, nZ) + 2; case 9: return GetVertexID(nX + 1, nY, nZ) + 2; case 10: return GetVertexID(nX + 1, nY + 1, nZ) + 2; case 11: return GetVertexID(nX, nY + 1, nZ) + 2; default: // Invalid edge no. return -1; } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles2(float3 *pos, float3 *norm, uint *compactedVoxelArray, uint *numVertsScanned, farray volume, float isoValue, uint activeVoxels, uint maxVerts) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], v[4], field[0], field[4]); vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], 
v[7], field[3], field[7]); __syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; float3 *v[3]; uint edge; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); v[0] = &vertlist[(edge*NTHREADS) + threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); v[1] = &vertlist[(edge*NTHREADS) + threadIdx.x]; edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); v[2] = &vertlist[(edge*NTHREADS) + threadIdx.x]; // calculate triangle surface normal float3 n = calcNormal(v[0], v[1], v[2]); /*if (index < (maxVerts - 3)) */{ pos[index] = *v[0]; norm[index] = n; pos[index + 1] = *v[1]; norm[index + 1] = n; pos[index + 2] = *v[2]; norm[index + 2] = n; } } } // version that calculates flat surface normal for each triangle __global__ void generateTriangles_indices(float3 *pTriVertex, uint *pTriIndices, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels, uint maxVerts, uint *MCEdgeIdxMapped, uint *numVertsScanned) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; float3 voxelSize = dparam.cellsize / (NXMC / NX); // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float3 p; p.x = i*voxelSize.x; p.y = j*voxelSize.y; p.z = k*voxelSize.z; float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // calculate cell vertex positions float3 v[8]; v[0] = p; v[1] = p + make_float3(voxelSize.x, 0, 0); v[2] = p + make_float3(voxelSize.x, voxelSize.y, 0); v[3] = p + make_float3(0, voxelSize.y, 0); v[4] = p + make_float3(0, 0, voxelSize.z); v[5] = p + make_float3(voxelSize.x, 0, voxelSize.z); v[6] = p + make_float3(voxelSize.x, voxelSize.y, voxelSize.z); v[7] = p + make_float3(0, voxelSize.y, voxelSize.z); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // find the vertices where the surface intersects the cube // use shared memory to avoid using local __shared__ float3 vertlist[12 * NTHREADS]; vertlist[threadIdx.x] = vertexInterp(isoValue, v[0], v[1], field[0], field[1]); vertlist[NTHREADS + threadIdx.x] = vertexInterp(isoValue, v[1], v[2], field[1], field[2]); vertlist[(NTHREADS * 2) + threadIdx.x] = vertexInterp(isoValue, v[2], v[3], field[2], field[3]); vertlist[(NTHREADS * 3) + threadIdx.x] = vertexInterp(isoValue, v[3], v[0], field[3], field[0]); vertlist[(NTHREADS * 4) + threadIdx.x] = vertexInterp(isoValue, v[4], v[5], field[4], field[5]); vertlist[(NTHREADS * 5) + threadIdx.x] = vertexInterp(isoValue, v[5], v[6], field[5], field[6]); vertlist[(NTHREADS * 6) + threadIdx.x] = vertexInterp(isoValue, v[6], v[7], field[6], field[7]); vertlist[(NTHREADS * 7) + threadIdx.x] = vertexInterp(isoValue, v[7], v[4], field[7], field[4]); vertlist[(NTHREADS * 8) + threadIdx.x] = vertexInterp(isoValue, v[0], 
v[4], field[0], field[4]); vertlist[(NTHREADS * 9) + threadIdx.x] = vertexInterp(isoValue, v[1], v[5], field[1], field[5]); vertlist[(NTHREADS * 10) + threadIdx.x] = vertexInterp(isoValue, v[2], v[6], field[2], field[6]); vertlist[(NTHREADS * 11) + threadIdx.x] = vertexInterp(isoValue, v[3], v[7], field[3], field[7]); __syncthreads(); // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge, mappededgeidx; for (int idx2 = 0; idx2<numVerts; idx2 += 3) { uint index = numVertsScanned[voxel] + idx2; //vertex index to write back, sort by each triangle. //写入triangle包含的三个顶点的索引,索引是未经过处理的,即边的全局编号,之后单独处理 edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 1); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 1] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); edge = tex1Dfetch(triTex, (cubeindex * 16) + idx2 + 2); mappededgeidx = MCEdgeIdxMapped[GetEdgeID(i, j, k, edge)]; pTriIndices[index + 2] = mappededgeidx; //notice: indices begin from 0. pTriVertex[mappededgeidx] = (vertlist[(edge*NTHREADS) + threadIdx.x]); } } __global__ void markActiveEdge_MC(uint *outmark, uint *compactedVoxelArray, farray volume, float isoValue, uint activeVoxels) { uint blockId = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; uint idx = __mul24(blockId, blockDim.x) + threadIdx.x; if (idx > activeVoxels - 1) { idx = activeVoxels - 1; } int voxel = compactedVoxelArray[idx]; // compute position in 3d grid int i, j, k; getijk(i, j, k, voxel, NXMC, NYMC, NZMC); float field[8]; field[0] = volume(i, j, k); field[1] = volume(i + 1, j, k); field[2] = volume(i + 1, j + 1, k); field[3] = volume(i, j + 1, k); field[4] = volume(i, j, k + 1); field[5] = volume(i + 1, j, k + 1); field[6] = volume(i + 1, j + 1, k + 1); field[7] = volume(i, j + 1, k + 1); // recalculate flag uint cubeindex; cubeindex = uint(field[0] < isoValue); cubeindex += uint(field[1] < isoValue) * 2; cubeindex += uint(field[2] < isoValue) * 4; cubeindex += uint(field[3] < isoValue) * 8; cubeindex += uint(field[4] < isoValue) * 16; cubeindex += uint(field[5] < isoValue) * 32; cubeindex += uint(field[6] < isoValue) * 64; cubeindex += uint(field[7] < isoValue) * 128; // output triangle vertices uint numVerts = tex1Dfetch(numVertsTex, cubeindex); uint edge; for (int idxVert = 0; idxVert<numVerts; idxVert++) { //下面可能会重复写,但是应该没问题。注意这个函数执行前需要把outmark置0 edge = tex1Dfetch(triTex, (cubeindex * 16) + idxVert); outmark[GetEdgeID(i, j, k, edge)] = 1; } //debug // for( int edge=0; edge<12; edge++ ) // outmark[GetEdgeID(i,j,k,edge)] = 1; } //以三角形为核心来计算法线,原子写入到点的法线中。注意:法线不要归一化 __global__ void calnormal_k(float3 *ppos, float3 *pnor, int pnum, uint *indices, int indicesnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < indicesnum / 3) //face number { int i1 = indices[idx * 3 + 0]; int i2 = indices[idx * 3 + 1]; int i3 = indices[idx * 3 + 2]; float3 p1 = ppos[i1]; float3 p2 = ppos[i2]; float3 p3 = ppos[i3]; //compute float3 nor = cross(p2 - p1, p3 - p1); //write back atomicAdd(&pnor[i1].x, nor.x); atomicAdd(&pnor[i2].x, nor.x); atomicAdd(&pnor[i3].x, nor.x); atomicAdd(&pnor[i1].y, nor.y); atomicAdd(&pnor[i2].y, nor.y); atomicAdd(&pnor[i3].y, nor.y); atomicAdd(&pnor[i1].z, nor.z); 
atomicAdd(&pnor[i2].z, nor.z); atomicAdd(&pnor[i3].z, nor.z); } } //归一化顶点法线 __global__ void normalizeTriangleNor_k(float3 *pnor, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < pnum) //vertex number { if (length(pnor[idx])>0) pnor[idx] = normalize(pnor[idx]); } } void allocateTextures(uint **d_edgeTable, uint **d_triTable, uint **d_numVertsTable) { checkCudaErrors(cudaMalloc((void**)d_edgeTable, 256 * sizeof(uint))); checkCudaErrors(cudaMemcpy((void *)*d_edgeTable, (void *)edgeTable, 256 * sizeof(uint), cudaMemcpyHostToDevice)); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindUnsigned); checkCudaErrors(cudaBindTexture(0, edgeTex, *d_edgeTable, channelDesc)); checkCudaErrors(cudaMalloc((void**)d_triTable, 256 * 16 * sizeof(uint))); checkCudaErrors(cudaMemcpy((void *)*d_triTable, (void *)triTable, 256 * 16 * sizeof(uint), cudaMemcpyHostToDevice)); checkCudaErrors(cudaBindTexture(0, triTex, *d_triTable, channelDesc)); checkCudaErrors(cudaMalloc((void**)d_numVertsTable, 256 * sizeof(uint))); checkCudaErrors(cudaMemcpy((void *)*d_numVertsTable, (void *)numVertsTable, 256 * sizeof(uint), cudaMemcpyHostToDevice)); checkCudaErrors(cudaBindTexture(0, numVertsTex, *d_numVertsTable, channelDesc)); } //计算两个1*n向量的点积,输出到out里(注意用归约求和的思想,out是一个数组,需要在CPU上累加起来) __global__ void arrayproduct_k(float* out, float* x, float *y, int n) { extern __shared__ float sdata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; sdata[tid] = (i >= n) ? 0 : (x[i] * y[i]); __syncthreads(); for (int s = blockDim.x / 2; s>0; s >>= 1) { if (tid<s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = sdata[0]; } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. __global__ void computeAx(farray ans, charray mark, farray x, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (mark[idx] == TYPEFLUID) //todo: should add typesolid or not. { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = -6.0f*center; float h2_rev = dparam.cellsize.x*dparam.cellsize.x; //notice: x必须在AIR类型的格子里是0,下面的式子才正确 sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k); sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k); sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1); sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k); sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k); sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ? 
center : x(i, j, k - 1); ans[idx] = sum / h2_rev; } else ans[idx] = 0.0f; } } //Ans = x + a*y __global__ void pcg_op(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { if (A[idx] == TYPEFLUID) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void buildprecondition_pcg(farray P, charray mark, farray ans, farray input, int n) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<n) { ans[idx] = 1.0f / 6 * input[idx]; } } __global__ void copyParticle2GL_vel_k(float3* ppos, float3 *pvel, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; if (pflag[idx] == TYPEFLUID) { rendercolor[idx * 3] = 1.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 0.0f; } else if (pflag[idx] == TYPEAIR) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 0.0f; rendercolor[idx * 3 + 2] = 1.0f; } else if (pflag[idx] == TYPESOLID) { rendercolor[idx * 3] = 0.0f; rendercolor[idx * 3 + 1] = 1.0f; rendercolor[idx * 3 + 2] = 0.0f; } } } __global__ void copyParticle2GL_radius_k(float3* ppos, float *pmass, char *pflag, int pnum, float *renderpos, float *rendercolor, float minmass) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; minmass *= 1.2f; //trick float rate = (pmass[idx] - minmass*dparam.m0) / (dparam.m0 - minmass*dparam.m0); rate = fmax(0.0f, fmin(1.0f, rate)); { float3 color = mapColorBlue2Red(powf(rate, 1.0f / 3)*6.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } } __device__ inline void atomicaddfloat3(float3 *a, int idx, float3 b) { atomicAdd(&a[idx].x, b.x); atomicAdd(&a[idx].y, b.y); atomicAdd(&a[idx].z, b.z); } __global__ void smooth_computedisplacement(float3 *displacement, int *weight, float3 *ppos, uint *indices, int trianglenum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<trianglenum) { uint p1 = indices[idx * 3]; uint p2 = indices[idx * 3 + 1]; uint p3 = indices[idx * 3 + 2]; atomicaddfloat3(displacement, p1, ppos[p2] - ppos[p1]); atomicaddfloat3(displacement, p1, ppos[p3] - ppos[p1]); atomicaddfloat3(displacement, p2, ppos[p1] - ppos[p2]); atomicaddfloat3(displacement, p2, ppos[p3] - ppos[p2]); atomicaddfloat3(displacement, p3, ppos[p1] - ppos[p3]); atomicaddfloat3(displacement, p3, ppos[p2] - ppos[p3]); atomicAdd(&weight[p1], 2); atomicAdd(&weight[p2], 2); atomicAdd(&weight[p3], 2); } } __global__ void smooth_addDisplacement(float3 *displacement, int *weight, float3 *ppos, int vertexnum, float param) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<vertexnum) { if (weight[idx]>0) ppos[idx] += param * displacement[idx] / weight[idx]; displacement[idx] = make_float3(0.0f); weight[idx] = 0; } } //diffuse density field. __global__ void diffuse_dense(farray outp, farray inp, charray mark, float alpha, float beta) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < outp.xn * outp.yn * outp.zn) { float resp = 0; float p1, p2, p3, p4, p5, p6; float p0 = inp[idx]; int i, j, k; getijk(i, j, k, idx, outp.xn, outp.yn, outp.zn); if (mark(i, j, k) == TYPEBOUNDARY) outp[idx] = 0.0f; else { p1 = (mark(i + 1, j, k) == TYPEBOUNDARY) ? 
p0 : inp(i + 1, j, k); p2 = (mark(i, j + 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j + 1, k); p3 = (mark(i, j, k + 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k + 1); p4 = (mark(i - 1, j, k) == TYPEBOUNDARY) ? p0 : inp(i - 1, j, k); p5 = (mark(i, j - 1, k) == TYPEBOUNDARY) ? p0 : inp(i, j - 1, k); p6 = (mark(i, j, k - 1) == TYPEBOUNDARY) ? p0 : inp(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta; outp[idx] = resp; } } } //diffuse velocity field. __global__ void diffuse_velocity(farray outv, farray inv, float alpha, float beta) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < outv.xn * outv.yn * outv.zn) { float resp = 0; float p1, p2, p3, p4, p5, p6; float p0 = inv[idx]; int i, j, k; getijk(i, j, k, idx, outv.xn, outv.yn, outv.zn); if (i == 0 || j == 0 || k == 0 || i >= outv.xn - 1 || j >= outv.yn - 1 || k >= outv.zn - 1) outv[idx] = p0; else { p1 = inv(i + 1, j, k); p2 = inv(i, j + 1, k); p3 = inv(i, j, k + 1); p4 = inv(i - 1, j, k); p5 = inv(i, j - 1, k); p6 = inv(i, j, k - 1); resp = (p1 + p2 + p3 + p4 + p5 + p6 + alpha*p0) / beta; outv[idx] = resp; } } } //maxLength, hashPoints是输出:最长边(每个block里),每个三角形一个用来hash的点 __global__ void createAABB_q(float3* points, int nPoints, uint3* faces, int nFaces, float *maxLength, float3* hashPoints) { int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= nFaces) return; __shared__ float maxArray[256]; uint p1 = faces[index].x; uint p2 = faces[index].y; uint p3 = faces[index].z; //得到三角形的三个顶点 float3 px = points[p1]; float3 py = points[p2]; float3 pz = points[p3]; AABB aabb; aabb.xMin = (px.x>py.x) ? py.x : px.x; aabb.xMin = (aabb.xMin>pz.x) ? pz.x : aabb.xMin; aabb.xMax = (px.x<py.x) ? py.x : px.x; aabb.xMax = (aabb.xMax<pz.x) ? pz.x : aabb.xMax; aabb.yMin = (px.y>py.y) ? py.y : px.y; aabb.yMin = (aabb.yMin>pz.y) ? pz.y : aabb.yMin; aabb.yMax = (px.y<py.y) ? py.y : px.y; aabb.yMax = (aabb.yMax<pz.y) ? pz.y : aabb.yMax; aabb.zMin = (px.z>py.z) ? py.z : px.z; aabb.zMin = (aabb.zMin>pz.z) ? pz.z : aabb.zMin; aabb.zMax = (px.z<py.z) ? py.z : px.z; aabb.zMax = (aabb.zMax<pz.z) ? pz.z : aabb.zMax; float tempMaxLength = aabb.xMax - aabb.xMin; tempMaxLength = (tempMaxLength>aabb.yMax - aabb.yMin) ? (tempMaxLength) : (aabb.yMax - aabb.yMin); tempMaxLength = (tempMaxLength>aabb.zMax - aabb.zMin) ? 
(tempMaxLength) : (aabb.zMax - aabb.zMin); maxArray[threadIdx.x] = tempMaxLength; hashPoints[index] = make_float3((aabb.xMin + aabb.xMax) / 2, (aabb.yMin + aabb.yMax) / 2, (aabb.zMin + aabb.zMax) / 2); __syncthreads(); for (int i = blockDim.x / 2; i>0; i /= 2) { if (threadIdx.x < i) maxArray[threadIdx.x] = max(maxArray[threadIdx.x], maxArray[i + threadIdx.x]); __syncthreads(); } if (threadIdx.x == 0) maxLength[blockIdx.x] = maxArray[0]; } __global__ void calcHash_radix_q( uint2* gridParticleIndex, // output float3* posArray, // input: positions uint numParticles, float3 t_min, float3 t_max) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index >= numParticles) return; float3 pos = posArray[index]; uint hash; int gz = (pos.z - t_min.z) / dparam.triHashSize.z; int gy = (pos.y - t_min.y) / dparam.triHashSize.y; int gx = (pos.x - t_min.x) / dparam.triHashSize.x; if (gx < 0 || gx > dparam.triHashRes.x - 1 || gy < 0 || gy > dparam.triHashRes.y - 1 || gz < 0 || gz > dparam.triHashRes.z - 1) hash = CELL_UNDEF; else hash = __mul24(__mul24(gz, (int)dparam.triHashRes.y) + gy, (int)dparam.triHashRes.x) + gx; // store grid hash and particle index gridParticleIndex[index] = make_uint2(hash, index); } // rearrange particle data into sorted order, and find the start of each cell // in the sorted hash array __global__ void reorderDataAndFindCellStart_radix_q(uint* cellStart, // output: cell start index uint* cellEnd, // output: cell end index uint3* sortedFaces, uint2 * gridParticleHash, // input: sorted grid hashes uint3* oldFaces, uint numParticles) { extern __shared__ uint sharedHash[]; // blockSize + 1 elements uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint hash; // handle case when no. of particles not multiple of block size if (index < numParticles) { hash = gridParticleHash[index].x; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread sharedHash[threadIdx.x + 1] = hash; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash sharedHash[0] = gridParticleHash[index - 1].x; } } __syncthreads(); if (index < numParticles) { // If this particle has a different cell index to the previous // particle then it must be the first particle in the cell, // so store the index of this particle in the cell. 
// As it isn't the first particle, it must also be the cell end of // the previous particle's cell if (index == 0 || hash != sharedHash[threadIdx.x]) { cellStart[hash] = index; if (index > 0) cellEnd[sharedHash[threadIdx.x]] = index; } if (index == numParticles - 1) { cellEnd[hash] = index + 1; } // Now use the sorted index to reorder the pos and vel data uint sortedIndex = gridParticleHash[index].y; sortedFaces[index] = oldFaces[sortedIndex]; // see particles_kernel.cuh } } __global__ void calculateNormal(float3* points, uint3* faces, float3* normals, int num) { uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; if (index < num) { uint3 face = faces[index]; float3 v1 = points[face.x]; float3 v2 = points[face.y]; float3 v3 = points[face.z]; float3 tmp; tmp.x = (v1.y - v2.y)*(v1.z - v3.z) - (v1.z - v2.z)*(v1.y - v3.y); tmp.y = (v1.z - v2.z)*(v1.x - v3.x) - (v1.x - v2.x)*(v1.z - v3.z); tmp.z = (v1.x - v2.x)*(v1.y - v3.y) - (v1.y - v2.y)*(v1.x - v3.x); normals[index] = normalize(tmp); } } //temp_yanglp: 检测一个小球与三角形是否相交,求出对粒子作用的顶点权重,返回值为负数,表示没有相交,正数表示相交 __device__ float IntersectTriangle_q(float3& pos, float radius, float3& v0, float3& v1, float3& v2, float3 n) { //compute the distance of pos and triangle plane float d = dot(pos - v0, n); if (abs(d)>radius) return -1; float dislimit = radius*radius - d*d; //球心在三角形平面的投影 float3 pTri = pos - d*n; float3 tempcross; float d0 = dot(pTri - v0, pTri - v0); float d1 = dot(pTri - v1, pTri - v1); float d2 = dot(pTri - v2, pTri - v2); //判断是否在三角形内 int tt = (dot(cross(pTri - v0, v1 - v0), n)>0) ? 1 : 0; tt += (dot(cross(pTri - v1, v2 - v1), n)>0) ? 2 : 0; tt += (dot(cross(pTri - v2, v0 - v2), n)>0) ? 4 : 0; //cuPrintf("tt=%d\n",tt); if (tt == 7 || tt == 0) { return abs(d); } //判断投影点与三角形顶点的距离是否符合条件 float distemp; float dis = (d0<dislimit) ? (d0) : dislimit; //dis表示到目前为止投影点到三角形的最小距离 dis = (d1<dis) ? (d1) : dis; dis = (d2<dis) ? (d2) : dis; //判断投影点与三角形边的距离 if (dot(v1 - v0, pTri - v0)*dot(v0 - v1, pTri - v1)>0) { tempcross = cross(v1 - v0, pTri - v0); distemp = dot(tempcross, tempcross) / dot(v1 - v0, v1 - v0); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v2 - v1, pTri - v1)*dot(v1 - v2, pTri - v2)>0) { tempcross = cross(v2 - v1, pTri - v1); distemp = dot(tempcross, tempcross) / dot(v2 - v1, v2 - v1); dis = (distemp<dis) ? (distemp) : dis; } if (dot(v0 - v2, pTri - v2)*dot(v2 - v0, pTri - v0)>0) { tempcross = cross(v0 - v2, pTri - v2); distemp = dot(tempcross, tempcross) / dot(v0 - v2, v0 - v2); dis = (distemp<dis) ? 
			(distemp) : dis;
	}

	if (dis > dislimit - 0.001)
		return -1;
	return sqrt(dis + d*d);
}

// calculate address in grid from position (clamping to edges)
__device__ uint calcGridHash_q(int3 gridPos)
{
	return __umul24(__umul24(gridPos.z, dparam.triHashRes.y), dparam.triHashRes.x) + __umul24(gridPos.y, dparam.triHashRes.x) + gridPos.x;
}

// collide a particle against all other particles in a given cell
__device__ float3 collideCell(int3 gridPos, float3 pos, float radius, float3* surPoints, uint3* surIndex, float3* surfaceNor, uint* cellStart, uint* cellEnd, int scene)
{
	uint gridHash = calcGridHash_q(gridPos);
	float dis_n, wib = 0;
	float3 force = make_float3(0.0f);

	// get start of bucket for this cell
	uint startIndex = cellStart[gridHash];
	if (startIndex != CELL_UNDEF)
	{
		// cell is not empty
		// iterate over particles in this cell
		uint endIndex = cellEnd[gridHash];
		for (uint j = startIndex; j<endIndex; j++)
		{
			//cuPrintf("j=%d\n", j);
			dis_n = IntersectTriangle_q(pos, radius, surPoints[surIndex[j].x], surPoints[surIndex[j].y], surPoints[surIndex[j].z], surfaceNor[j]);
			wib = 1 - dis_n / radius;
			if (dis_n >= 0 && wib > 0.00001)
			{
				force += (radius - dis_n) * (surfaceNor[j]) * 10;
			}
		}
	}
	return force;
}

__device__ void mindis_cell(float& mindisair, float& mindisfluid, float3 gpos, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, int gidx, float radius)
{
	if (gridstart[gidx] == CELL_UNDEF)
		return;
	uint start = gridstart[gidx];
	uint end = gridend[gidx];
	float dis;
	for (uint p = start; p<end; ++p)
	{
		dis = length(pos[p] - gpos);	//the radius would be subtracted; the constant later is a small correction
		// dis = fabs(length(pos[p] - gpos))- radius;	// following mantaflow
		if (parflag[p] == TYPEAIR || parflag[p] == TYPEAIRSOLO)	//todo: should the SOLO type be included, to keep the level set from jumping when particle flags change?
			mindisair = (dis<mindisair) ? dis : mindisair;
		else if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID)
			mindisfluid = (dis<mindisfluid) ?
dis : mindisfluid; } } //这个level set的值很可能有问题,从画出来的图可以看出来一些,直接影响后面所有的内容。 //[2012]【长文】MultiFLIP for Energetic Two-Phase Fluid Simulation __global__ void genlevelset(farray lsfluid, farray lsair, charray mark, float3 *pos, char *parflag, float *pmass, uint *gridstart, uint *gridend, float fMCDensity, float offset) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx < dparam.gnum) //每个格子一个值 { //float ls; float h = dparam.cellsize.x; mark[idx] = TYPEVACUUM; float r = 0.5f*h; //0.36f*h; //float r = 0.5*sqrt(3.)*1.01*2.5; //修改为0.5*1.01 依据mantaflow //get position int i, j, k; getijk(i, j, k, idx, NX, NY, NZ); float3 gpos = (make_float3(i, j, k) + make_float3(0.5f, 0.5f, 0.5f))*dparam.cellsize.x; // shifted by half cell float mindisair = 2.5f*h, mindisfluid = 2.5f*h; //2.5 cellsize //float mindisair = r, mindisfluid = r; //修正 mindis- 为 r 依据mantaflow int level = 2; for (int di = -level; di <= level; ++di) for (int dj = -level; dj <= level; ++dj) for (int dk = -level; dk <= level; ++dk) //周围27个格子就行 { if (verifycellidx(i + di, j + dj, k + dk)) { mindis_cell(mindisair, mindisfluid, gpos, pos, parflag, pmass, gridstart, gridend, getidx(i + di, j + dj, k + dk), r); } } mindisair -= r; //注释掉 依据mataflow mindisfluid -= r; lsfluid[idx] = mindisfluid; // lsair[idx] = mindisair - offset*h; //todo: 这里略微向外扩张了一下气体的ls,避免气体粒子correctpos时向内收缩导到气泡体积的减小。注意:这个修正会导致markgrid的不对,因此流体mark会大一层,其流动会受很大影响 lsair[idx] = mindisair; } } __device__ void sumcell_fluidSolid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEFLUID || parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_fluidSolid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 0; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 0; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? 
(usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 0; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_fluidSolid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? (usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_air(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPEAIR) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_air(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_air(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? (usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } __device__ void sumcell_solid(float3 &usum, float &weight, float3 gpos, float3 *pos, float3 *vel, float *mass, char *parflag, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { if (parflag[p] == TYPESOLID) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. 
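			// Mass-weighted sharp-kernel splat: each solid particle adds
			// m_p * sharp_kernel(d^2, RE) to the accumulated weight and
			// m_p * k * v_p to the velocity sum; mapvelp2g_k_solid divides by the
			// summed weight afterwards, so each grid face stores a normalized average.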
w = mass[p] * sharp_kernel(dis2, RE); weight += w; usum += w*vel[p]; } } } __global__ void mapvelp2g_k_solid(float3 *pos, float3 *vel, float *mass, char *parflag, int pnum, farray ux, farray uy, farray uz, uint* gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float weight; float3 gpos, usum; int rangemax = 2, rangemin = 1; if (idx<dparam.gvnum.x) { // ux weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX + 1, NY, NZ); gpos.x = i, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemin; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.x = (weight>0) ? (usum.x / weight) : 0.0f; ux(i, j, k) = usum.x; } if (idx<dparam.gvnum.y) { // uy weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY + 1, NZ); gpos.x = i + 0.5, gpos.y = j, gpos.z = k + 0.5; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemin; dj++) for (int dk = -rangemax; dk <= rangemax; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.y = (weight>0) ? (usum.y / weight) : 0.0f; uy(i, j, k) = usum.y; } if (idx<dparam.gvnum.z) { // uz weight = 0, usum = make_float3(0.0f); getijk(i, j, k, idx, NX, NY, NZ + 1); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k; for (int di = -rangemax; di <= rangemax; di++) for (int dj = -rangemax; dj <= rangemax; dj++) for (int dk = -rangemax; dk <= rangemin; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumcell_solid(usum, weight, gpos, pos, vel, mass, parflag, gridstart, gridend, getidx(i + di, j + dj, k + dk)); usum.z = (weight>0) ? 
(usum.z / weight) : 0.0f; uz(i, j, k) = usum.z; } } //计算散度 __global__ void cptdivergence_bubble(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls, farray sf) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float jx0, jx1, jy0, jy1, jz0, jz1, J; //surface tension, [2005]Discontinuous Fluids float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k), jx1 = 0; else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); jx1 = theta * sf(i, j, k) + (1 - theta) * sf(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k), jx0 = 0; else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); jx0 = theta*sf(i, j, k) + (1 - theta)*sf(i - 1, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k), jy1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); jy1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k), jy0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); jy0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j - 1, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1), jz1 = 0; else if (mark[idx] == 
TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1), jz1 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); jz1 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k), jz0 = 0; else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); jz0 = theta*sf(i, j, k) + (1 - theta)*sf(i, j, k - 1); } J = (jx1 - jx0 + jy1 - jy0 + jz1 - jz0) / h / h; div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; div += J; //surfacetension } outdiv[idx] = div; } } //计算散度,不使用压强来施加表面张力 __global__ void cptdivergence_bubble2(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * waterux(i + 1, j, k) + (1 - theta) * airux(i + 1, j, k); //ux1 = airux(i+1,j,k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i + 1, j, k) - ls(i, j, k)); ux1 = theta * airux(i + 1, j, k) + (1 - theta) * waterux(i + 1, j, k); //ux1 = airux(i+1,j,k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * waterux(i, j, k) + (1 - theta) * airux(i, j, k); //ux0 = airux(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i - 1, j, k) - ls(i, j, k)); ux0 = theta * airux(i, j, k) + (1 - theta) * waterux(i, j, k); //ux0 = airux(i,j,k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * wateruy(i, j + 1, k) + (1 - theta) * airuy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } 
else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j + 1, k) - ls(i, j, k)); uy1 = theta * airuy(i, j + 1, k) + (1 - theta) * wateruy(i, j + 1, k); //uy1 = airuy(i,j+1,k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * wateruy(i, j, k) + (1 - theta) * airuy(i, j, k); // uy0 = airuy(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j - 1, k) - ls(i, j, k)); uy0 = theta * airuy(i, j, k) + (1 - theta) * wateruy(i, j, k); //uy0 = airuy(i,j,k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * wateruz(i, j, k + 1) + (1 - theta) * airuz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k + 1) - ls(i, j, k)); uz1 = theta * airuz(i, j, k + 1) + (1 - theta) * wateruz(i, j, k + 1); //uz1 = airuz(i,j,k+1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * wateruz(i, j, k) + (1 - theta) * airuz(i, j, k); //uz0 = airuz(i,j,k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { theta = (0.0f - ls(i, j, k)) / (ls(i, j, k - 1) - ls(i, j, k)); uz0 = theta * airuz(i, j, k) + (1 - theta) * wateruz(i, j, k); //uz0 = airuz(i,j,k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } __global__ void cptdivergence_bubble3(farray outdiv, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz, charray mark, farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { float div = 0, h = dparam.cellsize.x; int i, j, k; getijk(i, j, k, idx); float ux0, ux1, uy0, uy1, uz0, uz1; float theta; if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR) { //ux1 if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) != TYPEAIR) ux1 = waterux(i + 1, j, k); else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) != TYPEFLUID) ux1 = airux(i + 1, j, k); else if (mark[idx] == TYPEFLUID && mark(i + 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * waterux(i+1,j,k) + (1-theta) * airux(i+1,j,k); ux1 = airux(i + 1, j, k); } else if (mark[idx] == TYPEAIR && mark(i + 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i+1,j,k)-ls(i,j,k)); // ux1 = theta * airux(i+1,j,k) + (1-theta) * waterux(i+1,j,k); ux1 = airux(i + 1, j, k); } //ux0 if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) != TYPEAIR) ux0 = waterux(i, j, k); else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) != TYPEFLUID) ux0 = airux(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * waterux(i,j,k) + (1-theta) * 
airux(i,j,k); ux0 = airux(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i-1,j,k)-ls(i,j,k)); // ux0 = theta * airux(i,j,k) + (1-theta) * waterux(i,j,k); ux0 = airux(i, j, k); } //uy1 if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) != TYPEAIR) uy1 = wateruy(i, j + 1, k); else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) != TYPEFLUID) uy1 = airuy(i, j + 1, k); else if (mark[idx] == TYPEFLUID && mark(i, j + 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * wateruy(i,j+1,k) + (1-theta) * airuy(i,j+1,k); uy1 = airuy(i, j + 1, k); } else if (mark[idx] == TYPEAIR && mark(i, j + 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j+1,k)-ls(i,j,k)); // uy1 = theta * airuy(i,j+1,k) + (1-theta) * wateruy(i,j+1,k); uy1 = airuy(i, j + 1, k); } //uy0 if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) != TYPEAIR) uy0 = wateruy(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) != TYPEFLUID) uy0 = airuy(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * wateruy(i,j,k) + (1-theta) * airuy(i,j,k); uy0 = airuy(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j-1,k)-ls(i,j,k)); // uy0 = theta * airuy(i,j,k) + (1-theta) * wateruy(i,j,k); uy0 = airuy(i, j, k); } //uz1 if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) != TYPEAIR) uz1 = wateruz(i, j, k + 1); else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) != TYPEFLUID) uz1 = airuz(i, j, k + 1); else if (mark[idx] == TYPEFLUID && mark(i, j, k + 1) == TYPEAIR) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * wateruz(i,j,k+1) + (1-theta) * airuz(i,j,k+1); uz1 = airuz(i, j, k + 1); } else if (mark[idx] == TYPEAIR && mark(i, j, k + 1) == TYPEFLUID) { // theta = (0.0f-ls(i,j,k))/(ls(i,j,k+1)-ls(i,j,k)); // uz1 = theta * airuz(i,j,k+1) + (1-theta) * wateruz(i,j,k+1); uz1 = airuz(i, j, k + 1); } //uz0 if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) != TYPEAIR) uz0 = wateruz(i, j, k); else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) != TYPEFLUID) uz0 = airuz(i, j, k); else if (mark[idx] == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * wateruz(i,j,k) + (1-theta) * airuz(i,j,k); uz0 = airuz(i, j, k); } else if (mark[idx] == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) { // theta=(0.0f-ls(i,j,k))/(ls(i,j,k-1)-ls(i,j,k)); // uz0 = theta * airuz(i,j,k) + (1-theta) * wateruz(i,j,k); uz0 = airuz(i, j, k); } div = (ux1 - ux0 + uy1 - uy0 + uz1 - uz0) / h; } outdiv[idx] = div; } } //压强与速度的计算 __global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; if (idx<dparam.gvnum.x) { //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k)) / h; } if (idx<dparam.gvnum.y) { //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k)) / h; } if (idx<dparam.gvnum.z) { //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1)) / h; } } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. 
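// For a fluid/air cell (i,j,k) the stencil below is the standard 7-point
// Laplacian: ans = (sum of the 6 neighbor values - 6*center) / h^2, where a
// neighbor marked TYPEBOUNDARY contributes the center value instead (i.e. a
// zero-normal-gradient Neumann wall), matching computeAx further above.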
__global__ void computeAx_bubble(farray ans, charray mark, farray x, int n)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<n)
	{
		if (mark[idx] == TYPEFLUID || mark[idx] == TYPEAIR)
		{
			int i, j, k;
			getijk(i, j, k, idx);
			float center = x[idx];
			float sum = -6.0f*center;
			float h2_rev = dparam.cellsize.x*dparam.cellsize.x;
			sum += (mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k);
			sum += (mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k);
			sum += (mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1);
			sum += (mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k);
			sum += (mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k);
			sum += (mark(i, j, k - 1) == TYPEBOUNDARY) ? center : x(i, j, k - 1);
			ans[idx] = sum / h2_rev;
		}
		else
			ans[idx] = 0.0f;
	}
}

//Ans = x + a*y
__global__ void pcg_op_bubble(charray A, farray ans, farray x, farray y, float a, int n)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<n)
	{
		if (A[idx] == TYPEFLUID || A[idx] == TYPEAIR)
			ans[idx] = x[idx] + a*y[idx];
		else
			ans[idx] = 0.0f;
	}
}

// Note: this kernel only updates the positions of fluid particles (TYPEFLUID), but it also
// updates the velocities of AIR particles (not AIRSOLO), using the CIP mode.
__global__ void advectparticle_RK2_bubble(float3 *ppos, float3 *pvel, int pnum, farray waterux, farray wateruy, farray wateruz,
	farray airux, farray airuy, farray airuz, float dt, char *parflag, VELOCITYMODEL velmode)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<pnum)
	{
		if (parflag[idx] == TYPEAIRSOLO)	// small AIRSOLO gas particles are not updated here at all, skip them
			return;

		//read in
		float3 ipos = ppos[idx], ivel = pvel[idx];
		float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x));
		float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.5f*dparam.cellsize.x));
		char partype = parflag[idx];

		//pos-->grid xyz
		float3 gvel = make_float3(0.0f);
		if (partype == TYPEFLUID)
			gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz);
		else if (partype == TYPEAIR)
			gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz);
		else	// TYPEAIRSOLO has its own simulation path and does not take part here
			return;

		if (velmode == CIP /*|| partype==TYPEAIR*/)	//todo: use CIP mode for gas particles to reduce the chance of them drifting wildly
			ivel = gvel;
		else
			ivel = (1 - FLIP_ALPHA)*gvel + FLIP_ALPHA*pvel[idx];

		//mid point: x(n+1/2) = x(n) + 0.5*dt*u(xn)
		float3 midpoint = ipos + gvel * dt * 0.5;
		float3 gvelmidpoint;
		if (partype == TYPEFLUID)
			gvelmidpoint = getParticleVelFromGrid(midpoint, waterux, wateruy, wateruz);
		else
			gvelmidpoint = getParticleVelFromGrid(midpoint, airux, airuy, airuz);

		// x(n+1) = x(n) + dt*u(x+1/2)
		ipos += gvelmidpoint * dt;

		//check boundary
		if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f;
		if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f;
		if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f;
		if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f;
		if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f;
		if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f;

		//write back: TYPEAIR+TYPESOLID only update velocity, TYPEAIRSOLO already returned above, TYPEFLUID updates both position and velocity.
		pvel[idx] = ivel;
		//	if( partype==TYPEFLUID )
		//		ppos[idx] = ipos;
	}
}

__global__ void mapvelg2p_flip_bubble(float3 *ppos, float3 *vel, char* parflag, int pnum, farray waterux, farray wateruy, farray wateruz, farray airux, farray airuy, farray airuz)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<pnum)
	{
		//pos-->grid xyz
		float3 ipos = ppos[idx];
		float3 gvel = make_float3(0.0f);
		if (parflag[idx] == TYPEFLUID || parflag[idx] == TYPESOLID)
			gvel = getParticleVelFromGrid(ipos, waterux, wateruy, wateruz);
		else if (parflag[idx] == TYPEAIR)
			gvel = getParticleVelFromGrid(ipos, airux, airuy, airuz);
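		// At this point gvel holds the velocity interpolated from the MAC grid at the
		// particle position (water grid for FLUID/SOLID particles, air grid for AIR
		// particles). It is accumulated into vel[] below rather than assigned; the kernel
		// name suggests a FLIP-style transfer, so the grids passed in are presumably
		// velocity increments, though that depends on the host code.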
vel[idx] += gvel; } } __global__ void compsurfacetension_k(farray sf, charray mark, farray phigrax, farray phigray, farray phigraz, float sigma) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) { int i, j, k; getijk(i, j, k, idx); float len, h = dparam.cellsize.x; float res, grax1, gray1, graz1, grax0, gray0, graz0; float3 phigracenter = make_float3(phigrax[idx], phigray[idx], phigraz[idx]); len = length(phigracenter); if (len == 0) res = 0; else { phigracenter /= len; if (verifycellidx(i + 1, j, k)) { len = length(make_float3(phigrax(i + 1, j, k), phigray(i + 1, j, k), phigraz(i + 1, j, k))); if (len == 0) grax1 = phigracenter.x; else grax1 = phigrax(i + 1, j, k) / len; } else grax1 = phigracenter.x; if (verifycellidx(i - 1, j, k)) { len = length(make_float3(phigrax(i - 1, j, k), phigray(i - 1, j, k), phigraz(i - 1, j, k))); if (len == 0) grax0 = phigracenter.x; else grax0 = phigrax(i - 1, j, k) / len; } else grax0 = phigracenter.x; if (verifycellidx(i, j + 1, k)) { len = length(make_float3(phigrax(i, j + 1, k), phigray(i, j + 1, k), phigraz(i, j + 1, k))); if (len == 0) gray1 = phigracenter.y; else gray1 = phigray(i, j + 1, k) / len; } else gray1 = phigracenter.y; if (verifycellidx(i, j - 1, k)) { len = length(make_float3(phigrax(i, j - 1, k), phigray(i, j - 1, k), phigraz(i, j - 1, k))); if (len == 0) gray0 = phigracenter.y; else gray0 = phigray(i, j - 1, k) / len; } else gray0 = phigracenter.y; if (verifycellidx(i, j, k + 1)) { len = length(make_float3(phigrax(i, j, k + 1), phigray(i, j, k + 1), phigraz(i, j, k + 1))); if (len == 0) graz1 = phigracenter.z; else graz1 = phigraz(i, j, k + 1) / len; } else graz1 = phigracenter.z; if (verifycellidx(i, j, k - 1)) { len = length(make_float3(phigrax(i, j, k - 1), phigray(i, j, k - 1), phigraz(i, j, k - 1))); if (len == 0) graz0 = phigracenter.z; else graz0 = phigraz(i, j, k - 1) / len; } else graz0 = phigracenter.z; res = (grax1 - grax0 + gray1 - gray0 + graz1 - graz0) / h * 0.5f; //res = (grax1-phigracenter.x + gray1-phigracenter.y + graz1-phigracenter.z) / h ; } sf[idx] = res*sigma; } else sf[idx] = 0; } } __global__ void enforcesurfacetension_p(float3* ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray sf, farray phigrax, farray phigray, farray phigraz, charray mark, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID/* || pflag[idx]==TYPEAIRSOLO*/ || pflag[idx] == TYPEFLUID) return; if( (scene != SCENE_MELTANDBOIL&&scene != SCENE_MELTANDBOIL_HIGHRES && pflag[idx] == TYPEAIRSOLO) || ((scene != SCENE_ALL && pflag[idx] == TYPEAIRSOLO))) return; //1. compute the cell, and get the ls, get sf. float3 ipos = ppos[idx]; float ilsmerge = getScaleFromFrid(ipos, lsmerge); float isf = getScaleFromFrid(ipos, sf); float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); float lendir = length(dir); if (lendir == 0) return; float3 f; dir /= lendir; ilsmerge /= lendir; //周围最少一个格子是空气的 int i, j, k; getijkfrompos(i, j, k, ipos); int cnt = (mark(i, j, k) == TYPEAIR) ? 1 : 0; for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) if (verifycellidx(i + di, j + dj, k + dk)) if (mark(i + di, j + dj, k + dk) == TYPEAIR) cnt++; if (cnt == 0) return; // if(abs(ls_p)<threshold), enforce a surface tension force, change the velocity. 
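		// The kick applied below is f = -sf * n, where sf is sampled from the
		// curvature-times-sigma grid built in compsurfacetension_k and n is the
		// normalized level-set gradient, so particles within about one cell of the
		// merged interface (and with at least one neighbouring air cell, per the count
		// above) receive an explicit surface-tension impulse f*dt.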
if (abs(ilsmerge)<dparam.cellsize.x) { f = -isf*dir; pvel[idx] += f*dparam.dt; } } } //标记levelset里比较大的正数,他们是邻近域内没有粒子的 __global__ void markLS_bigpositive(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] / dparam.cellsize.x; if (ls[idx] >1.99f) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //标记为需要sweep的单元,并非真正的标记 } else mark[idx] = TYPEFLUID; } } __global__ void setLSback_bigpositive(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = ls[idx] * dparam.cellsize.x; } } __global__ void preparels(farray ls, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] / dparam.cellsize.x; if (ls[idx] >0) { ls[idx] = 5.0f; mark[idx] = TYPEAIR; //标记为需要sweep的单元,并非真正的标记 } else mark[idx] = TYPEFLUID; } } __global__ void setLSback(farray ls) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<(ls.xn*ls.yn*ls.zn)) { ls[idx] = -ls[idx] * dparam.cellsize.x; } } __global__ void mergeLSAndMarkGrid(farray lsmerge, charray mark, farray lsfluid, farray lsair) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx< dparam.gnum) { float h = dparam.cellsize.x; if (lsair[idx] >4.99f * h) { lsmerge[idx] = lsfluid[idx]; if (lsfluid[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEFLUID; } else if (lsfluid[idx]>4.99f*h) { lsmerge[idx] = lsair[idx]; if (lsair[idx]>0) mark[idx] = TYPEVACUUM; else mark[idx] = TYPEAIR; } else if (lsair[idx]>0.8f*h && lsfluid[idx]>0.8f*h) { mark[idx] = TYPEVACUUM; lsmerge[idx] = min(lsfluid[idx], lsair[idx]); } else { lsmerge[idx] = (lsfluid[idx] - lsair[idx])*0.5f; if (lsmerge[idx]>0) mark[idx] = TYPEAIR; else mark[idx] = TYPEFLUID; } //todo: 对于气体将出到水面的时候,ls还是会有问题 int i, j, k; getijk(i, j, k, idx); if (i == 0 || i == NX - 1 || j == 0 || j == NY - 1 || k == 0 || k == NZ - 1) mark[idx] = TYPEBOUNDARY, lsmerge[idx] = -0.5f*h; //todo: debug: //lsmerge[idx] = -lsmerge[idx]; } } __global__ void sweepu_k_bubble(farray outux, farray outuy, farray outuz, farray ux, farray uy, farray uz, farray ls, charray mark, char sweepflag) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; int i, j, k; float wx, wy, wz, wsum; //三个方向上的权重 if (idx < dparam.gvnum.x) { //copy outux[idx] = ux[idx]; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>1 && i<NX - 1 /*&& j>0 && j<N-1 && k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i - 1, j, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (j + dj<0 || j + dj>NY - 1 || k + dk<0 || k + dk >NZ -1) continue; wx = -di*(ls(i, j, k) - ls(i - 1, j, k)); if (wx<0) continue; wy = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j + dj, k) - ls(i - 1, j + dj, k))*0.5f; if (wy<0) continue; wz = (ls(i, j, k) + ls(i - 1, j, k) - ls(i, j, k + dk) - ls(i - 1, j, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outux(i, j, k) = wx*ux(i + di, j, k) + wy* ux(i, j + dj, k) + wz* ux(i, j, k + dk); } } } if (idx < dparam.gvnum.y) { //copy outuy[idx] = uy[idx]; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if ( /*i>0 && i<N-1 &&*/ j>1 && j<NY - 1 /*&& k>0 && k<N-1*/) { if ((mark(i, j, k) != sweepflag && mark(i, j - 1, k) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di>NX - 1 || k + dk<0 || k + dk 
>NZ - 1) continue; wy = -dj*(ls(i, j, k) - ls(i, j - 1, k)); if (wy<0) continue; wx = (ls(i, j, k) + ls(i, j - 1, k) - ls(i + di, j, k) - ls(i + di, j - 1, k))*0.5f; if (wx<0) continue; wz = (ls(i, j, k) + ls(i, j - 1, k) - ls(i, j, k + dk) - ls(i, j - 1, k + dk))*0.5f; if (wz<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuy(i, j, k) = wx*uy(i + di, j, k) + wy* uy(i, j + dj, k) + wz* uy(i, j, k + dk); } } } if (idx < dparam.gvnum.z) { //copy outuz[idx] = uz[idx]; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if ( /*i>0 && i<N-1 && j>0 && j<N-1 &&*/ k>1 && k<NZ - 1) { if ((mark(i, j, k) != sweepflag && mark(i, j, k - 1) != sweepflag)) for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) { if (i + di<0 || i + di >NX - 1 || j + dj<0 || j + dj>NY - 1) continue; wz = -dk*(ls(i, j, k) - ls(i, j, k - 1)); if (wz<0) continue; wy = (ls(i, j, k) + ls(i, j, k - 1) - ls(i, j + dj, k) - ls(i, j + dj, k - 1))*0.5f; if (wy<0) continue; wx = (ls(i, j, k) + ls(i, j, k - 1) - ls(i + di, j, k) - ls(i + di, j, k - 1))*0.5f; if (wx<0) continue; wsum = wx + wy + wz; if (wsum == 0) wx = wy = wz = 1.0f / 3; else wx /= wsum, wy /= wsum, wz /= wsum; outuz(i, j, k) = wx*uz(i + di, j, k) + wy* uz(i, j + dj, k) + wz* uz(i, j, k + dk); } } } } //修正粒子的位置,当气体粒子跑到流体中时,"拉"它回来,反之亦然 __global__ void correctbubblepos(farray ls, farray phigrax, farray phigray, farray phigraz, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. pphi[idx] = d; //todo: 这里有问题 if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) //气体粒子只在错位比较明显的情况下才纠正,主要是为了防止气泡体积的收缩。 ipos = ipos - d*dir; else if (iflag == TYPEFLUID) { ipos = ipos - d*dir; dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, ls) / dirlen; ipos = ipos + s*(rs - s*d)*dir; } // cnt++; } else if (iflag == TYPEFLUID && s*d<rs*0.5f && s*d >= 0) //todo: rs*0.5f有点小问题,但不加这个0.5的话流体的体积会变化明显 { ipos = ipos + s*(rs - s*d)*dir; } ppos[idx] = ipos; } } //修正粒子的位置,当气体粒子跑到流体中时,"拉"它回来,反之亦然. //这里修正液体粒子位置时用的是气体的ls __global__ void correctbubblepos_air(farray lsmerge, farray phigrax, farray phigray, farray phigraz, farray lsair, farray phigrax_air, farray phigray_air, farray phigraz_air, float3 *ppos, char* pflag, int pnum, float *pphi) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; //test. todo. debug if (iflag == TYPEAIRSOLO || iflag == TYPESOLID) return; float3 ipos = ppos[idx]; int s = (iflag == TYPEFLUID) ? -1 : 1; float d, dirlen, rs = 0.5f*dparam.cellsize.x; float3 dir = getVectorFromGrid(ipos, phigrax, phigray, phigraz); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsmerge) / dirlen; //test // if( s*d<0 ) // ipos=ipos +rs*dir; //debug. 
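		// d approximates the signed distance to the merged liquid/air interface measured
		// along the normalized level-set gradient (s = -1 for fluid particles, +1 for air)
		// and is stored in pphi for inspection. Particles detected on the wrong side of the
		// interface (s*d < 0) are pulled back along that gradient below, and fluid particles
		// are additionally pushed out of bubbles using the air level set.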
pphi[idx] = d; //todo: 这里有问题 if (s*d<0 && abs(d)<0.5f*dparam.cellsize.x) //wrong way { if (iflag == TYPEAIR&& abs(d)>0.3f*dparam.cellsize.x) //气体粒子只在错位比较明显的情况下才纠正,主要是为了防止气泡体积的收缩。 ipos = ipos - d*dir; // cnt++; } if (iflag == TYPEFLUID) //对液体粒子使用气体的level set来处理,慢慢把液体“挤出”气泡之外,使得lsmerge计算更为准确 { dir = getVectorFromGrid(ipos, phigrax_air, phigray_air, phigraz_air); dirlen = length(dir); if (dirlen == 0) return; else dir = normalize(dir); d = getScaleFromFrid(ipos, lsair) / dirlen; if (d<-1.3f*rs) ipos = ipos - (d - rs)*dir; } ppos[idx] = ipos; } } //根据levelset计算梯度场,相当于一个方向 __global__ void computePhigra(farray phigrax, farray phigray, farray phigraz, farray ls) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float h = dparam.cellsize.x; float lsx1, lsx0, lsy1, lsy0, lsz1, lsz0, lscenter = ls[idx]; lsx1 = (verifycellidx(i + 1, j, k)) ? ls(i + 1, j, k) : lscenter; lsx0 = (verifycellidx(i - 1, j, k)) ? ls(i - 1, j, k) : lscenter; lsy1 = (verifycellidx(i, j + 1, k)) ? ls(i, j + 1, k) : lscenter; lsy0 = (verifycellidx(i, j - 1, k)) ? ls(i, j - 1, k) : lscenter; lsz1 = (verifycellidx(i, j, k + 1)) ? ls(i, j, k + 1) : lscenter; lsz0 = (verifycellidx(i, j, k - 1)) ? ls(i, j, k - 1) : lscenter; //todo: 这里需要考虑一下 phigrax[idx] = ((lsx1 - lsx0)*0.5f) / h; phigray[idx] = ((lsy1 - lsy0)*0.5f) / h; phigraz[idx] = ((lsz1 - lsz0)*0.5f) / h; //phigrax[idx] = (lsx1-lscenter)/h; //phigray[idx] = (lsy1-lscenter)/h; //phigraz[idx] = (lsz1-lscenter)/h; } } __global__ void copyParticle2GL_phi(float3* ppos, char *pflag, float *pmass, float *pTemperature, int pnum, float *renderpos, float *rendercolor, farray ls, farray phigrax, farray phigray, farray phigraz, char typeflag, float Tmax, float Tmin) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //todo: if (pflag[idx] == typeflag/* || ppos[idx].y<NY*0.5f*dparam.cellsize.x */) { renderpos[idx * 3] = -2.0f; renderpos[idx * 3 + 1] = 0.0f; renderpos[idx * 3 + 2] = 0.0f; float3 color = make_float3(0.0f); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; return; } renderpos[idx * 3] = ppos[idx].x; renderpos[idx * 3 + 1] = ppos[idx].y; renderpos[idx * 3 + 2] = ppos[idx].z; float3 color; if (pflag[idx] == TYPEAIR) color = mapColorBlue2Red(0.0f); else if (pflag[idx] == TYPEFLUID) color = mapColorBlue2Red(2.0f); else if (pflag[idx] == TYPESOLID) color = mapColorBlue2Red(4.0f); else color = mapColorBlue2Red(6.0f); //color=mapColorBlue2Red( (pTemperature[idx]-Tmin)/(Tmax-Tmin)*6.0f ); rendercolor[idx * 3] = color.x; rendercolor[idx * 3 + 1] = color.y; rendercolor[idx * 3 + 2] = color.z; } } //压强与速度的计算,加入surface tension. 
[2005]Discontinuous Fluids __global__ void subGradPress_bubble(farray p, farray ux, farray uy, farray uz, farray sf, farray lsmerge, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int i, j, k; float h = dparam.cellsize.x; float J = 0.0f, theta; if (idx<dparam.gvnum.x) { J = 0.0f; //ux getijk(i, j, k, idx, NX + 1, NY, NZ); if (i>0 && i<NX) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i - 1, j, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i - 1, j, k) == TYPEAIR)) { theta = (0.0f - lsmerge(i - 1, j, k)) / (lsmerge(i, j, k) - lsmerge(i - 1, j, k)); J = theta*sf(i - 1, j, k) + (1.0f - theta)*sf(i, j, k); } ux(i, j, k) -= (p(i, j, k) - p(i - 1, j, k) - J) / h; } } if (idx<dparam.gvnum.y) { J = 0.0f; //uy getijk(i, j, k, idx, NX, NY + 1, NZ); if (j>0 && j<NY) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i, j - 1, k) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j - 1, k) == TYPEAIR)) { theta = (0.0f - lsmerge(i, j - 1, k)) / (lsmerge(i, j, k) - lsmerge(i, j - 1, k)); J = theta*sf(i, j - 1, k) + (1.0f - theta)*sf(i, j, k); } uy(i, j, k) -= (p(i, j, k) - p(i, j - 1, k) - J) / h; } } if (idx<dparam.gvnum.z) { J = 0.0f; //uz getijk(i, j, k, idx, NX, NY, NZ + 1); if (k>0 && k<NZ) //look out for this condition { if ((mark(i, j, k) == TYPEAIR && mark(i, j, k - 1) == TYPEFLUID) || (mark(i, j, k) == TYPEFLUID && mark(i, j, k - 1) == TYPEAIR)) { theta = (0.0f - lsmerge(i, j, k - 1)) / (lsmerge(i, j, k) - lsmerge(i, j, k - 1)); J = theta*sf(i, j, k - 1) + (1.0f - theta)*sf(i, j, k); } uz(i, j, k) -= (p(i, j, k) - p(i, j, k - 1) - J) / h; } } } __global__ void sweepVacuum(charray mark) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (mark[idx] != TYPEAIR) return; //mark for (int di = -1; di <= 1; di += 2) for (int dj = -1; dj <= 1; dj += 2) for (int dk = -1; dk <= 1; dk += 2) if (mark(i + di, j + dj, k + dk) == TYPEVACUUM) mark[idx] = TYPEVACUUM; } } __global__ void markDeleteAirParticle(float3* ppos, char* pflag, float *pmass, uint *preservemark, int pnum, charray mark, farray lsmerge, farray lsair, uint *cnt) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { //fluid and solid particles are preserved, air and airsolo particles are verified. 
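	// markDeleteAirParticle writes a keep/discard flag per particle (preservemark);
	// the deleteparticles kernel further below then compacts the survivors, using an
	// exclusive prefix sum of those flags as the output index. A minimal host-side
	// sketch of that glue step, assuming Thrust and illustrative pointer names
	// (d_preservemark, d_scan) -- the project's actual host code may differ:
	//	#include <thrust/scan.h>
	//	#include <thrust/execution_policy.h>
	//	thrust::exclusive_scan(thrust::device, d_preservemark, d_preservemark + pnum, d_scan);
	//	deleteparticles<<<blocks, threads>>>(d_preservemark, d_scan, pnum, ...);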
if (pflag[idx] == TYPESOLID) { preservemark[idx] = 1; return; } int i, j, k; getijkfrompos(i, j, k, ppos[idx]); if (pflag[idx] == TYPEFLUID) { float lsm = getScaleFromFrid(ppos[idx], lsmerge); float lsa = getScaleFromFrid(ppos[idx], lsair); if ( /*lsm>1.2f*dparam.cellsize.x || */lsa<-1.0*dparam.cellsize.x) preservemark[idx] = 0, cnt[0]++; else preservemark[idx] = 1; return; } int cnt = 0; for (int di = -1; di <= 1; di += 1) for (int dj = -1; dj <= 1; dj += 1) for (int dk = -1; dk <= 1; dk += 1) if (verifycellidx(i + di, j + dj, k + dk) && mark(i + di, j + dj, k + dk) == TYPEVACUUM) cnt++; if (cnt == 0 && pmass[idx]>0.000001f) //notice: 这里附带的删除了质量过小的气体粒子,与气体粒子的被吸收有关 preservemark[idx] = 1; else preservemark[idx] = 0; } } // compact voxel array __global__ void deleteparticles(uint *preserveflag, uint *preserveflagscan, int pnum, float3 *outpos, float3 *pos, float3 *outvel, float3 *vel, float *outmass, float* mass, char *outflag, char *flag, float *outTemperature, float *temperature, float *outheat, float *heat, float *outsolubility, float *solubility, float *outgascontain, float *gascontain) { uint idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (preserveflag[idx] == 1) { //deleteflagscan 存的是删除某些粒子之后的"索引". uint outidx = preserveflagscan[idx]; outpos[outidx] = pos[idx]; outvel[outidx] = vel[idx]; outmass[outidx] = mass[idx]; outflag[outidx] = flag[idx]; outTemperature[outidx] = temperature[idx]; outheat[outidx] = heat[idx]; outsolubility[outidx] = solubility[idx]; outgascontain[outidx] = gascontain[idx]; } } } __device__ int cntairparticle(float3 *ppos, char *pflag, int igrid, uint *gridstart, uint *gridend, const float3 &ipos, float r) { uint start = gridstart[igrid]; int res = 0; float dis; if (start == CELL_UNDEF) return res; for (int p = start; p<gridend[igrid]; p++) { dis = length(ppos[p] - ipos); if (dis<r && (pflag[p] == TYPEAIR || pflag[p] == TYPEAIRSOLO)) { ++res; } } return res; } __device__ inline bool isInBoundaryCell(int x, int y, int z) { int level = 2; if (x <= level || x >= NX - 1 - level || y <= level || y >= NY - 1 - level) return true; else return false; } __global__ void verifySoloAirParticle(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray lsmerge, farray airux, farray airuy, farray airuz, uint *gridstart, uint *gridend, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { char iflag = pflag[idx]; if (iflag == TYPEFLUID || iflag == TYPESOLID) //TYPEAIR, TYPEAIRSOLO can go on. return; float3 ipos = ppos[idx]; float ls = getScaleFromFrid(ipos, lsmerge); float h = dparam.cellsize.x; int i, j, k; getijkfrompos(i, j, k, ipos); //a key adjustment, the tolerent will affect the result directly. int cnt = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) cnt += cntairparticle(ppos, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, h); float tol1 = -1.45f, tol2 = -0.5f; if (scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene==SCENE_ALL) tol1 = 0.05f, tol2 = -0.8f; else if (scene == SCENE_INTERACTION) tol1 = 0.2f, tol2 = -0.5f; if ((cnt >= 10 || ls>tol1*h) && pflag[idx] == TYPEAIRSOLO && !isInBoundaryCell(i, j, k)) //decide whether the air solo particle should be transfered to air particle. { if (cnt >= 3) pflag[idx] = TYPEAIR; } else if (iflag == TYPEAIR && (isInBoundaryCell(i, j, k) || ls<tol2*h || cnt <= 1)) { //todo: 插值速度 or not??? 
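		// When an AIR particle is demoted to AIRSOLO here (it sits in a boundary cell,
		// is deep inside the liquid, or has almost no gas neighbours), its velocity is
		// re-seeded from the air grid just below; the commented-out line keeps the
		// alternative of blending 80% old particle velocity with 20% grid velocity.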
//pvel[idx]= pvel[idx]*0.8f + 0.2f*getParticleVelFromGrid(ipos,airux,airuy,airuz); pvel[idx] = getParticleVelFromGrid(ipos, airux, airuy, airuz); pflag[idx] = TYPEAIRSOLO; } } } __device__ float sumdensity(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { // notice: should include liquid particle, not just spray particle. if (pflag[p] != TYPEAIR && pflag[p] != TYPEAIRSOLO) continue; dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); //todo: m0 or pmass[p]? } return res; } __global__ void calcDensPress_Air(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIR && pflag[idx] != TYPEAIRSOLO) return; float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.airm0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho*0.5f); } } __device__ float3 sumforce(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h && (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR)) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSoloAirP(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH, float maxVelForBubble) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEAIRSOLO && pflag[idx] != TYPEAIR) return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); //todo: 直接更新速度和位置?? force *= dparam.airm0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; //restrict the vel below a threshold. // if( length(ivel) > maxVelForBubble ) // ivel = normalize(ivel) * maxVelForBubble; // // advect particle, using rho!!!! 
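		// sumforce above is a standard SPH pair force in the style of Mueller et al.:
		// pterm is the symmetrized pressure term using the spiky kernel gradient
		// (dparam.spikykern), vterm adds viscosity through the Laplacian kernel
		// (dparam.lapkern * viscositySPH), and dterm supplies the c/(rho_i*rho_j)
		// weighting (pdens stores reciprocal densities). The summed force, scaled by
		// the gas particle mass dparam.airm0, is then integrated with symplectic Euler.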
// ppos[idx]=ipos; pvel[idx] = ivel; } } __device__ float sumdensity_SLCouple(float3 ipos, float h2, int grididx, float3 *ppos, char *pflag, uint *gridstart, uint *gridend) { float res = 0; uint start = gridstart[grididx]; if (start == CELL_UNDEF) return res; float dist2; for (uint p = start; p<gridend[grididx]; p++) { dist2 = dot(ppos[p] - ipos, ppos[p] - ipos); if (dist2<h2) res += pow(h2 - dist2, 3.0f); } return res; } //solid-liquid coupling, in SPH framework __global__ void calcDensPressSPH_SLCouple(float3* ppos, float *pdens, float *ppress, char* pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { float3 ipos = ppos[idx]; float h = dparam.cellsize.x; //todo: set support radius, key part. float h2 = h*h; int i, j, k; getijkfrompos(i, j, k, ipos); float dens = 0; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) dens += sumdensity_SLCouple(ipos, h2, getidx(i + di, j + dj, k + dk), ppos, pflag, gridstart, gridend); dens *= dparam.m0 * dparam.poly6kern; if (dens == 0) dens = 1.0f; pdens[idx] = 1.0f / dens; ppress[idx] = 1.5f * (dens - dparam.waterrho); } } __device__ float3 sumforce_SLCouple(float3 *ppos, float3 *pvel, float *ppress, float *pdens, char *pflag, int grididx, uint *gridstart, uint *gridend, float3 ipos, float3 ivel, float ipress, float idens, float h, float kvis) { uint start = gridstart[grididx]; float3 res = make_float3(0.0f), dir; float dis, c, pterm, dterm;// kattrct=0.0f, kvis=0.0f; if (start == CELL_UNDEF) return res; float vterm = dparam.lapkern * kvis; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<h) { c = h - dis; pterm = -0.5f * c * dparam.spikykern * (ipress + ppress[p]) / dis; dterm = c * idens * pdens[p]; res += (pterm * dir + vterm * (pvel[p] - ivel)) * dterm; } } return res; } __global__ void enforceForceSPH_SLCouple(float3 *ppos, float3 *pvel, float *pdens, float *ppress, char *pflag, int pnum, uint *gridstart, uint *gridend, float viscositySPH) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] != TYPEFLUID) //只有fluid计算,solid不在这里更新 return; float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; float ipress = ppress[idx], idens = pdens[idx]; float h = dparam.cellsize.x; //float kvis=0.0f; int i, j, k; float3 force = make_float3(0.0f); getijkfrompos(i, j, k, ipos); int width = 1; for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++) if (verifycellidx(i + di, j + dj, k + dk)) force += sumforce_SLCouple(ppos, pvel, ppress, pdens, pflag, getidx(i + di, j + dj, k + dk), gridstart, gridend, ipos, ivel, ipress, idens, h, viscositySPH); // force=make_float3(0.0f); //todo: 直接更新速度和位置?? //add gravity here? or external force part; force *= dparam.m0; //force = make_float3(0); ivel += force*dparam.dt; ipos += ivel*dparam.dt; // advect particle, using rho!!!! 
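		// Same symplectic-Euler update as the bubble SPH path above, but for liquid
		// particles in the solid-liquid coupling: the force is scaled by the liquid
		// particle mass dparam.m0 and the equation of state uses dparam.waterrho as the
		// rest density. Unlike the AIRSOLO branch, the new position is written back
		// here as well as the velocity.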
ppos[idx] = ipos; pvel[idx] = ivel; } } __global__ void updateFixedHeat(farray fixedHeat, int frame) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i >= NX / 4 && i<NX*0.75 && j >= NY / 4 && j<NY*0.75 && k <= 3 /*k<=20 && k>=19*/) fixedHeat[idx] = 273.0f + 100.0f * min(frame / 40.f, 1.0f); else fixedHeat[idx] = UNDEF_TEMPERATURE; } } __global__ void addHeatAtBottom(farray Tp, int frame, float heatIncreaseBottom) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i >= 1 && i<NX - 1 && j >= 1 && j<NY - 1 && k <= 3 /*k<=20 && k>=19*/) Tp[idx] += heatIncreaseBottom;//1.5f; //Tp[idx] = 350.0f;//273.0f + 100.0f * min(frame/40.f, 1.0f ); Tp[idx] = min(378.0f, Tp[idx]); } } // __global__ void compb_heat(farray Tp_old, farray Tp, farray fixedheat, charray mark, float *heatAlphaArray) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx <dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float alpha = heatAlphaArray[mark[idx]]; //如果有固定的温度,那么tp与b都要根据这个fixedheat来计算 // if( fixedheat[idx]!=UNDEF_TEMPERATURE ) // Tp[idx]=fixedheat[idx], Tp_old[idx] = fixedheat[idx]*dparam.cellsize.x*dparam.cellsize.x/alpha/dparam.dt; // else Tp_old[idx] = Tp[idx] * dparam.cellsize.x*dparam.cellsize.x / alpha / dparam.dt; } } //z = Ax: A is a sparse matrix, representing the left hand item of Poisson equation. __global__ void computeAx_heat(farray ans, charray mark, farray x, int n, float *heatAlphaArray, farray fixedHeat, SCENE scene) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { float h = dparam.cellsize.x; float dt = dparam.dt; float alpha = heatAlphaArray[mark[idx]]; if (mark[idx] != TYPEBOUNDARY/* && mark[idx]!=TYPEVACUUM*/) { int i, j, k; getijk(i, j, k, idx); float center = x[idx]; float sum = (h*h / alpha / dt + 6.0f)*center; //trick: 决定要不要让freeair参与计算 if (scene == SCENE_BOILING || scene == SCENE_BOILING_HIGHRES || scene == SCENE_MELTANDBOIL || scene == SCENE_MELTANDBOIL_HIGHRES || scene ==SCENE_ALL) { sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY || mark(i + 1, j, k) == TYPEVACUUM) ? center : x(i + 1, j, k)); sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY || mark(i, j + 1, k) == TYPEVACUUM) ? center : x(i, j + 1, k)); sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY || mark(i, j, k + 1) == TYPEVACUUM) ? center : x(i, j, k + 1)); sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY || mark(i - 1, j, k) == TYPEVACUUM) ? center : x(i - 1, j, k)); sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY || mark(i, j - 1, k) == TYPEVACUUM) ? center : x(i, j - 1, k)); sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY || mark(i, j, k - 1) == TYPEVACUUM) ? center : x(i, j, k - 1)); } else { sum -= ((mark(i + 1, j, k) == TYPEBOUNDARY) ? center : x(i + 1, j, k)); sum -= ((mark(i, j + 1, k) == TYPEBOUNDARY) ? center : x(i, j + 1, k)); sum -= ((mark(i, j, k + 1) == TYPEBOUNDARY) ? center : x(i, j, k + 1)); sum -= ((mark(i - 1, j, k) == TYPEBOUNDARY) ? center : x(i - 1, j, k)); sum -= ((mark(i, j - 1, k) == TYPEBOUNDARY) ? center : x(i, j - 1, k)); sum -= ((mark(i, j, k - 1) == TYPEBOUNDARY) ? 
center : x(i, j, k - 1)); } ans[idx] = sum; } } } //Ans = x + a*y __global__ void pcg_op_heat(charray A, farray ans, farray x, farray y, float a, int n) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<n) { // if( A[idx]==TYPEFLUID || A[idx]==TYPEAIR ) if (A[idx] != TYPEBOUNDARY) ans[idx] = x[idx] + a*y[idx]; else ans[idx] = 0.0f; } } __global__ void setBoundaryHeat(farray tp) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); if (i == NX - 1) tp[idx] = tp(i - 1, j, k); else if (i == 0) tp[idx] = tp(i + 1, j, k); else if (j == NY - 1) tp[idx] = tp(i, j - 1, k); else if (j == 0) tp[idx] = tp(i, j + 1, k); else if (k == NZ - 1) tp[idx] = tp(i, j, k - 1); else if (k == 0) tp[idx] = tp(i, j, k + 1); } } __global__ void compTpChange(farray tp, farray tpsave, charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { if (mark[idx] != TYPEBOUNDARY) tpsave[idx] = tp[idx] - tpsave[idx]; else tpsave[idx] = 0; } } __device__ void sumHeat(float &heatsum, float &weight, float3 gpos, float3 *pos, float *pTemperature, uint *gridstart, uint *gridend, int gidx) { if (gridstart[gidx] == CELL_UNDEF) return; uint start = gridstart[gidx]; uint end = gridend[gidx]; float dis2, w, RE = 1.4; float scale = 1 / dparam.cellsize.x; for (uint p = start; p<end; ++p) { dis2 = dot(pos[p] * scale - gpos, pos[p] * scale - gpos); //scale is necessary. w = sharp_kernel(dis2, RE); weight += w; heatsum += w*pTemperature[p]; } } __global__ void mapHeatp2g_hash(float3 *ppos, float *pTemperature, int pnum, farray heat, uint* gridstart, uint *gridend, float defaulttemperature) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; float weight = 0.0f, heatsum = 0; float3 gpos; getijk(i, j, k, idx); gpos.x = i + 0.5, gpos.y = j + 0.5, gpos.z = k + 0.5; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) if (verifycellidx(i + di, j + dj, k + dk)) sumHeat(heatsum, weight, gpos, ppos, pTemperature, gridstart, gridend, getidx(i + di, j + dj, k + dk)); heatsum = (weight>0) ? (heatsum / weight) : defaulttemperature; heat(i, j, k) = heatsum; } } __global__ void mapHeatg2p(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; pTemperature[idx] = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. } } __global__ void mapHeatg2p_MeltAndBoil(float3 *ppos, char *parflag, float *pTemperature, int pnum, farray Tchange, farray T, float defaultSolidT, float alphaTempTrans) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { //pos-->grid xyz float3 ipos = ppos[idx]; float newtemp = alphaTempTrans*(pTemperature[idx] + getScaleFromFrid(ipos, Tchange)) + (1 - alphaTempTrans)*getScaleFromFrid(ipos, T); //use a scheme like FLIP, update the particle temperature by heat change. 
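		// In symbols: T_p_new = alpha*(T_p + dT_grid(x_p)) + (1 - alpha)*T_grid(x_p),
		// a FLIP/PIC blend controlled by alphaTempTrans, where dT_grid is the per-step
		// grid temperature change (Tchange) and T_grid the full grid temperature. The
		// branch below additionally relaxes solid particles toward that value with a
		// 0.95/0.05 weighting, so solids take up temperature changes more slowly.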
if (parflag[idx] == TYPESOLID) pTemperature[idx] = 0.95f*(pTemperature[idx]) + 0.05f*newtemp; else pTemperature[idx] = newtemp; } } __global__ void initHeatParticle(float *pTemperature, float *pHeat, float defaultSolidT, float defaultLiquidT, float LiquidHeatTh, char *pflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPESOLID) { pTemperature[idx] = defaultSolidT; pHeat[idx] = 0; } else { pTemperature[idx] = defaultLiquidT; pHeat[idx] = LiquidHeatTh; } } } //Temperature0=273.15K, Solubility0=1.0f (每1个流体粒子里含的气体够生成一个完事的气体粒子) __global__ void initsolubility_k(float *psolubility, float* pgascontain, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate, float initgasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID || pflag[idx] == TYPESOLID) { psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. pgascontain[idx] = initgasrate*psolubility[idx]; } else { psolubility[idx] = 0; pgascontain[idx] = 0; } } } //Temperature0=273.15K, Solubility0=1.0f (每1个流体粒子里含的气体够生成一个完事的气体粒子) __global__ void updatesolubility(float *psolubility, float *ptemperature, char *pflag, int pnum, float Solubility0, float Temperature0, float dissolvegasrate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum) { if (pflag[idx] == TYPEFLUID) psolubility[idx] = dissolvegasrate*dparam.airm0 * exp(1018.9f*(1 / ptemperature[idx] - 1 / Temperature0)); //todo: adjust the parameter. } } //addparnums初始化应该是0 __global__ void GenerateGasParticle_k(float *psolubility, float *paircontain, float3 *ppos, float3 *pvel, float *pmass, char *pflag, float *pTemperature, float *pLHeat, int pnum, uint *gridstart, uint *gridend, int *addparnums, float *randfloat, int randcnts, int frame, farray gTemperature, float LiquidHeatTh, int *seedcell, int seednum, float vaporGenRate) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { float gcontain = 0, gsolubility = 0, gairexist = 0; int liquidParCnt = 0, gasParCnt = 0; float airparticlemass0 = dparam.airm0; //todo float vaporsum = 0;//, vaporrate = 0.01f; float3 gaspos = make_float3(0), gasvel = make_float3(0); int i, j, k; getijk(i, j, k, idx); if (k <= 1 || isInBoundaryCell(i, j, k)) return; //最下面的一行不生成气泡粒子 float3 gpos = make_float3(i, j, k)*dparam.cellsize.x; uint start = gridstart[idx]; if (start == CELL_UNDEF) return; //1. 
统计气体含量、流体粒子含有的气体量、可溶解量 for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; vaporsum += max(0.0f, pLHeat[p] - LiquidHeatTh) * vaporGenRate * airparticlemass0; liquidParCnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; gaspos += ppos[p]; gasvel += pvel[p]; gasParCnt++; } } bool hasseed = false; for (int i = 0; i<seednum; i++) if (seedcell[i] == idx) hasseed = true; //如有必要,增加一个气体粒子 int addcnt = 0; int randbase = (idx*frame) % (randcnts - 200); //randpos and randfloat are in [0,1] float3 randpos = make_float3(randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts], randfloat[(randbase + addcnt++) % randcnts]); float randnum = randfloat[(randbase + addcnt++) % randcnts]; float r = dparam.cellsize.x * 0.25f; if (gcontain - gsolubility + vaporsum > airparticlemass0 && (hasseed || gasParCnt>0)) { int addindex = atomicAdd(&addparnums[0], 1) + pnum; pmass[addindex] = airparticlemass0;//dparam.m0; //todo: if (gasParCnt>0) { ppos[addindex] = gaspos / gasParCnt + (max(0.5f, randnum)*r) * (randpos - make_float3(0.5f)) * 2; //与凝结核有关 pvel[addindex] = make_float3(0.0f);//gasvel/gasParCnt; //与已有的气体粒子有关 } else { ppos[addindex] = gpos + dparam.cellsize.x*randpos; pvel[addindex] = make_float3(0.0f); } pflag[addindex] = TYPEAIRSOLO; pTemperature[addindex] = gTemperature[idx]; //网格温度 pLHeat[addindex] = 0; //气体粒子的heat无所谓 paircontain[addindex] = 0.0f; psolubility[addindex] = 0.0f; //重置液体粒子的气体含量 for (int p = start; p<gridend[idx]; p++) { if (pflag[p] == TYPEFLUID) { paircontain[p] = min(paircontain[p], psolubility[p]); pLHeat[p] = min(pLHeat[p], LiquidHeatTh); //todo: decrease the liquids mass. } } } } } //addparnums初始化应该是0 __global__ void updatebubblemass(float *psolubility, float *paircontain, float3 *ppos, float *pmass, char *pflag, int pnum, uint *gridstart, uint *gridend) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum / 8) //每个线程负责8个格子 { float gcontain = 0, gsolubility = 0, gairexist = 0; int fpcnt = 0, apcnt = 0; float airparticlemass0 = dparam.airm0; //todo int i, j, k; getijk(i, j, k, idx, NX / 2, NY / 2, NZ / 2); i *= 2, j *= 2, k *= 2; // float3 gpos; int gidx; for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++) { gidx = getidx(i + di, j + dj, k + dk); // gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x; if (gridstart[gidx] == CELL_UNDEF) continue; //1. 统计气体含量、流体粒子含有的气体量、可溶解量 for (int p = gridstart[gidx]; p<gridend[gidx]; p++) { if (pflag[p] == TYPEFLUID) { gcontain += paircontain[p]; gsolubility += psolubility[p]; fpcnt++; } else if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR) { gairexist += pmass[p]; apcnt++; } } } //2. 
		//    Release gas dissolved in the fluid particles to form new bubbles or to grow existing ones, if needed
		float maxradius = 1.5f*dparam.cellsize.x;
		float maxmass = getMassfromR(maxradius);
		float massaddlimit = 3.0f*dparam.airm0;		// each gas particle may gain at most 3 units of mass
		float addmass;
		if (gcontain>gsolubility)
		{
			//todo: tuning parameter
			if (abs(gcontain - gsolubility) < 2.5*airparticlemass0/*1.3f*gsolubility*/)		// if the difference is small, do not adjust
				return;

			//2.1: grow the existing bubbles up to their maximum size
			float needadd = gcontain - gsolubility;
			if (apcnt>0)
			{
				for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++)
				{
					if (needadd <= 0) break;
					gidx = getidx(i + di, j + dj, k + dk);
					if (gridstart[gidx] == CELL_UNDEF) continue;
					//	gpos=make_float3(i+di,j+dj,k+dk)*dparam.cellsize.x;
					for (int p = gridstart[gidx]; p<gridend[gidx]; p++)
					{
						if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR)
						{
							addmass = min(massaddlimit, maxmass - pmass[p]);
							addmass = max(0.0f, min(needadd, addmass));
							needadd -= addmass;		// a small error is tolerated here
							pmass[p] += addmass;
							if (needadd <= 0) break;
						}
					}
				}
			}
			//2.3: adjust the dissolved-gas content of each fluid particle
			float actualadd = gcontain - gsolubility - needadd, eachchange;
			for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++)
			{
				if (actualadd <= 0) break;
				gidx = getidx(i + di, j + dj, k + dk);
				if (gridstart[gidx] == CELL_UNDEF) continue;
				for (int p = gridstart[gidx]; p<gridend[gidx]; p++)
				{
					if (actualadd <= 0) break;
					if (pflag[p] == TYPEFLUID)
					{
						if (paircontain[p] - psolubility[p]>0)
						{
							eachchange = min(actualadd, paircontain[p] - psolubility[p]);
							paircontain[p] -= eachchange;
							actualadd -= eachchange;
						}
					}
				}
			}
		}	//end if( gcontain>gsolubility )
		else if (gairexist>0)	//3: absorb gas back into the liquid, when gas particles exist in this block of cells
		{
			//todo: tuning parameter
			if (abs(gcontain - gsolubility) < 3.6f*airparticlemass0/*1.3f*gsolubility*/)		// if the difference is small, do not adjust
				return;
			//3.1: reduce the mass of the gas particles
			float needminus = gsolubility - gcontain;	// amount of gas that can be absorbed
			float masschangesum = 0;					// amount of gas actually absorbed
			if (gairexist<needminus)
				needminus = gairexist;
			if (needminus>0)	//minus some of them to 0 mass, use another kernel to delete it.
			{
				for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++)
				{
					if (needminus <= 0) break;
					gidx = getidx(i + di, j + dj, k + dk);
					if (gridstart[gidx] == CELL_UNDEF) continue;
					for (int p = gridstart[gidx]; p<gridend[gidx] && needminus>0; p++)
					{
						if (pflag[p] == TYPEAIRSOLO || pflag[p] == TYPEAIR)
						{
							float masschange = min(pmass[p], needminus);	// how much mass is absorbed from this gas particle
							pmass[p] -= masschange;
							needminus -= masschange;
							masschangesum += masschange;
						}
					}
				}
			}
			//3.2: adjust the dissolved-gas content of the fluid particles.
			for (int di = 0; di <= 1; di++) for (int dj = 0; dj <= 1; dj++) for (int dk = 0; dk <= 1; dk++)
			{
				if (masschangesum <= 0) break;
				gidx = getidx(i + di, j + dj, k + dk);
				if (gridstart[gidx] == CELL_UNDEF) continue;
				for (int p = gridstart[gidx]; p<gridend[gidx] && masschangesum>0; p++)
				{
					if (pflag[p] == TYPEFLUID)
					{
						float containchange = min(max(0.0f, psolubility[p] - paircontain[p]), masschangesum);	// how much gas this fluid particle takes back up
						paircontain[p] += containchange;
						masschangesum -= containchange;
					}
				}
			}
		}
	}
}

// Use precomputed seed positions to generate "empty" bubbles from temperature and solubility;
// once a bubble grows past a certain size, spawn real AIR particles for it.
// Affects other modules: markgrid, correctpos, heattransfer.
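// In updateEmptyBubbles below, each seed bubble collects excess dissolved gas from the
// fluid particles around it and converts the accumulated mass into volume through the
// sphere relation m ~ 0.5 * rho_water * (4/3)*pi*r^3, which yields the new radius;
// once that radius reaches rthresholdleave (about one cell) the bubble is replaced by a
// block of TYPEAIR particles sampled on a lattice of spacing dparam.samplespace and the
// seed is reset just above the floor. This reading is inferred from the kernel body
// itself; the host-side seeding logic is assumed to live elsewhere.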
__global__ void updateEmptyBubbles(float3 *pepos, float3 *pedir, float *peradius, int penum, float3 *parpos, float3 *parvel, float *parmass, float* parTemperature, char *parflag, float *parsolubility, float *paraircontain, int parnum, int *addparnums, uint *gridstart, uint *gridend, farray gTemperature) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<penum) { int airRscale = 2; float3 ipos = pepos[idx], idir = pedir[idx]; float iradius = peradius[idx]; float rthresholdleave = 1.0f*dparam.cellsize.x; //todo: //到此半径则转化成实际气体并离开固壁 控制气泡半径 float rthreshold = max(0.0f, iradius + 0.1f*dparam.cellsize.x); //此次气泡最大半径,防止突然变大带来的不稳定 rthreshold = min(rthreshold, rthresholdleave); int i, j, k; getijkfrompos(i, j, k, ipos); //收集需要管的范围内的气体含量,增大体积 float massorigin = dparam.waterrho * 4 / 3 * M_PI*(pow(iradius, 3))*0.5; float masscantake = dparam.waterrho * 4 / 3 * M_PI*(pow(rthreshold, 3) - pow(iradius, 3))*0.5, massadd = 0; //todo int range = 2; for (int di = -range; di <= range &&masscantake>0; di++) for (int dj = -range; dj <= range&&masscantake>0; dj++) for (int dk = -range; dk <= range&&masscantake>0; dk++) if (verifycellidx(i + di, j + dj, k + dk)) { int grididx = getidx(i, j, k); for (uint p = gridstart[grididx]; p<gridend[grididx] && masscantake>0; p++) //遍历所有流体粒子 { if (parflag[p] != TYPEFLUID) continue; float gasreslease = max(0.0f, paraircontain[p] - parsolubility[p]); if (gasreslease <= 0) continue; gasreslease = min(gasreslease, masscantake); massadd += gasreslease; masscantake -= gasreslease; //paraircontain[p] -= gasreslease; } } float newiradius = pow((massadd + massorigin) / dparam.waterrho / 4 * 3 / M_PI, 1.0 / 3); ipos += (newiradius - iradius)*idir; float ss = dparam.samplespace; if (newiradius + 1e-5 >= rthresholdleave) //生成实际的气体粒子 { int num = ceil(newiradius / ss); for (float x = -num*ss; x <= newiradius; x += ss)for (float y = -num*ss; y <= newiradius; y += ss)for (float z = -num*ss; z <= newiradius; z += ss) { if (x*x + y*y + z*z>newiradius*newiradius) continue; int addindex = atomicAdd(&addparnums[0], 1) + parnum; parmass[addindex] = dparam.airm0; //todo: parpos[addindex] = ipos + make_float3(x, y, z); parflag[addindex] = TYPEAIR; parvel[addindex] = make_float3(0.0f); parTemperature[addindex] = gTemperature[getidx(i, j, 1)]; //todo: 找到当前气泡最下面网格的温度 paraircontain[addindex] = 0.0f; parsolubility[addindex] = 0.0f; } ipos.z = 1.1f*dparam.cellsize.x; //重置位置 newiradius = 0; } peradius[idx] = newiradius; pepos[idx] = ipos; } } __device__ void mat4_mul(matrix4* dst, const matrix4* m0, const matrix4* m1) { int row; int col; int i; for (row = 0; row < 4; row++) for (col = 0; col < 4; col++) for (i = 0; i < 4; i++) dst->m[row * 4 + col] += m0->m[row * 4 + i] * m1->m[i * 4 + col]; } __device__ void mat4_mulvec3_as_mat3(float3* dst, const matrix4* m, const float3* v) { float new_x; float new_y; float new_z; new_x = v->x*m->m[0 + 4 * 0] + v->y*m->m[0 + 4 * 1] + v->z*m->m[0 + 4 * 2]; new_y = v->x*m->m[1 + 4 * 0] + v->y*m->m[1 + 4 * 1] + v->z*m->m[1 + 4 * 2]; new_z = v->x*m->m[2 + 4 * 0] + v->y*m->m[2 + 4 * 1] + v->z*m->m[2 + 4 * 2]; dst->x = new_x; dst->y = new_y; dst->z = new_z; } __global__ void MeltingSolidByHeat(float *pTemperature, float *pLHeat, char *pflag, int pnum, float LiquidHeatTh, float meltTemperature, int *numchange) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { //if( pTemperature[idx]>meltTemperature ) if (pLHeat[idx]>LiquidHeatTh) { pflag[idx] = TYPEFLUID; pLHeat[idx] = LiquidHeatTh; atomicAdd(&numchange[0], 
			1);
		}
	}
}

__global__ void FreezingSolidByHeat(float3* ppos, float *pLHeat, char *pflag, int pnum, int *numchange, uint *gridstart, uint *gridend)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<pnum && pflag[idx] == TYPEFLUID)
	{
		//if( pTemperature[idx]>meltTemperature )
		if (pLHeat[idx]<0)
		{
			//determine a new position which is appropriate for solid.
			// find the nearest solid particle
			int i, j, k;
			float3 ipos = ppos[idx];
			getijkfrompos(i, j, k, ipos);
			float mindis = 1000;
			int minidx = -1;
			int width = 1;
			int cntsolid = 0;
			float h = dparam.cellsize.x;
			for (int di = -width; di <= width; di++) for (int dj = -width; dj <= width; dj++) for (int dk = -width; dk <= width; dk++)
				if (verifycellidx(i + di, j + dj, k + dk))
				{
					int gidx = getidx(i + di, j + dj, k + dk);
					uint start = gridstart[gidx];
					if (start == CELL_UNDEF) continue;
					for (int p = start; p<gridend[gidx]; p++)
					{
						if (pflag[p] == TYPESOLID)
						{
							float dis = length(ppos[p] - ipos);
							if (dis< h) cntsolid++;
							if (length(ppos[p] - ipos)<mindis)
								mindis = length(ppos[p] - ipos), minidx = p;
						}
					}
				}
			if (minidx != -1 && mindis<dparam.cellsize.x && cntsolid>2)	// there are solid particles nearby
			{
				pflag[idx] = TYPESOLID;
				pLHeat[idx] = 0;
				atomicAdd(&numchange[0], 1);
				if (mindis > dparam.samplespace)
				{
					ipos = normalize(ipos - ppos[minidx])*dparam.samplespace + ppos[minidx];
					ppos[idx] = ipos;
				}
			}
		}
	}
}

// Compute the drag force between air-solo particles and the fluid velocity field; the
// velocities are modified directly inside this kernel, with dragparam controlling the
// strength of the effect.
__global__ void calDragForce(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray ux, farray uy, farray uz, float dragparamsolo, float dragparamgrid, SCENE scene)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<pnum)
	{
		if (pflag[idx] != TYPEAIRSOLO)
			return;
		float3 ipos = ppos[idx], ivel = pvel[idx];

		//compute the grid index
		int i, j, k;
		getijkfrompos(i, j, k, ipos);

		//compute drag "force" (actually not "force", is velocity change, tuning alpha is very important)
		float3 gridvel = getParticleVelFromGrid(ipos, ux, uy, uz);
		float3 gridpos = make_float3(i, j, k);
		float3 dragf_b = dragparamsolo * length(gridvel - ivel) * (gridvel - ivel);	// points toward the grid velocity; applied to the bubble, whose mass the system normalizes to 1
		/* float alpha = 0.5f;*/
		float3 velChange_g = -dragf_b*dragparamgrid*dparam.dt;	// applied to the grid; an extra scale factor is needed because, under the same force, the grid carries much more mass and its velocity should change less

		//update for grid
		float ux0, ux1, uy0, uy1, uz0, uz1;
		float3 weight = ipos / dparam.cellsize.x - gridpos;	// interpolation weights in [0,1]
		ux0 = velChange_g.x*(1 - weight.x), ux1 = velChange_g.x*weight.x;
		uy0 = velChange_g.y*(1 - weight.y), uy1 = velChange_g.y*weight.y;
		uz0 = velChange_g.z*(1 - weight.z), uz1 = velChange_g.z*weight.z;
		atomicAdd(&(ux.data[getidx(i, j, k, NX + 1, NY, NZ)]), ux0);
		atomicAdd(&(ux.data[getidx(i + 1, j, k, NX + 1, NY, NZ)]), ux1);
		atomicAdd(&(uy.data[getidx(i, j, k, NX, NY + 1, NZ)]), uy0);
		atomicAdd(&(uy.data[getidx(i, j + 1, k, NX, NY + 1, NZ)]), uy1);
		atomicAdd(&(uz.data[getidx(i, j, k, NX, NY, NZ + 1)]), uz0);
		atomicAdd(&(uz.data[getidx(i, j, k + 1, NX, NY, NZ + 1)]), uz1);

		//update for particle; note the sign is opposite to the grid update. todo: only use this in the Interaction scene?
		if (scene == SCENE_INTERACTION || scene == SCENE_INTERACTION_HIGHRES)
			pvel[idx] += dragf_b*dparam.dt;
	}
}

__global__ void accumulate_GPU_k(int num, float3* out, float3* a)//dsum, a.data, flag, n
{
	extern __shared__ float3 ddata[];
	uint tid = threadIdx.x;
	uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;

	ddata[tid] = (i >= num) ?
make_float3(0, 0, 0) : a[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s>0; s >>= 1) { if (tid<s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float* b)//dsum, a.data, flag, n { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]*b[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k(int num, float3* out, float3* a, float3* b) { extern __shared__ float3 ddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; ddata[tid] = (i >= num) ? make_float3(0, 0, 0) : a[i]*b[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) ddata[tid] += ddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = ddata[0]; } __global__ void accumulate_GPU_k_float(int num, float* out, float* a)//dsum, a.data, flag, n { extern __shared__ float fddata[]; uint tid = threadIdx.x; uint i = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; fddata[tid] = (i >= num) ? 0 : a[i]; //赋值给solidparticles __syncthreads(); for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) fddata[tid] += fddata[tid + s]; __syncthreads(); } if (tid == 0) out[blockIdx.x] = fddata[0]; } __global__ void compute_cI_k(int pnum, char* parflag, float3 *parPos, float3 *parVel, float3* c, float3* weight, float3 rg) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (parflag[idx] == TYPESOLID) { float dis = length(parPos[idx] - rg); if (dis>1e-6) { c[idx] = cross(parPos[idx] - rg, parVel[idx]); weight[idx] = make_float3(dis, 0, 0); } else c[idx] = weight[idx] = make_float3(0); } else { c[idx] = weight[idx] = make_float3(0); //c[idx] = make_float3(0,0,0); } } } __global__ void setVelZeroSolid_k(float3 *parvel, char *parflag, int pnum) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) parvel[idx] = make_float3(0); } __global__ void computeVelSolid_k(float3* parPos, char* parflag, float3* parVel, int pnum, float3 rg, float3 R, float3 T) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 v_half = cross(R, parPos[idx] - rg); //粒子的角速度` v_half += T; //固体粒子的总速度 v_half = 0.5*(parVel[idx] + v_half); parVel[idx] = v_half; // parVel[idx] = make_float3(0); } } __device__ inline float3 transposeParticle(float3 p, matrix3x3 rm) { float3 res; res.x = p.x*rm.x00 + p.y*rm.x10 + p.z*rm.x20; res.y = p.x*rm.x01 + p.y*rm.x11 + p.z*rm.x21; res.z = p.x*rm.x02 + p.y*rm.x12 + p.z*rm.x22; return res; } //由rotation matrix "rm"来计算各粒子的位置 __global__ void computePosSolid_k(float3* parvel, float3* parPos, char* parflag, int pnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && parflag[idx] == TYPESOLID) { float3 transp = parPos[idx] - rg0; transp = transposeParticle(transp, rm); parPos[idx] = transp + rg; //if (length(parPos[idx])<10.5) //parPos[idx] -= parvel[idx] * 0.00001; } } __global__ void computeSolidVertex_k(float3* vertexpos, int vnum, float3 rg, float3 rg0, matrix3x3 rm) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<vnum) { float3 transp = 
		vertexpos[idx] - rg0;
		transp = transposeParticle(transp, rm);
		vertexpos[idx] = transp + rg;
	}
}

__global__ void set_nonsolid_2_zero(char* pflag, int pnum, float3* Pos, float3* Vel)
{
	int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	if (idx<pnum && pflag[idx] != TYPESOLID)
	{
		Pos[idx] = make_float3(0, 0, 0);
		Vel[idx] = make_float3(0, 0, 0);
		//Mass[idx] = 0.;
	}
}

// Handle the collisions between fluid, air and airsolo particles and the solid at the
// particle level, making sure they never cross the boundary into the solid's interior.
__global__ void CollisionWithSolid_k(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray phisolid, farray sux, farray suy, farray suz, SCENE scene, float bounceVelParam, float bouncePosParam)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<pnum)
	{
		if (pflag[idx] == TYPESOLID)
			return;
		float3 ipos = ppos[idx];
		float3 ivel = pvel[idx];
		float iphi = getScaleFromFrid(ipos, phisolid);
		if (iphi <= 0.5f)	// close to the solid, within half a cell
		{
			float3 svel = getParticleVelFromGrid(ipos, sux, suy, suz);
			float3 rvel = ivel - svel;
			float d = dparam.cellsize.x * 0.5f;
			float3 phigrad;
			phigrad.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid);
			phigrad.y = getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid);
			phigrad.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid);
			if (length(phigrad) > 0)
			{
				phigrad = normalize(phigrad);	// points outward from the solid
				if (dot(rvel, phigrad)<0 || scene == SCENE_FREEZING)	// relative velocity points into the solid
				{
					ivel -= bounceVelParam * dot(rvel, phigrad)*phigrad;	// set the normal component to the solid's velocity
					if (scene == SCENE_FREEZING)
						ivel -= 0.1f* (rvel - dot(rvel, phigrad)*phigrad);	// damp the tangential velocity
				}
				ipos += bouncePosParam * phigrad * (0.5f - iphi) * dparam.cellsize.x;
			}
		}
		// advance the position with the new velocity
		ipos += ivel*dparam.dt;

		// domain boundary
		float rate = 0.5f, ratevel = -0.5f;
		if (pflag[idx] == TYPEAIRSOLO)
			rate = 0.8f, ratevel = -0.5f;
		float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(rate*dparam.cellsize.x));
		float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(rate*dparam.cellsize.x));
		//	if( ipos.x>tmax.x )
		//		ivel.x *=ratevel, ipos.x=tmax.x;
		//	if( ipos.x<tmin.x )
		//		ivel.x *= ratevel, ipos.x=tmin.x;
		//	if( ipos.y>tmax.y )
		//		ivel.y *=ratevel, ipos.y=tmax.y;
		//	if( ipos.y<tmin.y )
		//		ivel.y *= ratevel, ipos.y=tmin.y;
		//	if( ipos.z>tmax.z )
		//		ivel.z *=ratevel, ipos.z=tmax.z;
		//	if( ipos.z<tmin.z )
		//		ivel.z *= ratevel, ipos.z=tmin.z;

		if (ipos.x <= tmin.x) ipos.x = tmin.x, ivel.x = 0.0f;
		if (ipos.y <= tmin.y) ipos.y = tmin.y, ivel.y = 0.0f;
		if (ipos.z <= tmin.z) ipos.z = tmin.z, ivel.z = 0.0f;
		if (ipos.x >= tmax.x) ipos.x = tmax.x, ivel.x = 0.0f;
		if (ipos.y >= tmax.y) ipos.y = tmax.y, ivel.y = 0.0f;
		if (ipos.z >= tmax.z) ipos.z = tmax.z, ivel.z = 0.0f;

		// store the new velocity and position
		pvel[idx] = ivel;
		ppos[idx] = ipos;
	}
}

// Written specifically for the melting-and-freezing scene, at a finer granularity: handle
// the collisions between fluid, air and airsolo particles and the solid at the particle
// level, making sure they never cross the boundary into the solid's interior.
__global__ void CollisionWithSolid_Freezing(float3 *ppos, float3 *pvel, char *pflag, int pnum, farray phisolid, uint* gridstart, uint* gridend)
{
	int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (idx<pnum)
	{
		if (pflag[idx] == TYPESOLID)
			return;
		float3 ipos = ppos[idx];
		float3 ivel = pvel[idx];
		int i, j, k;
		getijkfrompos(i, j, k, ipos);
		float iphi = getScaleFromFrid(ipos, phisolid);
		if (iphi <= 1.0f)	// a collision is possible, so run the detailed test
		{
			float r = 0.25f*dparam.cellsize.x;
			float3 collisionpos = make_float3(0), dir;
			float depth = 0, dis, adhesionDis = 0;
			int cntcollide = 0, cntadhesion = 0;
			float h = 4 * r;
			for (int di = -1; di <= 1; di++)for (int dj = -1; dj <=
1; dj++)for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int grididx = getidx(i + di, j + dj, k + dk); int start = gridstart[grididx]; if (start == CELL_UNDEF) continue; for (uint p = start; p<gridend[grididx]; p++) { dir = ipos - ppos[p]; dis = length(dir); if (dis>0 && dis<2 * r) //collision { collisionpos += ppos[p]; depth = max(depth, 2 * r - dis); cntcollide++; } else if (dis< h) { adhesionDis += dis; cntadhesion++; } } } } float3 n; float d = dparam.cellsize.x * 0.5f; n.x = getScaleFromFrid(ipos + make_float3(d, 0, 0), phisolid) - getScaleFromFrid(ipos - make_float3(d, 0, 0), phisolid); n.y = getScaleFromFrid(ipos + make_float3(0, d, 0), phisolid) - getScaleFromFrid(ipos - make_float3(0, d, 0), phisolid); n.z = getScaleFromFrid(ipos + make_float3(0, 0, d), phisolid) - getScaleFromFrid(ipos - make_float3(0, 0, d), phisolid); float3 originalvel = ivel; if (length(n) > 0) { n = normalize(n); //points outward if (cntcollide>0) //a collision occurred { collisionpos /= cntcollide; if (length(n) > 0) { //correct vel and pos; ivel -= dot(originalvel, n)*n; //set the normal velocity equal to the solid's //ivel *= 1.1f; ipos += depth * n; } } else if (cntadhesion>0) //apply a slight adhesion (attractive) force { float alpha = 0.1f; ivel -= n * alpha * length(ivel); } } } //update the position with the new velocity //boundary float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); if (ipos.x>tmax.x) ivel.x *= -0.5f, ipos.x = tmax.x; if (ipos.x<tmin.x) ivel.x *= -0.5f, ipos.x = tmin.x; if (ipos.y>tmax.y) ivel.y *= -0.5f, ipos.y = tmax.y; if (ipos.y<tmin.y) ivel.y *= -0.5f, ipos.y = tmin.y; if (ipos.z>tmax.z) ivel.z *= -0.5f, ipos.z = tmax.z; if (ipos.z<tmin.z) ivel.z *= -0.5f, ipos.z = tmin.z; ipos += ivel*dparam.dt; //store the new velocity and position pvel[idx] = ivel; ppos[idx] = ipos; } } __global__ void buoyancyForSolid(float3 *ppos, float3 *pvel, char *pflag, int pnum, uint *gridstart, uint *gridend, float SolidBuoyanceParam) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { int cnt = 0; int i, j, k; float3 ipos = ppos[idx]; getijkfrompos(i, j, k, ipos); float r = dparam.cellsize.x; for (int di = -1; di <= 1; di++) for (int dj = -1; dj <= 1; dj++) for (int dk = -1; dk <= 1; dk++) { if (verifycellidx(i + di, j + dj, k + dk)) { int gidx = getidx(i + di, j + dj, k + dk); uint start = gridstart[gidx]; if (start != CELL_UNDEF) { for (uint p = start; p<gridend[gidx]; p++) if (pflag[p] == TYPEFLUID && length(ppos[p] - ipos)<r) cnt++; } } } if (cnt>2) pvel[idx].z += (dparam.waterrho - dparam.solidrho) * SolidBuoyanceParam * dparam.dt; } } __global__ void solidCollisionWithBound(float3 *ppos, float3 *pvel, char *pflag, int pnum, float SolidbounceParam, int nSolPoint) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pnum && pflag[idx] == TYPESOLID) { //check position float3 tmin = dparam.gmin + (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 tmax = dparam.gmax - (dparam.cellsize + make_float3(0.3f*dparam.samplespace)); float3 ipos = ppos[idx]; float3 ivel = pvel[idx]; //float eps=1e-6; // The bounce-back velocity depends on the penetration depth, the coefficient, and the particle count. //(It depends on the particle count mainly because this velocity acts as a penalty force while the particle count acts as the mass; when the particle velocities are transferred to the rigid body, this amounts to an averaging (division by mass) step.) if (ipos.x<tmin.x) ivel.x += (tmin.x - ipos.x) * SolidbounceParam * nSolPoint; if (ipos.x>tmax.x) ivel.x -= (ipos.x - tmax.x) * SolidbounceParam * nSolPoint; if (ipos.y<tmin.y) ivel.y += (tmin.y - ipos.y) * SolidbounceParam * nSolPoint; if (ipos.y>tmax.y) ivel.y -= (ipos.y - tmax.y) * SolidbounceParam * nSolPoint; if (ipos.z<tmin.z) ivel.z += 
(tmin.z - ipos.z) * SolidbounceParam * nSolPoint; if (ipos.z>tmax.z) ivel.z -= (ipos.z - tmax.z) * SolidbounceParam * nSolPoint; pvel[idx] = ivel; //ppos[idx]=ipos; //do not modify the position here, or the rigid body would deform } } //there is a problem here, remember to solve it. // __global__ void genAirFromSolid_k( float3 *ppos, float3 *pvel, char *pflag, float *psolubility, float *paircontain, float *pmass, float *pTemperature,int pnum, // charray lsmark, farray phisolid, farray Tgrid, int *addnum, float *randfloat, int nrandnum, int frame ) // { // int idx=__mul24( blockIdx.x, blockDim.x )+threadIdx.x; // if( idx<dparam.gnum &&lsmark[idx]==TYPEFLUID && phisolid[idx]>0 ) //this cell is a fluid cell // { // int i,j,k; // getijk( i,j,k,idx); // bool flag=false; // for( int di=-1; di<=1; di++ ) for( int dj=-1; dj<=1; dj++ ) for( int dk=-1; dk<=1; dk++ ) // { // if(verifycellidx(i+di,j+dj,k+dk) && phisolid( i+di,j+dj,k+dk)<0 ) // flag=true; // } // if( !flag ) // return; // // int cnt= (idx*frame) % ( nrandnum-100 ); // if( randfloat[cnt++]>0.95 ) //if randnum>threshold, generate an airsolo bubble // { // int addidx=atomicAdd( addnum, 1 ); // float3 addpos= (make_float3(randfloat[cnt], randfloat[cnt], randfloat[cnt]) + make_float3(i,j,k) ) * dparam.cellsize.x; // ppos[pnum+addidx] = addpos; // pvel[pnum+addidx]=make_float3(0); // pflag[pnum+addidx]=TYPEAIRSOLO; // psolubility[pnum+addidx]=0; // paircontain[pnum+addidx]=0; // pmass[pnum+addidx]=dparam.airm0; // pTemperature[pnum+addidx]=getScaleFromFrid( addpos, Tgrid ); // } // } // } //This is the main function that accounts for latent heat: when the temperature exceeds a limit (e.g. a solid above its melting point), the excess heat is stored as latent heat; when the latent heat meets a certain condition, a phase change occurs. __global__ void updateLatentHeat_k(float *parTemperature, float *parLHeat, char *partype, int pnum, float meltingpoint, float boilingpoint, float LiquidHeatTh) { int idx = __mul24(blockDim.x, blockIdx.x) + threadIdx.x; if (idx<pnum) { if (partype[idx] == TYPESOLID && parTemperature[idx]>meltingpoint) { parLHeat[idx] += parTemperature[idx] - meltingpoint; parTemperature[idx] = meltingpoint; } if (partype[idx] == TYPEFLUID) { if (parTemperature[idx]<meltingpoint) { parLHeat[idx] -= meltingpoint - parTemperature[idx]; parTemperature[idx] = meltingpoint; } else if (parTemperature[idx]>boilingpoint) { parLHeat[idx] += parTemperature[idx] - boilingpoint; // parLHeat[idx] = min( parLHeat[idx], LiquidHeatTh+5 ); parTemperature[idx] = boilingpoint; } else parLHeat[idx] = LiquidHeatTh; } } } __global__ void pouringwater(float3* pos, float3* vel, float* parmass, char* parflag, float *ptemperature, float *pLHeat, float *pGasContain, int parnum, float3 *ppourpos, float3 *ppourvel, char pourflag, int pournum, float *randfloat, int randnum, int frame, float posrandparam, float velrandparam, float defaultLiquidT, float LiquidHeatTh) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<pournum) { //randomize the velocity and position int randbase = (frame + idx) % (randnum - 6); float3 randvel = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; randbase += 3; float3 randpos = make_float3(randfloat[randbase], randfloat[randbase + 1], randfloat[randbase + 2]) *2.0f - 1.0f; pos[parnum + idx] = ppourpos[idx] + randpos * posrandparam*dparam.samplespace; vel[parnum + idx] = ppourvel[idx] + randvel * velrandparam; parmass[parnum + idx] = dparam.m0; parflag[parnum + idx] = pourflag; ptemperature[parnum + idx] = defaultLiquidT; pLHeat[parnum + idx] = LiquidHeatTh; pGasContain[parnum + idx] = 0; } } inline __device__ float getlen(float x, float y) { return sqrt(x*x + y*y); } __global__ void initheat_grid_k(farray tp, 
charray mark) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx<dparam.gnum) { int i, j, k; getijk(i, j, k, idx); float x = i, z = k; float r = NX*0.15; if (getlen(x - NX / 4, z - NZ / 4) <= r) tp[idx] = 100, mark[idx] = TYPESOLID; else if (getlen(x - NX / 4 * 3, z - NZ / 4 * 3) <= r) tp[idx] = 0, mark[idx] = TYPEFLUID; else if (z<NZ / 2) tp[idx] = 20, mark[idx] = TYPEVACUUM; else tp[idx] = 80, mark[idx] = TYPEAIR; } } __global__ void set_softparticle_position(float3* solidParPos, float3* mParPos, float3* solidParVelFLIP,float3* mParVel, char* partype) { int idx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (idx < dparam.gnum) if (partype[idx]==TYPESOLID) { mParPos[idx] = solidParPos[idx]; mParVel[idx] = (solidParVelFLIP[idx]+mParVel[idx])/2.0; // mParVel[idx] = solidParVelFLIP[idx]; } };
13e0e8d41bffd23d52b6ad4e8e0a80431445fe83.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "doNothing.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( doNothing), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( doNothing), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( doNothing), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
13e0e8d41bffd23d52b6ad4e8e0a80431445fe83.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "doNothing.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); doNothing<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { doNothing<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { doNothing<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
963973de1694543e55483c00a00f595f2e8a8af3.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 2 #define TW 4 #define TC 16 #define C 160 #define N 96 #define H 28 #define W 28 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(hipError_t code) { if (code != hipSuccess) { std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[4]; __shared__ float pad_temp_shared[2560]; __shared__ float kernel_shared[2560]; float pad_temp_shared_local[10]; float kernel_shared_local[10]; compute_local[(0)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { for (int rx_outer = 0; rx_outer < 3; ++rx_outer) { __syncthreads(); pad_temp_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 755))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 756))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 757))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 758))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1539))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1540))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1541))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1542))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); kernel_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = kernel[(((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 9))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 18))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 27))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 36))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 45))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 54))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 63))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 72))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 81))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 90))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = 
kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 99))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 108))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 117))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 126))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 135))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 144))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 153))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 162))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 171))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 180))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 189))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 198))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 
1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 207))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 216))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 225))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 234))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 243))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 252))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 261))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 270))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 279))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 288))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 297))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 306))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 315))]; 
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 324))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 333))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 342))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 351))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(((((int)threadIdx.y) * 4) + ((int)threadIdx.x)))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 8))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 16))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 24))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 32))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 40))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 48))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 56))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 64))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 72))]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 160))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1280))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1281))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 2))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1282))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 3))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1283))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 4))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1284))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + 
(pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 80))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 88))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 96))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 104))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 112))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 120))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 128))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 136))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 144))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 152))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 5))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1285))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 6))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1286))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 7))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1287))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 8))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1288))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 9))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1289))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = 
(compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 160))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 168))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 176))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 184))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 192))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 200))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 208))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 216))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 224))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 232))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 10))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1290))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 11))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1291))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 12))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1292))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 13))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1293))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 
160) + 14))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1294))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 240))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 248))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 256))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 264))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 272))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 280))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 288))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 296))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 304))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 312))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 15))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1295))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 16))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1296))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) 
+ 17))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1297))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 18))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1298))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 19))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1299))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 320))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 328))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 336))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 344))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 352))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 360))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 368))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 376))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 384))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 392))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 
20))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1300))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 21))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1301))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 22))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1302))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 23))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1303))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 24))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1304))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 400))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 408))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 416))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 424))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 432))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 440))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 448))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 
4) + ((int)threadIdx.x)) + 456))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 464))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 472))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 25))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1305))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 26))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1306))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 27))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1307))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 28))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1308))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 29))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1309))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 480))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 488))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 496))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 504))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 
4) + ((int)threadIdx.x)) + 512))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 520))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 528))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 536))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 544))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 552))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 30))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1310))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 31))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1311))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 32))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1312))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 33))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1313))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 34))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1314))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 560))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 
4) + ((int)threadIdx.x)) + 568))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 576))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 584))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 592))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 600))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 608))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 616))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 624))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 632))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 35))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1315))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 36))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1316))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 37))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1317))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 38))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1318))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 39))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1319))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 640))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 648))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 656))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 664))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 672))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 680))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 688))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 696))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 704))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 712))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 40))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1320))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 41))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1321))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 42))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1322))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 43))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1323))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 44))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1324))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = 
(compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 720))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 728))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 736))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 744))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 752))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 760))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 768))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 776))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 784))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 792))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 45))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1325))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 46))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1326))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 47))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1327))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 48))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1328))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 49))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1329))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); 
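// Note: the statements in this region follow one fixed, fully unrolled pattern (apparently emitted by an
// auto-tuner). Each repeated group copies ten input values from pad_temp_shared (spaced 8 elements apart
// from a common base) and ten weights from kernel_shared into the *_local registers, then issues twenty
// multiply-accumulates into the 2x2 register tile compute_local[0..3]: compute_local[0]/[2] reuse
// pad_temp_shared_local[0..4], compute_local[1]/[3] reuse pad_temp_shared_local[5..9].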
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 800))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 808))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 816))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 824))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 832))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 840))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 848))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 856))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 864))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 872))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 50))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1330))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 51))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1331))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 52))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1332))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 53))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1333))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 54))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1334))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * 
kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 880))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 888))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 896))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 904))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 912))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 920))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 928))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 936))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 944))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 952))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 55))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1335))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 56))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1336))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 57))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1337))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 58))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1338))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 59))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1339))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + 
(pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 960))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 968))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 976))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 984))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 992))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1000))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1008))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1016))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1024))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1032))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 60))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1340))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 61))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1341))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 62))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1342))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 63))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1343))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 64))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1344))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = 
(compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1040))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1048))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1056))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1064))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1072))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1080))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1088))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1096))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1104))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1112))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 65))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1345))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 66))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1346))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 67))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1347))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 68))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1348))]; kernel_shared_local[(4)] = 
kernel_shared[(((((int)threadIdx.z) * 160) + 69))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1349))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1120))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1128))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1136))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1144))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1152))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1160))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1168))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1176))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1184))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1192))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 70))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1350))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 71))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1351))]; 
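// The weights for each unrolled group come from two halves of kernel_shared: five consecutive values at
// threadIdx.z * 160 + k (feeding compute_local[0]/[1]) and five at the same index + 1280 (feeding
// compute_local[2]/[3]). Across successive groups the kernel_shared index advances by 5 and the
// pad_temp_shared base by 80, stepping through the shared-memory tiles of what is most likely the
// fully unrolled reduction loop of a convolution.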
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 72))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1352))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 73))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1353))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 74))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1354))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1200))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1208))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1216))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1224))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1232))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1240))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1248))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1256))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1264))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 
1272))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 75))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1355))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 76))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1356))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 77))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1357))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 78))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1358))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 79))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1359))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1280))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1288))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1296))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1304))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1312))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1320))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + 
((int)threadIdx.x)) + 1328))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1336))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1344))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1352))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 80))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1360))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 81))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1361))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 82))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1362))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 83))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1363))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 84))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1364))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1360))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1368))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1376))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) 
* 4) + ((int)threadIdx.x)) + 1384))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1392))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1400))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1408))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1416))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1424))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1432))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 85))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1365))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 86))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1366))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 87))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1367))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 88))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1368))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 89))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1369))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = 
pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1440))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1448))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1456))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1464))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1472))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1480))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1488))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1496))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1504))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1512))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 90))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1370))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 91))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1371))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 92))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1372))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 93))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1373))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 94))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1374))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); 
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1520))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1528))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1536))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1544))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1552))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1560))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1568))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1576))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1584))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1592))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 95))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1375))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 96))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1376))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 97))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1377))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 98))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1378))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 99))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1379))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * 
kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1600))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1608))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1616))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1624))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1632))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1640))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1648))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1656))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1664))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1672))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 100))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1380))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 101))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1381))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 102))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1382))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 103))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1383))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 104))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1384))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1680))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1688))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1696))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1704))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1712))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1720))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1728))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1736))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1744))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1752))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 105))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1385))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 106))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1386))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 107))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1387))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 108))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1388))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 109))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1389))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); 
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1760))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1768))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1776))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1784))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1792))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1800))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1808))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1816))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1824))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1832))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 110))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1390))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 111))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1391))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 112))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1392))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 113))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1393))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 114))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1394))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * 
kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1840))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1848))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1856))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1864))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1872))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1880))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1888))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1896))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1904))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1912))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 115))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1395))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 116))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1396))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 117))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1397))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 118))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1398))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 119))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1399))]; compute_local[(0)] = (compute_local[(0)] + 
(pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1920))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1928))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1936))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1944))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1952))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1960))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1968))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1976))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1984))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1992))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 120))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1400))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 121))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1401))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 122))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1402))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 160) + 123))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1403))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 124))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1404))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2000))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2008))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2016))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2024))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2032))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2040))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2048))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2056))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2064))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2072))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 125))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1405))]; 
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 126))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1406))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 127))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1407))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 128))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1408))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 129))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1409))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2080))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2088))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2096))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2104))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2112))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2120))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2128))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2136))]; pad_temp_shared_local[(4)] = 
pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2144))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2152))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 130))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1410))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 131))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1411))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 132))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1412))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 133))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1413))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 134))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1414))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2160))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2168))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2176))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2184))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2192))]; 
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2200))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2208))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2216))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2224))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2232))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 135))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1415))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 136))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1416))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 137))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1417))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 138))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1418))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 139))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1419))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2240))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + 
((int)threadIdx.x)) + 2248))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2256))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2264))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2272))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2280))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2288))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2296))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2304))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2312))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 140))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1420))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 141))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1421))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 142))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1422))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 143))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1423))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 144))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1424))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2320))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2328))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2336))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2344))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2352))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2360))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2368))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2376))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2384))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2392))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 145))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1425))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 146))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1426))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 147))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1427))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 148))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1428))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 149))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1429))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); 
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2400))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2408))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2416))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2424))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2432))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2440))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2448))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2456))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2464))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2472))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 150))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1430))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 151))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1431))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 152))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1432))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 153))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1433))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 154))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1434))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * 
kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2480))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2488))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2496))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2504))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2512))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2520))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2528))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2536))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2544))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2552))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 155))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1435))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 156))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1436))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 157))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1437))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 158))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1438))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 159))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1439))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + 
(pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
      compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
      compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
      compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
      compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
      compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
      compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
      compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
      compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
      compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
      compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
      compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
    }
  }
  // Write the 2x2 register tile back to global memory. The constant offsets come from the
  // generated schedule: 56 = 2*28 (two output rows apart) and 6272 = 8*28*28 (eight output
  // channels apart).
  const int out_idx = (((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784) +
                      (((int)blockIdx.y) * 112) + (((int)threadIdx.y) * 28) +
                      (((int)blockIdx.x) * 4) + ((int)threadIdx.x);
  compute[out_idx] = compute_local[(0)];
  compute[out_idx + 6272] = compute_local[(2)];
  compute[out_idx + 56] = compute_local[(1)];
  compute[out_idx + 6328] = compute_local[(3)];
}
// cuDNN baseline using the IMPLICIT_GEMM forward algorithm. The three Conv* classes below
// are identical except for the forward algorithm they request from cuDNN.
class ConvGemm{
public:
  float *cpuKernel;
  float alpha = 1.0f;
  float beta = 0.0f;
  cudnnHandle_t convCudnn;
  void* d_workspace{nullptr};
  size_t workspace_bytes{0};
  cudnnTensorDescriptor_t convInputDescriptor;
  cudnnTensorDescriptor_t convOutputDescriptor;
  cudnnFilterDescriptor_t convKernelDescriptor;
  cudnnConvolutionDescriptor_t convDesc;
  float *output;
  float *kernel;
  void initialize();
  float *forward(float *input);
};
void ConvGemm::initialize(){
  // Device buffers for the 3x3 filter bank (C*N*9 weights) and the output feature map.
  hipMalloc(&kernel, sizeof(float)*C*N*9);
  hipMalloc(&this->output, sizeof(float)*N*H*W);
  cudnnCreate(&convCudnn);
  cudnnCreateTensorDescriptor(&convInputDescriptor);
  cudnnSetTensor4dDescriptor(convInputDescriptor,
                             /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT,
                             /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W);
  cudnnCreateFilterDescriptor(&convKernelDescriptor);
  cudnnSetFilter4dDescriptor(convKernelDescriptor,
                             /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW,
                             /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S);
  cudnnCreateConvolutionDescriptor(&convDesc);
  cudnnSetConvolution2dDescriptor(convDesc,
                                  /*pad_height=*/1, /*pad_width=*/1,
                                  /*vertical_stride=*/1, /*horizontal_stride=*/1,
                                  /*dilation_height=*/1, /*dilation_width=*/1,
                                  /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
  int batch_size{0}, channels{0}, height{0}, width{0};
  cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor,
                                        &batch_size, &channels, &height, &width);
  cudnnCreateTensorDescriptor(&convOutputDescriptor);
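  // Note: the dimensions returned by cudnnGetConvolution2dForwardOutputDim above are not
  // used below; with pad=1, stride=1 and a 3x3 filter the convolution preserves H and W,
  // so the output descriptor is set directly to (1, N, H, W).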
  cudnnSetTensor4dDescriptor(convOutputDescriptor,
                             /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT,
                             /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W);
  cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor,
                                          convDesc, convOutputDescriptor,
                                          CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes);
  hipMalloc(&d_workspace, workspace_bytes);
  unsigned int kernelSize = R*S*C*N; //kernel
  this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
  for(int i = 0; i < kernelSize; ++i){
    this->cpuKernel[i] = 1.0f;
  }
  hipMemcpy(kernel, cpuKernel, R*S*C*N*sizeof(float), hipMemcpyHostToDevice);
  free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
  hipMemset(output, 0, 1*N*H*W*sizeof(float));
  checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input,
                                     convKernelDescriptor, kernel, convDesc,
                                     CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
                                     d_workspace, workspace_bytes,
                                     &beta, convOutputDescriptor, output));
  return output;
}
class ConvWinogradeNon{
public:
  float *cpuKernel;
  float alpha = 1.0f;
  float beta = 0.0f;
  cudnnHandle_t convCudnn;
  void* d_workspace{nullptr};
  size_t workspace_bytes{0};
  cudnnTensorDescriptor_t convInputDescriptor;
  cudnnTensorDescriptor_t convOutputDescriptor;
  cudnnFilterDescriptor_t convKernelDescriptor;
  cudnnConvolutionDescriptor_t convDesc;
  float *output;
  float *kernel;
  void initialize();
  float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
  hipMalloc(&kernel, sizeof(float)*C*N*9);
  hipMalloc(&this->output, sizeof(float)*N*H*W);
  cudnnCreate(&convCudnn);
  cudnnCreateTensorDescriptor(&convInputDescriptor);
  cudnnSetTensor4dDescriptor(convInputDescriptor,
                             /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT,
                             /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W);
  cudnnCreateFilterDescriptor(&convKernelDescriptor);
  cudnnSetFilter4dDescriptor(convKernelDescriptor,
                             /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW,
                             /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S);
  cudnnCreateConvolutionDescriptor(&convDesc);
  cudnnSetConvolution2dDescriptor(convDesc,
                                  /*pad_height=*/1, /*pad_width=*/1,
                                  /*vertical_stride=*/1, /*horizontal_stride=*/1,
                                  /*dilation_height=*/1, /*dilation_width=*/1,
                                  /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
  int batch_size{0}, channels{0}, height{0}, width{0};
  cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor,
                                        &batch_size, &channels, &height, &width);
  cudnnCreateTensorDescriptor(&convOutputDescriptor);
  cudnnSetTensor4dDescriptor(convOutputDescriptor,
                             /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT,
                             /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W);
  cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor,
                                          convDesc, convOutputDescriptor,
                                          CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes);
  hipMalloc(&d_workspace, workspace_bytes);
  unsigned int kernelSize = R*S*C*N; //kernel
  this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
  for(int i = 0; i < kernelSize; ++i){
    this->cpuKernel[i] = 1.0f;
  }
  hipMemcpy(kernel, cpuKernel, R*S*C*N*sizeof(float), hipMemcpyHostToDevice);
  free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
  hipMemset(output, 0, 1*N*H*W*sizeof(float));
  checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input,
                                     convKernelDescriptor, kernel, convDesc,
                                     CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
                                     d_workspace, workspace_bytes,
                                     &beta, convOutputDescriptor, output));
  return output;
}
convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, 
float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; case 2: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += 
shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += 
shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; hipMalloc(&device_input,C*H*W*sizeof(float)); hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; 
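    // Reference implementations: cuDNN implicit GEMM, non-fused Winograd, and FFT. They are
    // initialized here, warmed up once, and then timed below against the TVM-generated kernel
    // (default_function_kernel0) and the hand-written tiled kernel (conv2d).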
    convGemm.initialize();
    ConvWinogradeNon convWinogradeNon;
    convWinogradeNon.initialize();
    ConvFFT convFFT;
    convFFT.initialize();
    float *out_cudnn;
    float *out_cudnn_host = new float[N*H*W];
    hipEvent_t event_start;
    hipEvent_t event_stop;
    hipEventCreate(&event_start);
    hipEventCreate(&event_stop);
    // Warm-up runs; the implicit-GEMM result is kept on the host as the correctness reference.
    out_cudnn = convGemm.forward(device_input);
    hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
    out_cudnn = convFFT.forward(device_input);
    out_cudnn = convWinogradeNon.forward(device_input);
    float *device_K;
    float *device_out;
    hipMalloc(&device_out,H*W*N*sizeof(float));
    hipMemset(device_out,0,H*W*N*sizeof(float));
    hipMalloc(&device_K,C*N*9*sizeof(float));
    hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
    // Time the three cuDNN baselines.
    hipEventRecord(event_start);
    convGemm.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnGemmTime;
    hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
    hipEventRecord(event_start);
    convWinogradeNon.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnWinogradeTimeNon;
    hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
    hipEventRecord(event_start);
    convFFT.forward(device_input);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float cudnnFFTTime;
    hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
    // Time the TVM-generated kernel.
    dim3 grid(7,7,6);
    dim3 block(4,2,8);
    hipEventRecord(event_start);
    hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float time_tvm;
    hipEventElapsedTime(&time_tvm, event_start, event_stop);
    float *out_tvm = new float[N*H*W];
    hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
    hipMemset(device_out, 0, sizeof(float)*N*H*W);
    // Time the hand-written tiled kernel (needs an enlarged dynamic shared-memory limit).
    chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
    hipEventRecord(event_start);
    hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
    hipEventRecord(event_stop);
    hipEventSynchronize(event_stop);
    float time_tdc;
    hipEventElapsedTime(&time_tdc, event_start, event_stop);
    float *out_tdc = new float[N*H*W];
    hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
    // Append the timings and speedups over the tiled kernel to the CSV log and echo them to stdout.
    ofstream outfile;
    char buffer[1000];
    int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
            cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
            cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
    outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app);
    outfile << buffer;
    // Correctness check against the cuDNN GEMM result (currently computed but not reported).
    float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
    cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
            time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<
            cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
    return 0;
}
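// Build note (not part of the original source; adjust to your environment): this HIP port
// still calls cuDNN, so it targets NVIDIA GPUs. A plausible compile line is something like
//   hipcc -O3 -fopenmp <this_file> -lcudnn -o conv_bench
// with the cuDNN headers and library visible on the include/library paths.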
963973de1694543e55483c00a00f595f2e8a8af3.cu
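// Original CUDA version of the benchmark above: the same TVM-generated kernel, hand-written
// tiled conv2d kernel, and cuDNN baselines, written directly against the cuda* / cudnn APIs.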
#include <cudnn.h> #include <stdio.h> #include <cuda.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 2 #define TW 4 #define TC 16 #define C 160 #define N 96 #define H 28 #define W 28 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(cudaError_t code) { if (code != cudaSuccess) { std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[4]; __shared__ float pad_temp_shared[2560]; __shared__ float kernel_shared[2560]; float pad_temp_shared_local[10]; float kernel_shared_local[10]; compute_local[(0)] = 0.000000e+00f; compute_local[(2)] = 0.000000e+00f; compute_local[(1)] = 0.000000e+00f; compute_local[(3)] = 0.000000e+00f; for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { for (int rx_outer = 0; rx_outer < 3; ++rx_outer) { __syncthreads(); pad_temp_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 755))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 756))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 757))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 758))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1539))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1540))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1541))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1542))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f); pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? 
data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f); kernel_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = kernel[(((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 9))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 18))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 27))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 36))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 45))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 54))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 63))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 72))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 81))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 90))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = 
kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 99))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 108))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 117))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 126))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 135))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 144))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 153))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 162))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 171))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 180))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 189))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 198))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 
1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 207))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 216))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 225))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 234))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 243))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 252))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 261))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 270))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 279))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 288))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 297))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 306))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 315))]; 
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 324))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 333))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 342))]; kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 351))]; __syncthreads(); pad_temp_shared_local[(0)] = pad_temp_shared[(((((int)threadIdx.y) * 4) + ((int)threadIdx.x)))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 8))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 16))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 24))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 32))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 40))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 48))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 56))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 64))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 72))]; kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 160))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1280))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1281))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 2))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1282))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 3))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1283))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 4))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1284))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + 
(pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 80))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 88))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 96))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 104))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 112))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 120))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 128))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 136))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 144))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 152))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 5))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1285))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 6))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1286))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 7))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1287))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 8))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1288))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 9))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1289))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = 
(compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 160))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 168))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 176))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 184))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 192))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 200))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 208))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 216))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 224))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 232))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 10))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1290))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 11))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1291))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 12))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1292))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 13))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1293))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 
160) + 14))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1294))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 240))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 248))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 256))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 264))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 272))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 280))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 288))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 296))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 304))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 312))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 15))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1295))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 16))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1296))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) 
+ 17))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1297))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 18))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1298))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 19))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1299))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 320))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 328))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 336))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 344))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 352))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 360))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 368))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 376))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 384))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 392))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 
20))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1300))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 21))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1301))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 22))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1302))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 23))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1303))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 24))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1304))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 400))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 408))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 416))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 424))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 432))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 440))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 448))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 
4) + ((int)threadIdx.x)) + 456))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 464))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 472))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 25))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1305))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 26))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1306))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 27))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1307))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 28))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1308))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 29))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1309))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 480))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 488))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 496))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 504))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 
4) + ((int)threadIdx.x)) + 512))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 520))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 528))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 536))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 544))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 552))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 30))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1310))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 31))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1311))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 32))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1312))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 33))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1313))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 34))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1314))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 560))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 
4) + ((int)threadIdx.x)) + 568))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 576))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 584))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 592))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 600))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 608))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 616))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 624))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 632))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 35))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1315))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 36))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1316))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 37))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1317))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 38))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1318))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 39))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1319))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 640))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 648))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 656))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 664))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 672))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 680))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 688))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 696))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 704))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 712))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 40))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1320))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 41))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1321))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 42))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1322))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 43))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1323))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 44))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1324))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = 
(compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 720))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 728))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 736))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 744))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 752))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 760))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 768))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 776))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 784))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 792))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 45))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1325))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 46))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1326))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 47))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1327))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 48))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1328))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 49))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1329))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); 
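// Note: each unrolled reduction step above follows the same pattern -- load ten input values
// (pad_temp_shared_local[0..9]) and ten filter values (kernel_shared_local[0..9]) from shared
// memory, then accumulate a 2x2 register tile: compute_local[0]/[1] pair the two input groups
// with filter entries 0..4, while compute_local[2]/[3] pair them with filter entries 5..9.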
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 800))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 808))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 816))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 824))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 832))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 840))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 848))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 856))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 864))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 872))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 50))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1330))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 51))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1331))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 52))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1332))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 53))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1333))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 54))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1334))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * 
kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 880))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 888))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 896))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 904))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 912))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 920))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 928))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 936))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 944))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 952))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 55))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1335))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 56))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1336))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 57))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1337))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 58))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1338))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 59))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1339))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + 
(pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 960))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 968))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 976))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 984))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 992))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1000))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1008))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1016))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1024))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1032))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 60))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1340))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 61))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1341))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 62))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1342))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 63))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1343))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 64))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1344))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = 
(compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1040))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1048))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1056))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1064))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1072))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1080))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1088))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1096))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1104))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1112))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 65))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1345))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 66))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1346))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 67))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1347))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 68))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1348))]; kernel_shared_local[(4)] = 
kernel_shared[(((((int)threadIdx.z) * 160) + 69))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1349))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1120))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1128))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1136))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1144))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1152))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1160))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1168))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1176))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1184))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1192))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 70))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1350))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 71))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1351))]; 
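// Note: the kernel_shared entries at offset +1280 appear to cache the filter slice for a second
// output channel handled by the same thread, while the low offsets (threadIdx.z * 160 + k) walk
// the current reduction slice for the first output channel.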
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 72))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1352))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 73))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1353))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 74))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1354))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1200))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1208))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1216))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1224))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1232))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1240))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1248))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1256))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1264))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 
1272))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 75))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1355))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 76))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1356))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 77))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1357))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 78))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1358))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 79))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1359))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1280))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1288))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1296))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1304))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1312))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1320))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + 
((int)threadIdx.x)) + 1328))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1336))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1344))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1352))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 80))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1360))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 81))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1361))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 82))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1362))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 83))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1363))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 84))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1364))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1360))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1368))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1376))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) 
* 4) + ((int)threadIdx.x)) + 1384))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1392))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1400))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1408))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1416))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1424))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1432))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 85))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1365))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 86))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1366))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 87))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1367))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 88))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1368))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 89))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1369))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = 
pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1440))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1448))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1456))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1464))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1472))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1480))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1488))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1496))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1504))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1512))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 90))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1370))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 91))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1371))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 92))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1372))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 93))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1373))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 94))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1374))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); 
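// Note: the body of this kernel appears to be machine-generated, fully unrolled code (TVM-style
// scheduling). Each repeated step loads two groups of five padded-input values from shared memory
// (corresponding elements of the two groups sit 8 apart) and two groups of five consecutive filter
// taps (the second group 1280 elements later, i.e. a second output channel), then accumulates the
// four input-by-filter combinations into the 2x2 register tile compute_local[0..3].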
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1520))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1528))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1536))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1544))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1552))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1560))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1568))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1576))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1584))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1592))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 95))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1375))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 96))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1376))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 97))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1377))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 98))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1378))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 99))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1379))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * 
kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1600))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1608))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1616))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1624))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1632))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1640))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1648))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1656))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1664))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1672))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 100))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1380))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 101))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1381))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 102))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1382))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 103))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1383))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 104))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1384))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1680))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1688))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1696))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1704))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1712))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1720))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1728))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1736))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1744))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1752))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 105))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1385))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 106))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1386))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 107))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1387))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 108))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1388))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 109))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1389))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); 
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1760))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1768))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1776))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1784))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1792))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1800))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1808))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1816))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1824))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1832))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 110))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1390))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 111))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1391))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 112))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1392))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 113))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1393))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 114))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1394))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * 
kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1840))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1848))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1856))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1864))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1872))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1880))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1888))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1896))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1904))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1912))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 115))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1395))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 116))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1396))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 117))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1397))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 118))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1398))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 119))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1399))]; compute_local[(0)] = (compute_local[(0)] + 
(pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1920))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1928))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1936))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1944))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1952))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1960))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1968))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1976))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1984))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1992))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 120))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1400))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 121))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1401))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 122))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1402))]; kernel_shared_local[(3)] = 
kernel_shared[(((((int)threadIdx.z) * 160) + 123))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1403))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 124))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1404))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2000))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2008))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2016))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2024))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2032))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2040))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2048))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2056))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2064))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2072))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 125))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1405))]; 
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 126))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1406))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 127))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1407))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 128))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1408))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 129))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1409))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2080))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2088))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2096))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2104))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2112))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2120))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2128))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2136))]; pad_temp_shared_local[(4)] = 
pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2144))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2152))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 130))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1410))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 131))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1411))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 132))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1412))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 133))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1413))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 134))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1414))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2160))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2168))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2176))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2184))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2192))]; 
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2200))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2208))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2216))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2224))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2232))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 135))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1415))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 136))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1416))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 137))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1417))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 138))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1418))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 139))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1419))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2240))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + 
((int)threadIdx.x)) + 2248))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2256))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2264))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2272))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2280))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2288))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2296))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2304))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2312))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 140))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1420))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 141))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1421))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 142))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1422))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 143))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1423))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 144))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1424))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + 
(pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2320))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2328))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2336))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2344))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2352))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2360))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2368))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2376))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2384))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2392))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 145))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1425))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 146))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1426))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 147))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1427))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 148))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1428))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 149))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1429))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); 
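// Final unroll steps of this reduction: the filter offsets below run up to threadIdx.z*160 + 159
// (and + 1439 for the second output channel); once they are consumed, the loop nest closes and the
// accumulators are written back to global memory.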
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2400))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2408))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2416))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2424))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2432))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2440))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2448))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2456))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2464))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2472))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 150))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1430))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 151))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1431))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 152))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1432))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 153))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1433))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 154))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1434))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * 
kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)])); pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2480))]; pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2488))]; pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2496))]; pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2504))]; pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2512))]; pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2520))]; pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2528))]; pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2536))]; pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2544))]; pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2552))]; kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 155))]; kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1435))]; kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 156))]; kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1436))]; kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 157))]; kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1437))]; kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 158))]; kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1438))]; kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 159))]; kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1439))]; compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)])); compute_local[(0)] = (compute_local[(0)] + 
(pad_temp_shared_local[(2)] * kernel_shared_local[(2)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)])); compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)])); compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)])); compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)])); compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
    }
  }
  // Write back the 2x2 register tile accumulated above. With the NCHW 28x28 layout implied by the
  // strides (784 = 28*28, 12544 = 16*784), the offsets select: +56 = two output rows down,
  // +6272 = eight output channels ahead.
  compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)))] = compute_local[(0)];
  compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 6272))] = compute_local[(2)];
  compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 56))] = compute_local[(1)];
  compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 6328))] = compute_local[(3)];
}
// Host-side cuDNN wrappers. The three classes below (ConvGemm, ConvWinogradeNon, ConvFFT) use
// identical descriptor setup: a 1 x C x H x W input, an N x C x R x S filter, and a 3x3 / pad 1 /
// stride 1 cross-correlation. They differ only in the forward algorithm passed to
// cudnnConvolutionForward (implicit GEMM, non-fused Winograd, FFT).
class ConvGemm{
public:
  float *cpuKernel;
  float alpha = 1.0f;
  float beta = 0.0f;
  cudnnHandle_t convCudnn;
  void* d_workspace{nullptr};
  size_t workspace_bytes{0};
  cudnnTensorDescriptor_t convInputDescriptor;
  cudnnTensorDescriptor_t convOutputDescriptor;
  cudnnFilterDescriptor_t convKernelDescriptor;
  cudnnConvolutionDescriptor_t convDesc;
  float *output;
  float *kernel;
  void initialize();
  float *forward(float *input);
};
void ConvGemm::initialize(){
  // Filter buffer hard-codes R*S == 9 (a 3x3 kernel); the output buffer holds one N x H x W map.
  cudaMalloc(&kernel,sizeof(float)*C*N*9);
  cudaMalloc(&this->output,sizeof(float)*N*H*W);
  cudnnCreate(&convCudnn);
  cudnnCreateTensorDescriptor(&convInputDescriptor);
  cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W);
  cudnnCreateFilterDescriptor(&convKernelDescriptor);
  cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S);
  cudnnCreateConvolutionDescriptor(&convDesc);
  cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
  // With a 3x3 filter, pad 1, stride 1, the computed output shape equals 1 x N x H x W.
  int batch_size{0}, channels{0}, height{0}, width{0};
  cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width);
cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, 
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, 
unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; case 2: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 2: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 2; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 3: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 3; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; case 4: #pragma unroll for (unsigned int th = 0; th < 2; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 4; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[0] += 
shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[0]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[0]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[1]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[0]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[1]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[1]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[2]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[3]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[0] += 
shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[3]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[4]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[3]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[4]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[4]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[5]; temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[6]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[6]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[7]; temp_result[4] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 2]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[6]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[7]; temp_result[5] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 3]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[7]; temp_result[6] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 4]*data_array[8]; temp_result[7] += shared_input[c*(TH+2)*(WPAD) + 3 * WPAD + tw_id * TW + 5]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; cudaMalloc(&device_input,C*H*W*sizeof(float)); cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice); float *K = 
new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; cudaEvent_t event_start; cudaEvent_t event_stop; cudaEventCreate(&event_start); cudaEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; cudaMalloc(&device_out,H*W*N*sizeof(float)); cudaMemset(device_out,0,H*W*N*sizeof(float)); cudaMalloc(&device_K,C*N*9*sizeof(float)); cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(event_start); convGemm.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnGemmTime; cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop); cudaEventRecord(event_start); convWinogradeNon.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnWinogradeTimeNon; cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); cudaEventRecord(event_start); convFFT.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnFFTTime; cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(7,7,6); dim3 block(4,2,8); cudaEventRecord(event_start); default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tvm; cudaEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); cudaMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); cudaEventRecord(event_start); conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tdc; cudaEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/2080Ti-layers-eval-modeling.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_cudnn_host, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<< cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
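// Editor's note (not part of the dataset row above): the benchmark harness above calls
// checkCUDNN(...) and chkerr(...), but their definitions sit outside this excerpt. The
// sketch below is only an assumption of what such helpers commonly look like -- the names'
// behavior, message format, and exit policy are guesses, not the original definitions.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cudnn.h>

// Hypothetical cuDNN status check: print a readable error and abort on failure.
#define checkCUDNN(expr)                                                   \
    do {                                                                   \
        cudnnStatus_t status_ = (expr);                                    \
        if (status_ != CUDNN_STATUS_SUCCESS) {                             \
            fprintf(stderr, "cuDNN error %s at %s:%d\n",                   \
                    cudnnGetErrorString(status_), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// Hypothetical CUDA runtime check, e.g. for the cudaFuncSetAttribute call above.
#define chkerr(expr)                                                       \
    do {                                                                   \
        cudaError_t err_ = (expr);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)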
842fb0ed94f0b1f9768e73fb47b24d8da6d06b03.hip
// !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompareT.cuh"
#include "THHTensor.hpp"
#include "THHStream.hpp"
#include "../generic/THCTensorMathCompareT.cu"
#include "../THCGenerateShortType.h"
842fb0ed94f0b1f9768e73fb47b24d8da6d06b03.cu
#include "../THCTensorMathCompareT.cuh" #include "THCTensor.hpp" #include "THCStream.hpp" #include "../generic/THCTensorMathCompareT.cu" #include "../THCGenerateShortType.h"
57784212e553694cf4f06c5e488fa87f90ebdcad.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>

__global__ void kernelx(int *A, int *x, int *b, int N){
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if(tId< N){
        for(int k=0;k<N;k++){
            atomicAdd(&b[k],A[(int)(k*1e4+tId)]*x[tId]);
        }
    }
}

int main(int argc, char const *argv[])
{
    int n = 1e4;
    int block_size = 256;
    int grid_size = (int) ceil((float) n/ block_size);
    int *GPU_b;
    int *GPU_x;
    int *GPU_A;
    int *CPU_x = (int *) malloc(1e4 * sizeof (int));
    int *CPU_A = (int *) malloc(1e8 * sizeof (int));
    for(int k = 0; k < 1e8; k++){
        if(k < 1e4){
            CPU_x[k] = 1;
        }
        CPU_A[k] = 1;
    }
    hipMalloc(&GPU_x , 1e4 * sizeof(int));
    hipMalloc(&GPU_b , 1e4 * sizeof(int));
    hipMalloc(&GPU_A , 1e8 * sizeof(int));
    hipMemcpy(GPU_A, CPU_A, 1e8 * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(GPU_x, CPU_x, 1e4 * sizeof(int), hipMemcpyHostToDevice);
    hipMemset(GPU_b,0,1e4 * sizeof(int));
    hipLaunchKernelGGL(( kernelx), dim3(grid_size), dim3(block_size), 0, 0, GPU_A, GPU_x, GPU_b, n);
    hipMemcpy(CPU_x, GPU_b, 1e4 * sizeof(int), hipMemcpyDeviceToHost);
    //for(int k = 0; k< 1e4; k++){
    //    printf("%d\n", CPU_x[k]);
    //}
    hipFree(GPU_x);
    hipFree(GPU_b);
    hipFree(GPU_A);
    free(CPU_x);
    free(CPU_A);
    return(0);
}
57784212e553694cf4f06c5e488fa87f90ebdcad.cu
#include <stdio.h>
#include <math.h>

__global__ void kernelx(int *A, int *x, int *b, int N){
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if(tId< N){
        for(int k=0;k<N;k++){
            atomicAdd(&b[k],A[(int)(k*1e4+tId)]*x[tId]);
        }
    }
}

int main(int argc, char const *argv[])
{
    int n = 1e4;
    int block_size = 256;
    int grid_size = (int) ceil((float) n/ block_size);
    int *GPU_b;
    int *GPU_x;
    int *GPU_A;
    int *CPU_x = (int *) malloc(1e4 * sizeof (int));
    int *CPU_A = (int *) malloc(1e8 * sizeof (int));
    for(int k = 0; k < 1e8; k++){
        if(k < 1e4){
            CPU_x[k] = 1;
        }
        CPU_A[k] = 1;
    }
    cudaMalloc(&GPU_x , 1e4 * sizeof(int));
    cudaMalloc(&GPU_b , 1e4 * sizeof(int));
    cudaMalloc(&GPU_A , 1e8 * sizeof(int));
    cudaMemcpy(GPU_A, CPU_A, 1e8 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(GPU_x, CPU_x, 1e4 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(GPU_b,0,1e4 * sizeof(int));
    kernelx<<<grid_size, block_size>>>(GPU_A, GPU_x, GPU_b, n);
    cudaMemcpy(CPU_x, GPU_b, 1e4 * sizeof(int), cudaMemcpyDeviceToHost);
    //for(int k = 0; k< 1e4; k++){
    //    printf("%d\n", CPU_x[k]);
    //}
    cudaFree(GPU_x);
    cudaFree(GPU_b);
    cudaFree(GPU_A);
    free(CPU_x);
    free(CPU_A);
    return(0);
}
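// Editor's note (illustrative sketch, not part of the original pair above): kernelx has
// every thread walk all N output rows and accumulate into b[k] with atomicAdd. A common
// alternative that needs no atomics is one thread per output row; the hypothetical kernel
// below assumes the same row-major N x N layout of A used by kernelx.
__global__ void kernelx_row(const int *A, const int *x, int *b, int N){
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    if(row < N){
        int sum = 0;
        for(int j = 0; j < N; j++){
            sum += A[row * N + j] * x[j];   // b[row] = dot(A[row, :], x)
        }
        b[row] = sum;                        // this thread is the only writer, so no atomicAdd
    }
}
// It would be launched the same way as kernelx, e.g.
//   kernelx_row<<<grid_size, block_size>>>(GPU_A, GPU_x, GPU_b, n);
// and with the all-ones inputs above every b[row] should come out as 10000.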
67b3a1e0a5895584695c55dab93142b29a3b8132.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************** Emitting C Generated Code *******************************************/ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "cudnn_header.h" #include "nccl_header.h" #include <string.h> #include <cblas.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include "cublas_header.h" #include <stdbool.h> #include "mpi_header.h" #include "scanner_header.h" /************* Functions **************/ __global__ void x10(float* x11, float* x12, int x13, int x14, int x15) { // this is the permute kernel for [1, 0, 2] // arg0: 3D input tensor (dimZ x dimY x dimX) // arg1: 3D output tensor (dimY x dimZ x dimX) // arg2: dimZ of input // arg3: dimY of input // arg4: dimX of input // caller must use <<<dim3(dimY, dimZ, 1), dim3(A, 1, 1)>>> where A < dimX // each threadblock hands one dimX in coalease size of A, then we have dimZ x dimY threadblocks // this kernel might be inefficient if the dimX is small. TODO int x16 = blockDim.x; int x17 = threadIdx.x; while (x17 < x15) { int x18 = x17; x12[(blockIdx.y + blockIdx.x * x13) * x15 + x18] = x11[(blockIdx.x + blockIdx.y * x14) * x15 + x18]; x17 = x17 + x16; } } __global__ void x20(float* x21, float x22, int x23) { // begin generating kernel function for FILL of type Float int x24 = gridDim.x * blockDim.x; int x25 = threadIdx.x + blockIdx.x * blockDim.x; while (x25 < x23) { x21[x25] = x22; x25 = x25 + x24; } // end generating kernel function for FILL of type Float } __global__ void x29(float* x30, float* x31, int x32) { // begin generating kernel function for ACCUM of type Float int x33 = gridDim.x * blockDim.x; int x34 = threadIdx.x + blockIdx.x * blockDim.x; while (x34 < x32) { int x35 = x34; x30[x35] = x30[x35] + x31[x35]; x34 = x34 + x33; } // end generating kernel function for ACCUM of type Float } /**************** Snippet ****************/ void Snippet(int x0) { // begin setting up the MPI/NCCL environment int x1 = 0; int x2 = 0; MPICHECK(MPI_Init(NULL, NULL)); MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &x2)); MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x1)); MPICHECK(MPI_Barrier(MPI_COMM_WORLD)); CUDA_CALL(hipSetDevice(x2)); ncclUniqueId x3; NCCLCHECK(ncclGetUniqueId(&x3)); MPICHECK(MPI_Bcast(&x3, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD)); ncclComm_t x4; NCCLCHECK(ncclCommInitRank(&x4, x1, x3, x2)); hipStream_t x5; CUDA_CALL(hipStreamCreateWithFlags(&x5, hipStreamNonBlocking)); int x6 = x2; // end setting up the MPI/NCCL environment // begin initializing GPU array of size 64 and type Float float* x7 = (float*)malloc(64 * sizeof(float)); CUDA_CALL(hipSetDevice(x6)); float* x8 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x8, (size_t)(64 * sizeof(float)))); scan_float_array(x7, 64, "golden/input_rank_%d.data", x6); CUDA_CALL(hipMemcpy(x8, x7, (size_t)(64 * sizeof(float)), hipMemcpyHostToDevice)); // end initializing GPU array of size 64 and type Float // begin allocating gpu array of size 64 and type Float for the output of permute CUDA_CALL(hipSetDevice(x6)); float* x9 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x9, (size_t)(64 * sizeof(float)))); // end allocating gpu array of size 64 and type Float for the output of permute // begin calling permute kernel hipLaunchKernelGGL(( x10), dim3(dim3(2, 4, 1)), dim3(dim3(7, 1, 1)), 0, 0, x8, x9, 4, 2, 8); // end calling permute kernel // begin initializing fixed GPU array of size 64 and type Float and device 
(pre-rename) x39 CUDA_CALL(hipSetDevice(x6)); float* x19 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x19, (size_t)(64 * sizeof(float)))); hipLaunchKernelGGL(( x20), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x19, 0, 64); // end initializing fixed GPU array of size 64 and type Float and device (pre-rename) x39 // begin checking GPU array of size 64 and type Float float* x26 = (float*)malloc(64 * sizeof(float)); CUDA_CALL(hipMemcpy(x26, x9, (size_t)(64 * sizeof(float)), hipMemcpyDeviceToHost)); check_float_array_with_file(x26, 64, "golden/loss_rank_%d.data", x6); // end checking GPU array of size 64 and type Float // begin initializing fixed GPU array of size 64 and type Float and device (pre-rename) x39 CUDA_CALL(hipSetDevice(x6)); float* x27 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x27, (size_t)(64 * sizeof(float)))); hipLaunchKernelGGL(( x20), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x27, 1, 64); // end initializing fixed GPU array of size 64 and type Float and device (pre-rename) x39 // begin allocating gpu array of size 64 and type Float for the output of permute CUDA_CALL(hipSetDevice(x6)); float* x28 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x28, (size_t)(64 * sizeof(float)))); // end allocating gpu array of size 64 and type Float for the output of permute // begin calling permute kernel hipLaunchKernelGGL(( x10), dim3(dim3(4, 2, 1)), dim3(dim3(7, 1, 1)), 0, 0, x27, x28, 2, 4, 8); // end calling permute kernel // begin computing ACCUM on GPU for size 64 and type Float at device (pre-rename) x39 with base_operand x120 and addition_operand x183 CUDA_CALL(hipSetDevice(x6)); hipLaunchKernelGGL(( x29), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x19, x28, 64); // end computing ACCUM on GPU for size 64 and type Float at device (pre-rename) x39 with base_operand x120 and addition_operand x183 // begin checking GPU array of size 64 and type Float float* x36 = (float*)malloc(64 * sizeof(float)); CUDA_CALL(hipMemcpy(x36, x19, (size_t)(64 * sizeof(float)), hipMemcpyDeviceToHost)); check_float_array_with_file(x36, 64, "golden/input_grad_rank_%d.data", x6); // end checking GPU array of size 64 and type Float NCCLCHECK(ncclCommDestroy(x4)); MPICHECK(MPI_Finalize()); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
67b3a1e0a5895584695c55dab93142b29a3b8132.cu
/***************************************** Emitting C Generated Code *******************************************/ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "cudnn_header.h" #include "nccl_header.h" #include <string.h> #include <cblas.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include "cublas_header.h" #include <stdbool.h> #include "mpi_header.h" #include "scanner_header.h" /************* Functions **************/ __global__ void x10(float* x11, float* x12, int x13, int x14, int x15) { // this is the permute kernel for [1, 0, 2] // arg0: 3D input tensor (dimZ x dimY x dimX) // arg1: 3D output tensor (dimY x dimZ x dimX) // arg2: dimZ of input // arg3: dimY of input // arg4: dimX of input // caller must use <<<dim3(dimY, dimZ, 1), dim3(A, 1, 1)>>> where A < dimX // each threadblock hands one dimX in coalease size of A, then we have dimZ x dimY threadblocks // this kernel might be inefficient if the dimX is small. TODO int x16 = blockDim.x; int x17 = threadIdx.x; while (x17 < x15) { int x18 = x17; x12[(blockIdx.y + blockIdx.x * x13) * x15 + x18] = x11[(blockIdx.x + blockIdx.y * x14) * x15 + x18]; x17 = x17 + x16; } } __global__ void x20(float* x21, float x22, int x23) { // begin generating kernel function for FILL of type Float int x24 = gridDim.x * blockDim.x; int x25 = threadIdx.x + blockIdx.x * blockDim.x; while (x25 < x23) { x21[x25] = x22; x25 = x25 + x24; } // end generating kernel function for FILL of type Float } __global__ void x29(float* x30, float* x31, int x32) { // begin generating kernel function for ACCUM of type Float int x33 = gridDim.x * blockDim.x; int x34 = threadIdx.x + blockIdx.x * blockDim.x; while (x34 < x32) { int x35 = x34; x30[x35] = x30[x35] + x31[x35]; x34 = x34 + x33; } // end generating kernel function for ACCUM of type Float } /**************** Snippet ****************/ void Snippet(int x0) { // begin setting up the MPI/NCCL environment int x1 = 0; int x2 = 0; MPICHECK(MPI_Init(NULL, NULL)); MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &x2)); MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x1)); MPICHECK(MPI_Barrier(MPI_COMM_WORLD)); CUDA_CALL(cudaSetDevice(x2)); ncclUniqueId x3; NCCLCHECK(ncclGetUniqueId(&x3)); MPICHECK(MPI_Bcast(&x3, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD)); ncclComm_t x4; NCCLCHECK(ncclCommInitRank(&x4, x1, x3, x2)); cudaStream_t x5; CUDA_CALL(cudaStreamCreateWithFlags(&x5, cudaStreamNonBlocking)); int x6 = x2; // end setting up the MPI/NCCL environment // begin initializing GPU array of size 64 and type Float float* x7 = (float*)malloc(64 * sizeof(float)); CUDA_CALL(cudaSetDevice(x6)); float* x8 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x8, (size_t)(64 * sizeof(float)))); scan_float_array(x7, 64, "golden/input_rank_%d.data", x6); CUDA_CALL(cudaMemcpy(x8, x7, (size_t)(64 * sizeof(float)), cudaMemcpyHostToDevice)); // end initializing GPU array of size 64 and type Float // begin allocating gpu array of size 64 and type Float for the output of permute CUDA_CALL(cudaSetDevice(x6)); float* x9 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x9, (size_t)(64 * sizeof(float)))); // end allocating gpu array of size 64 and type Float for the output of permute // begin calling permute kernel x10<<<dim3(2, 4, 1), dim3(7, 1, 1)>>>(x8, x9, 4, 2, 8); // end calling permute kernel // begin initializing fixed GPU array of size 64 and type Float and device (pre-rename) x39 CUDA_CALL(cudaSetDevice(x6)); float* x19 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x19, 
(size_t)(64 * sizeof(float)))); x20<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x19, 0, 64); // end initializing fixed GPU array of size 64 and type Float and device (pre-rename) x39 // begin checking GPU array of size 64 and type Float float* x26 = (float*)malloc(64 * sizeof(float)); CUDA_CALL(cudaMemcpy(x26, x9, (size_t)(64 * sizeof(float)), cudaMemcpyDeviceToHost)); check_float_array_with_file(x26, 64, "golden/loss_rank_%d.data", x6); // end checking GPU array of size 64 and type Float // begin initializing fixed GPU array of size 64 and type Float and device (pre-rename) x39 CUDA_CALL(cudaSetDevice(x6)); float* x27 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x27, (size_t)(64 * sizeof(float)))); x20<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x27, 1, 64); // end initializing fixed GPU array of size 64 and type Float and device (pre-rename) x39 // begin allocating gpu array of size 64 and type Float for the output of permute CUDA_CALL(cudaSetDevice(x6)); float* x28 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x28, (size_t)(64 * sizeof(float)))); // end allocating gpu array of size 64 and type Float for the output of permute // begin calling permute kernel x10<<<dim3(4, 2, 1), dim3(7, 1, 1)>>>(x27, x28, 2, 4, 8); // end calling permute kernel // begin computing ACCUM on GPU for size 64 and type Float at device (pre-rename) x39 with base_operand x120 and addition_operand x183 CUDA_CALL(cudaSetDevice(x6)); x29<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x19, x28, 64); // end computing ACCUM on GPU for size 64 and type Float at device (pre-rename) x39 with base_operand x120 and addition_operand x183 // begin checking GPU array of size 64 and type Float float* x36 = (float*)malloc(64 * sizeof(float)); CUDA_CALL(cudaMemcpy(x36, x19, (size_t)(64 * sizeof(float)), cudaMemcpyDeviceToHost)); check_float_array_with_file(x36, 64, "golden/input_grad_rank_%d.data", x6); // end checking GPU array of size 64 and type Float NCCLCHECK(ncclCommDestroy(x4)); MPICHECK(MPI_Finalize()); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
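// Editor's note (illustrative sketch, not part of the generated file above): a minimal
// standalone driver for the [1,0,2] permute, following the launch rule spelled out in
// x10's comments (grid = dim3(dimY, dimZ, 1), block = dim3(A, 1, 1) with A < dimX).
// The kernel below mirrors x10's indexing in a simplified form; the driver, the name
// permute_102, and the check loop are assumptions for illustration, reusing the same
// 4 x 2 x 8 shape as Snippet.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void permute_102(const float* in, float* out, int dimZ, int dimY, int dimX) {
    // one block per (y, z) pair; threads stride over the dimX row
    for (int i = threadIdx.x; i < dimX; i += blockDim.x)
        out[(blockIdx.y + blockIdx.x * dimZ) * dimX + i] =
            in[(blockIdx.x + blockIdx.y * dimY) * dimX + i];
}

int main() {
    const int Z = 4, Y = 2, X = 8, total = Z * Y * X;
    float h_in[total], h_out[total];
    for (int i = 0; i < total; ++i) h_in[i] = (float)i;

    float *d_in, *d_out;
    cudaMalloc(&d_in, total * sizeof(float));
    cudaMalloc(&d_out, total * sizeof(float));
    cudaMemcpy(d_in, h_in, total * sizeof(float), cudaMemcpyHostToDevice);

    // grid.x = dimY, grid.y = dimZ; 7 threads per block (< dimX), as in the generated call.
    permute_102<<<dim3(Y, Z, 1), dim3(7, 1, 1)>>>(d_in, d_out, Z, Y, X);
    cudaMemcpy(h_out, d_out, total * sizeof(float), cudaMemcpyDeviceToHost);

    // The output is laid out (dimY, dimZ, dimX): out[y][z][i] must equal in[z][y][i].
    int mismatches = 0;
    for (int z = 0; z < Z; ++z)
        for (int y = 0; y < Y; ++y)
            for (int i = 0; i < X; ++i)
                if (h_out[(y * Z + z) * X + i] != h_in[(z * Y + y) * X + i]) ++mismatches;
    printf("mismatches: %d\n", mismatches);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}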