Columns (all strings; min-max lengths as reported by the viewer):
  hip_filename   5-84
  hip_content    79-9.69M
  cuda_filename  4-83
  cuda_content   19-9.69M
54d9ff347aef64b40b829ccbf43089ea748931a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_meta.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/scatter.cu.h" #include "paddle/phi/kernels/funcs/sparse/scatter.cu.h" #include "paddle/phi/kernels/sparse/convolution_kernel.h" #include "paddle/phi/kernels/sparse/gpu/convolution.cu.h" namespace phi { namespace sparse { template <typename T, typename IntT> void Conv3dGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, SparseCooTensor* out, DenseTensor* rulebook) { // update padding and dilation // Currently, only support x.layout is NDHWC, groups = 1 // if x.layout != NDHWC then transpose(x), transpose(weight) const auto& x_dims = x.dims(); const auto& kernel_dims = kernel.dims(); int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2]; DDim out_dims = {1, 1, 1, 1, 1}; std::vector<int> kernel_sizes(kernel_dims.size()); for (int i = 0; i < kernel_dims.size(); i++) { kernel_sizes[i] = kernel_dims[i]; } std::vector<int> subm_paddings(paddings), subm_strides(strides); if (subm) { // the out shape of subm_conv is same as input shape // reset the padding=kernel_size/2 and strides=1 phi::funcs::sparse::ResetSubmKernelSizeAndStrides( kernel.dims(), &subm_paddings, &subm_strides); } phi::funcs::sparse::GetOutShape( x_dims, kernel_sizes, subm_paddings, dilations, subm_strides, &out_dims); const int in_channels = kernel_dims[3]; const int out_channels = kernel_dims[4]; std::vector<int> offsets(kernel_size + 1), h_counter(kernel_size); // Second algorithm: // https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf // 1. product rulebook DenseTensorMeta counter_meta( DataType::INT32, {kernel_size}, DataLayout::NCHW); DenseTensorMeta offsets_meta( DataType::INT32, {kernel_size}, DataLayout::NCHW); DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(offsets_meta)); DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW); DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta)); DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta)); int n = ProductRuleBook<T, GPUContext, IntT>(dev_ctx, x, kernel_sizes, subm_paddings, dilations, subm_strides, out_dims, subm, rulebook, &counter_per_kernel, &offsets_per_kernel, &out_index, &unique_value, out, &h_counter, &offsets); const int* counter_ptr = counter_per_kernel.data<int>(); const int* offsets_ptr = counter_per_kernel.data<int>(); const IntT* rulebook_ptr = rulebook->data<IntT>(); // 2. 
gather DenseTensorMeta in_features_meta( x.dtype(), {n, in_channels}, DataLayout::NCHW); DenseTensorMeta out_features_meta( x.dtype(), {n, out_channels}, DataLayout::NCHW); phi::DenseTensor in_features = phi::Empty(dev_ctx, std::move(in_features_meta)); phi::DenseTensor out_features = phi::Empty(dev_ctx, std::move(out_features_meta)); T* in_features_ptr = in_features.data<T>(); T* out_features_ptr = out_features.data<T>(); phi::funcs::SetConstant<GPUContext, T> set_zero; set_zero(dev_ctx, &out_features, static_cast<T>(0.0f)); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * in_channels, 1); hipLaunchKernelGGL(( GatherKernel<T, IntT>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, dev_ctx.stream(), x.non_zero_elements().data<T>(), rulebook_ptr + n, in_features_ptr, n, in_channels); // 3. call gemm for every werght auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx); auto* out_values = out->mutable_non_zero_elements(); T* out_values_ptr = out_values->data<T>(); const T* kernel_ptr = kernel.data<T>(); for (int i = 0; i < kernel_size; i++) { if (h_counter[i] <= 0) { continue; } // call gemm: (n, in_channels) * (in_channels, out_channels) const int M = h_counter[i]; const int K = in_channels; const int N = out_channels; T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels; const T* tmp_kernel_ptr = kernel_ptr + i * K * N; T* tmp_out_ptr = out_features_ptr + offsets[i] * out_channels; blas.GEMM(CblasNoTrans, CblasNoTrans, M, N, K, static_cast<T>(1), tmp_in_ptr, tmp_kernel_ptr, static_cast<T>(0), tmp_out_ptr); } // 4. scatter if (subm) { set_zero(dev_ctx, out_values, static_cast<T>(0.0f)); config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * out_channels, 1); hipLaunchKernelGGL(( phi::funcs::ScatterCUDAKernel<T, IntT>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), out_features_ptr, rulebook_ptr + 2 * n, out_values_ptr, n, out_channels, false); } else { config = phi::backends::gpu::GetGpuLaunchConfig1D( dev_ctx, out->nnz() * out_channels, 1); hipLaunchKernelGGL(( phi::funcs::sparse::ScatterKernel<T>) , dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, dev_ctx.stream(), out_features_ptr, unique_value.data<int>(), out_index.data<int>(), out->nnz(), n, out_channels, out_values_ptr); } } /** * x: (N, D, H, W, C) * kernel: (D, H, W, C, OC) * out: (N, D, H, W, OC) **/ template <typename T, typename Context> void Conv3dKernel(const Context& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, SparseCooTensor* out, DenseTensor* rulebook) { PD_VISIT_INTEGRAL_TYPES( x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] { Conv3dGPUKernel<T, data_t>(dev_ctx, x, kernel, paddings, dilations, strides, groups, subm, out, rulebook); })); } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(sparse_conv3d, GPU, ALL_LAYOUT, phi::sparse::Conv3dKernel, float, double, phi::dtype::float16) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); }
54d9ff347aef64b40b829ccbf43089ea748931a8.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_meta.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/funcs/blas/blas.h" #include "paddle/phi/kernels/funcs/scatter.cu.h" #include "paddle/phi/kernels/funcs/sparse/scatter.cu.h" #include "paddle/phi/kernels/sparse/convolution_kernel.h" #include "paddle/phi/kernels/sparse/gpu/convolution.cu.h" namespace phi { namespace sparse { template <typename T, typename IntT> void Conv3dGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, SparseCooTensor* out, DenseTensor* rulebook) { // update padding and dilation // Currently, only support x.layout is NDHWC, groups = 1 // if x.layout != NDHWC then transpose(x), transpose(weight) const auto& x_dims = x.dims(); const auto& kernel_dims = kernel.dims(); int kernel_size = kernel_dims[0] * kernel_dims[1] * kernel_dims[2]; DDim out_dims = {1, 1, 1, 1, 1}; std::vector<int> kernel_sizes(kernel_dims.size()); for (int i = 0; i < kernel_dims.size(); i++) { kernel_sizes[i] = kernel_dims[i]; } std::vector<int> subm_paddings(paddings), subm_strides(strides); if (subm) { // the out shape of subm_conv is same as input shape // reset the padding=kernel_size/2 and strides=1 phi::funcs::sparse::ResetSubmKernelSizeAndStrides( kernel.dims(), &subm_paddings, &subm_strides); } phi::funcs::sparse::GetOutShape( x_dims, kernel_sizes, subm_paddings, dilations, subm_strides, &out_dims); const int in_channels = kernel_dims[3]; const int out_channels = kernel_dims[4]; std::vector<int> offsets(kernel_size + 1), h_counter(kernel_size); // Second algorithm: // https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf // 1. product rulebook DenseTensorMeta counter_meta( DataType::INT32, {kernel_size}, DataLayout::NCHW); DenseTensorMeta offsets_meta( DataType::INT32, {kernel_size}, DataLayout::NCHW); DenseTensor counter_per_kernel = phi::Empty(dev_ctx, std::move(counter_meta)); DenseTensor offsets_per_kernel = phi::Empty(dev_ctx, std::move(offsets_meta)); DenseTensorMeta index_meta(DataType::INT32, {1}, DataLayout::NCHW); DenseTensor out_index = phi::Empty(dev_ctx, std::move(index_meta)); DenseTensor unique_value = phi::Empty(dev_ctx, std::move(index_meta)); int n = ProductRuleBook<T, GPUContext, IntT>(dev_ctx, x, kernel_sizes, subm_paddings, dilations, subm_strides, out_dims, subm, rulebook, &counter_per_kernel, &offsets_per_kernel, &out_index, &unique_value, out, &h_counter, &offsets); const int* counter_ptr = counter_per_kernel.data<int>(); const int* offsets_ptr = counter_per_kernel.data<int>(); const IntT* rulebook_ptr = rulebook->data<IntT>(); // 2. 
gather DenseTensorMeta in_features_meta( x.dtype(), {n, in_channels}, DataLayout::NCHW); DenseTensorMeta out_features_meta( x.dtype(), {n, out_channels}, DataLayout::NCHW); phi::DenseTensor in_features = phi::Empty(dev_ctx, std::move(in_features_meta)); phi::DenseTensor out_features = phi::Empty(dev_ctx, std::move(out_features_meta)); T* in_features_ptr = in_features.data<T>(); T* out_features_ptr = out_features.data<T>(); phi::funcs::SetConstant<GPUContext, T> set_zero; set_zero(dev_ctx, &out_features, static_cast<T>(0.0f)); auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * in_channels, 1); GatherKernel<T, IntT><<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>(x.non_zero_elements().data<T>(), rulebook_ptr + n, in_features_ptr, n, in_channels); // 3. call gemm for every werght auto blas = phi::funcs::GetBlas<GPUContext, T>(dev_ctx); auto* out_values = out->mutable_non_zero_elements(); T* out_values_ptr = out_values->data<T>(); const T* kernel_ptr = kernel.data<T>(); for (int i = 0; i < kernel_size; i++) { if (h_counter[i] <= 0) { continue; } // call gemm: (n, in_channels) * (in_channels, out_channels) const int M = h_counter[i]; const int K = in_channels; const int N = out_channels; T* tmp_in_ptr = in_features_ptr + offsets[i] * in_channels; const T* tmp_kernel_ptr = kernel_ptr + i * K * N; T* tmp_out_ptr = out_features_ptr + offsets[i] * out_channels; blas.GEMM(CblasNoTrans, CblasNoTrans, M, N, K, static_cast<T>(1), tmp_in_ptr, tmp_kernel_ptr, static_cast<T>(0), tmp_out_ptr); } // 4. scatter if (subm) { set_zero(dev_ctx, out_values, static_cast<T>(0.0f)); config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, n * out_channels, 1); phi::funcs::ScatterCUDAKernel<T, IntT> <<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(out_features_ptr, rulebook_ptr + 2 * n, out_values_ptr, n, out_channels, false); } else { config = phi::backends::gpu::GetGpuLaunchConfig1D( dev_ctx, out->nnz() * out_channels, 1); phi::funcs::sparse::ScatterKernel<T> <<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>(out_features_ptr, unique_value.data<int>(), out_index.data<int>(), out->nnz(), n, out_channels, out_values_ptr); } } /** * x: (N, D, H, W, C) * kernel: (D, H, W, C, OC) * out: (N, D, H, W, OC) **/ template <typename T, typename Context> void Conv3dKernel(const Context& dev_ctx, const SparseCooTensor& x, const DenseTensor& kernel, const std::vector<int>& paddings, const std::vector<int>& dilations, const std::vector<int>& strides, const int groups, const bool subm, SparseCooTensor* out, DenseTensor* rulebook) { PD_VISIT_INTEGRAL_TYPES( x.non_zero_indices().dtype(), "Conv3dGPUKernel", ([&] { Conv3dGPUKernel<T, data_t>(dev_ctx, x, kernel, paddings, dilations, strides, groups, subm, out, rulebook); })); } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(sparse_conv3d, GPU, ALL_LAYOUT, phi::sparse::Conv3dKernel, float, double, phi::dtype::float16) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); }
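The pair above illustrates the core rewrite hipify performs: the header hip/hip_runtime.h is prepended and each CUDA triple-chevron launch becomes a hipLaunchKernelGGL call that takes the kernel, the dim3 grid and block sizes, the dynamic shared-memory byte count, and the stream, followed by the kernel arguments. A minimal sketch of that mapping, using a hypothetical scale kernel that is not part of the dataset:

#include "hip/hip_runtime.h"

// Hypothetical kernel, used only to show the launch-syntax mapping.
__global__ void scale(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA source form:  scale<<<grid, block, 0, stream>>>(d_data, factor, n);
  // hipify emits the equivalent HIP macro call:
  hipLaunchKernelGGL(scale, grid, block, 0, stream, d_data, factor, n);
}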
9b7212f15f9ab7019094881f3afa1fbc2bebe9f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> s d c */ #include "common_magma.h" #define zgemv_bs 32 extern __shared__ magmaDoubleComplex shared_data[]; __global__ void kernel_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A = A_array[blockIdx.x]; magmaDoubleComplex *x = x_array[blockIdx.x]; magmaDoubleComplex *y = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex *buff = (magmaDoubleComplex*)shared_data; if(tx < n) { buff[tx] = x[tx*incx]; } __syncthreads(); if(tx < m ) { for(int j=0; j < n ; j++) { res += A[tx]*buff[j]; A += lda; } y[tx*incy] = alpha * res + y[tx*incy] * beta; } } /* Matrix Non-transpose Vector Multiplication y := alpha*A*x + beta*y, */ extern "C" void magmablas_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { if( m > 512 || n > 512) { fprintf( stderr, "m=%d, n=%d, zgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_zgemv instead", m, n, 512); return ; } dim3 grid(batchCount, 1, 1); dim3 threads(max(m,n), 1, 1); hipLaunchKernelGGL(( kernel_zgemvn_batched), dim3(grid), dim3(threads), n * sizeof(magmaDoubleComplex) , 0, m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } __global__ void kernel_zgemvt_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = A_array[blockIdx.x]; magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += A_ptr[i] * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += A_ptr[m1] * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Transpose Vector Multiplication y := alpha* A**T *x + beta*y, */ extern "C" void magmablas_zgemvt_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; hipLaunchKernelGGL(( kernel_zgemvt_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #if defined(PRECISION_z) || defined (PRECISION_c) __global__ void kernel_zgemvc_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, 
magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = A_array[blockIdx.x]; magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += MAGMA_Z_CNJG (A_ptr[i]) * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += MAGMA_Z_CNJG(A_ptr[m1]) * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Conjugate Transpose Vector Multiplication y := alpha* A**H *x + beta*y, */ extern "C" void magmablas_zgemvc_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; hipLaunchKernelGGL(( kernel_zgemvc_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #endif // defined(PRECISION_z) || defined (PRECISION_c) /** Purpose ------- This routine computes Y = alpha opt(A) x + beta y, on the GPU, where A = A_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1]. This is a batched version. @param[in] trans CHARACTER*1. On entry, TRANS specifies the form of op( A ) to be used in the matrix multiplication as follows: = 'N': op( A ) = A. = 'T': op( A ) = A**T. = 'C': op( A ) = A**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix opt(A). @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix opt(A) @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A_array A = A_array[i] A: COMPLEX*16 array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x_array x = x_array[i] x: COMPLEX*16 array of dimension. n if trans == MagmaNoTrans. m if trans == MagmaTrans or MagmaConjTrans. @param[in] incx INTEGER. incx specifies the increment for the elments of x. incx must not be zero. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. @param[out] y_array y = y_array[i]: On exit y = alpha opt(A) x + beta y. y: COMPLEX*16 array of dimension. m if trans == MagmaNoTrans. n if trans == MagmaTrans or MagmaConjTrans. @param[in] incy INTEGER. incy specifies the increment for the elments of y. incy must not be zero. @param[in] batchCount INTEGER number of pointers contained in A_array, x_array and y_array. 
@ingroup magma_zblas2 ******************************************************************* */ extern "C" void magmablas_zgemv_batched( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, magma_int_t lda, magmaDoubleComplex **x_array, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, magma_int_t incy, magma_int_t batchCount) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if(m==0 || n ==0 ) return; if ( trans == MagmaNoTrans ) { magmablas_zgemvn_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaTrans ) { magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaConjTrans ) { #if defined(PRECISION_z) || defined (PRECISION_c) magmablas_zgemvc_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #else magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #endif } else { fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) ); } } #undef zgemv_bs
9b7212f15f9ab7019094881f3afa1fbc2bebe9f1.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @precisions normal z -> s d c */ #include "common_magma.h" #define zgemv_bs 32 extern __shared__ magmaDoubleComplex shared_data[]; __global__ void kernel_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A = A_array[blockIdx.x]; magmaDoubleComplex *x = x_array[blockIdx.x]; magmaDoubleComplex *y = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex *buff = (magmaDoubleComplex*)shared_data; if(tx < n) { buff[tx] = x[tx*incx]; } __syncthreads(); if(tx < m ) { for(int j=0; j < n ; j++) { res += A[tx]*buff[j]; A += lda; } y[tx*incy] = alpha * res + y[tx*incy] * beta; } } /* Matrix Non-transpose Vector Multiplication y := alpha*A*x + beta*y, */ extern "C" void magmablas_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { if( m > 512 || n > 512) { fprintf( stderr, "m=%d, n=%d, zgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_zgemv instead", m, n, 512); return ; } dim3 grid(batchCount, 1, 1); dim3 threads(max(m,n), 1, 1); kernel_zgemvn_batched<<< grid, threads, n * sizeof(magmaDoubleComplex) >>>( m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } __global__ void kernel_zgemvt_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = A_array[blockIdx.x]; magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += A_ptr[i] * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += A_ptr[m1] * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Transpose Vector Multiplication y := alpha* A**T *x + beta*y, */ extern "C" void magmablas_zgemvt_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; kernel_zgemvt_batched <<< grid, threads >>>(m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #if defined(PRECISION_z) || defined (PRECISION_c) __global__ void kernel_zgemvc_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = A_array[blockIdx.x]; 
magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += MAGMA_Z_CNJG (A_ptr[i]) * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += MAGMA_Z_CNJG(A_ptr[m1]) * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Conjugate Transpose Vector Multiplication y := alpha* A**H *x + beta*y, */ extern "C" void magmablas_zgemvc_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; kernel_zgemvc_batched <<< grid, threads >>>(m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #endif // defined(PRECISION_z) || defined (PRECISION_c) /** Purpose ------- This routine computes Y = alpha opt(A) x + beta y, on the GPU, where A = A_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1]. This is a batched version. @param[in] trans CHARACTER*1. On entry, TRANS specifies the form of op( A ) to be used in the matrix multiplication as follows: = 'N': op( A ) = A. = 'T': op( A ) = A**T. = 'C': op( A ) = A**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix opt(A). @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix opt(A) @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A_array A = A_array[i] A: COMPLEX*16 array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x_array x = x_array[i] x: COMPLEX*16 array of dimension. n if trans == MagmaNoTrans. m if trans == MagmaTrans or MagmaConjTrans. @param[in] incx INTEGER. incx specifies the increment for the elments of x. incx must not be zero. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. @param[out] y_array y = y_array[i]: On exit y = alpha opt(A) x + beta y. y: COMPLEX*16 array of dimension. m if trans == MagmaNoTrans. n if trans == MagmaTrans or MagmaConjTrans. @param[in] incy INTEGER. incy specifies the increment for the elments of y. incy must not be zero. @param[in] batchCount INTEGER number of pointers contained in A_array, x_array and y_array. 
@ingroup magma_zblas2 ******************************************************************* */ extern "C" void magmablas_zgemv_batched( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, magma_int_t lda, magmaDoubleComplex **x_array, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, magma_int_t incy, magma_int_t batchCount) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } if(m==0 || n ==0 ) return; if ( trans == MagmaNoTrans ) { magmablas_zgemvn_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaTrans ) { magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaConjTrans ) { #if defined(PRECISION_z) || defined (PRECISION_c) magmablas_zgemvc_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #else magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #endif } else { fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) ); } } #undef zgemv_bs
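Besides the launch rewrite, this pair carries a launch with dynamic shared memory (the n * sizeof(magmaDoubleComplex) argument) and a block-level reduction in kernel_zgemvt_batched, where each thread accumulates a strided partial result and the block then combines the partials in shared memory. A minimal sketch of that reduction idiom on float, assuming the kernel is launched with exactly BLOCK_SIZE threads per block (this is not MAGMA's kernel):

#define BLOCK_SIZE 32

// Each thread accumulates a strided partial sum; the block then combines the
// partials with a tree reduction in shared memory.
__global__ void block_sum(const float* in, float* out, int n) {
  __shared__ float sdata[BLOCK_SIZE];
  int tid = threadIdx.x;
  float acc = 0.0f;
  for (int i = tid; i < n; i += BLOCK_SIZE) acc += in[i];
  sdata[tid] = acc;
  __syncthreads();
  for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
    if (tid < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = sdata[0];
}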
140c00ff6fae545b45717d4a8ea10cbad9f3e825.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib> #include <helper_functions.h> #include "gemm_cublas_functions.h" int main(int argc, char * argv[]){ if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -size=matrixSquaredRootSize\n"); exit(EXIT_SUCCESS); } //Matrix size (width == height) int N; if (checkCmdLineFlag(argc, (const char **)argv, "size")) { N = ::max(getCmdLineArgumentInt(argc, (const char **)argv, "size"), 1); } MatrixMultiplyReal(N); return 0; }
140c00ff6fae545b45717d4a8ea10cbad9f3e825.cu
#include <stdlib> #include <helper_functions.h> #include "gemm_cublas_functions.h" int main(int argc, char * argv[]){ if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -size=matrixSquaredRootSize\n"); exit(EXIT_SUCCESS); } //Matrix size (width == height) int N; if (checkCmdLineFlag(argc, (const char **)argv, "size")) { N = std::max(getCmdLineArgumentInt(argc, (const char **)argv, "size"), 1); } MatrixMultiplyReal(N); return 0; }
d33e4cb1d4002429cd1face890aa736f6b749598.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
d33e4cb1d4002429cd1face890aa736f6b749598.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <sys/timeb.h> void Multiplication(float *__restrict__ c, float * a, float * b, int N) { #pragma acc parallel loop present(c, a, b) for (int n = 0; n < N; n++) { for (int m = 0; m < N; m++) { float sum = 0.f; for (int k = 0; k < N; k++) { sum += a[k + n * N] * b[k * N + m ]; } c[m + n * N] = sum; } } } int main() { int i; const int N = 4; float **Matrix_A = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_A[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_B = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_B[i] = (float*)malloc(N * sizeof(float*)); } float **Matrix_C = (float**)malloc(N * sizeof(float*)); for (i = 0; i < N; i++) { Matrix_C[i] = (float*)malloc(N * sizeof(float*)); } float * a = (float *)malloc(N * N * sizeof(float*)), * b = (float *)malloc(N * N * sizeof(float*)), * c = (float *)malloc(N * N * sizeof(float*)); srand(time(NULL)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { Matrix_A[i][j] = rand() % 5; Matrix_B[i][j] = rand() % 10; } } for (int i = 0; i < N; ++i) { for (int j = 0; j < N; j++) { a[j + i * N] = Matrix_A[i][j]; b[j + i * N] = Matrix_B[i][j]; } } #pragma acc data copyin (a[0:N*N], b[0:N*N]) copyout (c[0:N*N]) { Multiplication(c, a, b, N); } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_A[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { printf("%3.0f ", Matrix_B[i][j]); } printf("\n"); } printf("\n"); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Matrix_C[i][j] = c[j + i * N]; printf("%3.0f ", Matrix_C[i][j]); } printf("\n"); } free(a); free(b); free(c); free(Matrix_A); free(Matrix_B); free(Matrix_C); }
e95941202962adf38007451f9426e862d2a85f8a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> hipStream_t getQueryEngineCudaStream(); #include "BufferCompaction.h" #include "GpuInitGroups.h" #include "GpuRtConstants.h" #include "Logger/Logger.h" #define checkCudaErrors(err) CHECK_EQ(err, hipSuccess) template <typename T> __device__ int8_t* init_columnar_buffer(T* buffer_ptr, const T init_val, const uint32_t entry_count, const int32_t start, const int32_t step) { for (int32_t i = start; i < entry_count; i += step) { buffer_ptr[i] = init_val; } return reinterpret_cast<int8_t*>(buffer_ptr + entry_count); } extern "C" __device__ void init_columnar_group_by_buffer_gpu_impl( int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t agg_col_count, const int8_t* col_sizes, const bool need_padding, const bool keyless, const int8_t key_size) { const int32_t start = blockIdx.x * blockDim.x + threadIdx.x; const int32_t step = blockDim.x * gridDim.x; int8_t* buffer_ptr = reinterpret_cast<int8_t*>(groups_buffer); if (!keyless) { for (uint32_t i = 0; i < key_count; ++i) { switch (key_size) { case 1: buffer_ptr = init_columnar_buffer<int8_t>( buffer_ptr, EMPTY_KEY_8, groups_buffer_entry_count, start, step); break; case 2: buffer_ptr = init_columnar_buffer<int16_t>(reinterpret_cast<int16_t*>(buffer_ptr), EMPTY_KEY_16, groups_buffer_entry_count, start, step); break; case 4: buffer_ptr = init_columnar_buffer<int32_t>(reinterpret_cast<int32_t*>(buffer_ptr), EMPTY_KEY_32, groups_buffer_entry_count, start, step); break; case 8: buffer_ptr = init_columnar_buffer<int64_t>(reinterpret_cast<int64_t*>(buffer_ptr), EMPTY_KEY_64, groups_buffer_entry_count, start, step); break; default: // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet right now. break; } buffer_ptr = align_to_int64(buffer_ptr); } } int32_t init_idx = 0; for (int32_t i = 0; i < agg_col_count; ++i) { if (need_padding) { buffer_ptr = align_to_int64(buffer_ptr); } switch (col_sizes[i]) { case 1: buffer_ptr = init_columnar_buffer<int8_t>( buffer_ptr, init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 2: buffer_ptr = init_columnar_buffer<int16_t>(reinterpret_cast<int16_t*>(buffer_ptr), init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 4: buffer_ptr = init_columnar_buffer<int32_t>(reinterpret_cast<int32_t*>(buffer_ptr), init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 8: buffer_ptr = init_columnar_buffer<int64_t>(reinterpret_cast<int64_t*>(buffer_ptr), init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 0: continue; default: // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet now. 
break; } } __syncthreads(); } template <typename K> inline __device__ void fill_empty_device_key(K* keys_ptr, const uint32_t key_count, const K empty_key) { for (uint32_t i = 0; i < key_count; ++i) { keys_ptr[i] = empty_key; } } __global__ void init_group_by_buffer_gpu(int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const bool keyless, const int8_t warp_size) { const int32_t start = blockIdx.x * blockDim.x + threadIdx.x; const int32_t step = blockDim.x * gridDim.x; if (keyless) { for (int32_t i = start; i < groups_buffer_entry_count * row_size_quad * static_cast<int32_t>(warp_size); i += step) { groups_buffer[i] = init_vals[i % row_size_quad]; } __syncthreads(); return; } for (int32_t i = start; i < groups_buffer_entry_count; i += step) { int64_t* keys_ptr = groups_buffer + i * row_size_quad; switch (key_width) { case 4: fill_empty_device_key( reinterpret_cast<int32_t*>(keys_ptr), key_count, EMPTY_KEY_32); break; case 8: fill_empty_device_key( reinterpret_cast<int64_t*>(keys_ptr), key_count, EMPTY_KEY_64); break; default: break; } } const uint32_t values_off_quad = align_to_int64(key_count * key_width) / sizeof(int64_t); for (uint32_t i = start; i < groups_buffer_entry_count; i += step) { int64_t* vals_ptr = groups_buffer + i * row_size_quad + values_off_quad; const uint32_t val_count = row_size_quad - values_off_quad; // value slots are always 64-bit for (uint32_t j = 0; j < val_count; ++j) { vals_ptr[j] = init_vals[j]; } } __syncthreads(); } __global__ void init_columnar_group_by_buffer_gpu_wrapper( int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t agg_col_count, const int8_t* col_sizes, const bool need_padding, const bool keyless, const int8_t key_size) { init_columnar_group_by_buffer_gpu_impl(groups_buffer, init_vals, groups_buffer_entry_count, key_count, agg_col_count, col_sizes, need_padding, keyless, key_size); } void init_group_by_buffer_on_device(int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const bool keyless, const int8_t warp_size, const size_t block_size_x, const size_t grid_size_x) { auto qe_cuda_stream = getQueryEngineCudaStream(); hipLaunchKernelGGL(( init_group_by_buffer_gpu), dim3(grid_size_x), dim3(block_size_x), 0, qe_cuda_stream, groups_buffer, init_vals, groups_buffer_entry_count, key_count, key_width, row_size_quad, keyless, warp_size); checkCudaErrors(hipStreamSynchronize(qe_cuda_stream)); } void init_columnar_group_by_buffer_on_device(int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t agg_col_count, const int8_t* col_sizes, const bool need_padding, const bool keyless, const int8_t key_size, const size_t block_size_x, const size_t grid_size_x) { auto qe_cuda_stream = getQueryEngineCudaStream(); hipLaunchKernelGGL(( init_columnar_group_by_buffer_gpu_wrapper), dim3(grid_size_x), dim3(block_size_x), 0, qe_cuda_stream, groups_buffer, init_vals, groups_buffer_entry_count, key_count, agg_col_count, col_sizes, need_padding, keyless, key_size); checkCudaErrors(hipStreamSynchronize(qe_cuda_stream)); }
e95941202962adf38007451f9426e862d2a85f8a.cu
#include <cuda.h> CUstream getQueryEngineCudaStream(); #include "BufferCompaction.h" #include "GpuInitGroups.h" #include "GpuRtConstants.h" #include "Logger/Logger.h" #define checkCudaErrors(err) CHECK_EQ(err, cudaSuccess) template <typename T> __device__ int8_t* init_columnar_buffer(T* buffer_ptr, const T init_val, const uint32_t entry_count, const int32_t start, const int32_t step) { for (int32_t i = start; i < entry_count; i += step) { buffer_ptr[i] = init_val; } return reinterpret_cast<int8_t*>(buffer_ptr + entry_count); } extern "C" __device__ void init_columnar_group_by_buffer_gpu_impl( int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t agg_col_count, const int8_t* col_sizes, const bool need_padding, const bool keyless, const int8_t key_size) { const int32_t start = blockIdx.x * blockDim.x + threadIdx.x; const int32_t step = blockDim.x * gridDim.x; int8_t* buffer_ptr = reinterpret_cast<int8_t*>(groups_buffer); if (!keyless) { for (uint32_t i = 0; i < key_count; ++i) { switch (key_size) { case 1: buffer_ptr = init_columnar_buffer<int8_t>( buffer_ptr, EMPTY_KEY_8, groups_buffer_entry_count, start, step); break; case 2: buffer_ptr = init_columnar_buffer<int16_t>(reinterpret_cast<int16_t*>(buffer_ptr), EMPTY_KEY_16, groups_buffer_entry_count, start, step); break; case 4: buffer_ptr = init_columnar_buffer<int32_t>(reinterpret_cast<int32_t*>(buffer_ptr), EMPTY_KEY_32, groups_buffer_entry_count, start, step); break; case 8: buffer_ptr = init_columnar_buffer<int64_t>(reinterpret_cast<int64_t*>(buffer_ptr), EMPTY_KEY_64, groups_buffer_entry_count, start, step); break; default: // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet right now. break; } buffer_ptr = align_to_int64(buffer_ptr); } } int32_t init_idx = 0; for (int32_t i = 0; i < agg_col_count; ++i) { if (need_padding) { buffer_ptr = align_to_int64(buffer_ptr); } switch (col_sizes[i]) { case 1: buffer_ptr = init_columnar_buffer<int8_t>( buffer_ptr, init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 2: buffer_ptr = init_columnar_buffer<int16_t>(reinterpret_cast<int16_t*>(buffer_ptr), init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 4: buffer_ptr = init_columnar_buffer<int32_t>(reinterpret_cast<int32_t*>(buffer_ptr), init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 8: buffer_ptr = init_columnar_buffer<int64_t>(reinterpret_cast<int64_t*>(buffer_ptr), init_vals[init_idx++], groups_buffer_entry_count, start, step); break; case 0: continue; default: // FIXME(miyu): CUDA linker doesn't accept assertion on GPU yet now. 
break; } } __syncthreads(); } template <typename K> inline __device__ void fill_empty_device_key(K* keys_ptr, const uint32_t key_count, const K empty_key) { for (uint32_t i = 0; i < key_count; ++i) { keys_ptr[i] = empty_key; } } __global__ void init_group_by_buffer_gpu(int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const bool keyless, const int8_t warp_size) { const int32_t start = blockIdx.x * blockDim.x + threadIdx.x; const int32_t step = blockDim.x * gridDim.x; if (keyless) { for (int32_t i = start; i < groups_buffer_entry_count * row_size_quad * static_cast<int32_t>(warp_size); i += step) { groups_buffer[i] = init_vals[i % row_size_quad]; } __syncthreads(); return; } for (int32_t i = start; i < groups_buffer_entry_count; i += step) { int64_t* keys_ptr = groups_buffer + i * row_size_quad; switch (key_width) { case 4: fill_empty_device_key( reinterpret_cast<int32_t*>(keys_ptr), key_count, EMPTY_KEY_32); break; case 8: fill_empty_device_key( reinterpret_cast<int64_t*>(keys_ptr), key_count, EMPTY_KEY_64); break; default: break; } } const uint32_t values_off_quad = align_to_int64(key_count * key_width) / sizeof(int64_t); for (uint32_t i = start; i < groups_buffer_entry_count; i += step) { int64_t* vals_ptr = groups_buffer + i * row_size_quad + values_off_quad; const uint32_t val_count = row_size_quad - values_off_quad; // value slots are always 64-bit for (uint32_t j = 0; j < val_count; ++j) { vals_ptr[j] = init_vals[j]; } } __syncthreads(); } __global__ void init_columnar_group_by_buffer_gpu_wrapper( int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t agg_col_count, const int8_t* col_sizes, const bool need_padding, const bool keyless, const int8_t key_size) { init_columnar_group_by_buffer_gpu_impl(groups_buffer, init_vals, groups_buffer_entry_count, key_count, agg_col_count, col_sizes, need_padding, keyless, key_size); } void init_group_by_buffer_on_device(int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const bool keyless, const int8_t warp_size, const size_t block_size_x, const size_t grid_size_x) { auto qe_cuda_stream = getQueryEngineCudaStream(); init_group_by_buffer_gpu<<<grid_size_x, block_size_x, 0, qe_cuda_stream>>>( groups_buffer, init_vals, groups_buffer_entry_count, key_count, key_width, row_size_quad, keyless, warp_size); checkCudaErrors(cudaStreamSynchronize(qe_cuda_stream)); } void init_columnar_group_by_buffer_on_device(int64_t* groups_buffer, const int64_t* init_vals, const uint32_t groups_buffer_entry_count, const uint32_t key_count, const uint32_t agg_col_count, const int8_t* col_sizes, const bool need_padding, const bool keyless, const int8_t key_size, const size_t block_size_x, const size_t grid_size_x) { auto qe_cuda_stream = getQueryEngineCudaStream(); init_columnar_group_by_buffer_gpu_wrapper<<<grid_size_x, block_size_x, 0, qe_cuda_stream>>>(groups_buffer, init_vals, groups_buffer_entry_count, key_count, agg_col_count, col_sizes, need_padding, keyless, key_size); checkCudaErrors(cudaStreamSynchronize(qe_cuda_stream)); }
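Here hipify maps the stream plumbing (CUstream and cudaStreamSynchronize become hipStream_t and hipStreamSynchronize) while the kernel bodies stay unchanged. Those kernels rely on a grid-stride loop, computing a start index and a step from the block and grid dimensions so that any launch configuration covers the whole buffer. A minimal sketch of that idiom with a hypothetical fill_i64 kernel:

#include <cstdint>

// Grid-stride loop: start at this thread's global index and advance by the
// total number of threads in the grid, so the loop covers n elements for any
// grid size.
__global__ void fill_i64(int64_t* buf, int64_t value, int n) {
  int start = blockIdx.x * blockDim.x + threadIdx.x;
  int step  = blockDim.x * gridDim.x;
  for (int i = start; i < n; i += step) {
    buf[i] = value;
  }
}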
4dd594174d557444eeac2af47cac478f1411fed0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hiprand/hiprand.h" #include <iostream> using namespace std; __global__ void sumSingleBlock(int *d) { int tid = threadIdx.x; for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>=1, stepSize <<= 1) { if (tid < tc) { int pa = tid * stepSize * 2; int pb = pa + stepSize; d[pa] += d[pb]; } } } __global__ void sumSingleBlock_shm(int *d) { extern __shared__ int dcopy[]; int tid = threadIdx.x; dcopy[tid*2] = d[tid*2]; dcopy[tid*2+1] = d[tid*2+1]; for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>=1, stepSize <<= 1) { if (tid < tc) { int pa = tid * stepSize * 2; int pb = pa + stepSize; dcopy[pa] += dcopy[pb]; } } if (tid == 0) { d[0] = dcopy[0]; } } int main() { const int count = 32; const size_t size = count * sizeof(int); int h[count]; for (int i=0; i<count; ++i) { h[i] = i+1; } int *d; hipMalloc(&d, size); hipMemcpy(d, h, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( sumSingleBlock), dim3(1), dim3(count/2), 0, 0, d); int result; hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost); //hipFree(d); std::cout << "use global mem:" << result << std::endl; hipMemcpy(d, h, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( sumSingleBlock_shm), dim3(1), dim3(count/2), count, 0, d); hipMemcpy(&result, d, sizeof(int), hipMemcpyDeviceToHost); std::cout << "use shared mem:" << result << std::endl; hipFree(d); }
4dd594174d557444eeac2af47cac478f1411fed0.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "curand.h" #include <iostream> using namespace std; __global__ void sumSingleBlock(int *d) { int tid = threadIdx.x; for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>=1, stepSize <<= 1) { if (tid < tc) { int pa = tid * stepSize * 2; int pb = pa + stepSize; d[pa] += d[pb]; } } } __global__ void sumSingleBlock_shm(int *d) { extern __shared__ int dcopy[]; int tid = threadIdx.x; dcopy[tid*2] = d[tid*2]; dcopy[tid*2+1] = d[tid*2+1]; for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>=1, stepSize <<= 1) { if (tid < tc) { int pa = tid * stepSize * 2; int pb = pa + stepSize; dcopy[pa] += dcopy[pb]; } } if (tid == 0) { d[0] = dcopy[0]; } } int main() { const int count = 32; const size_t size = count * sizeof(int); int h[count]; for (int i=0; i<count; ++i) { h[i] = i+1; } int *d; cudaMalloc(&d, size); cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); sumSingleBlock<<<1, count/2>>>(d); int result; cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost); //cudaFree(d); std::cout << "use global mem:" << result << std::endl; cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); sumSingleBlock_shm<<<1, count/2, count>>>(d); cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost); std::cout << "use shared mem:" << result << std::endl; cudaFree(d); }
5e109c8f861cb65b3c7c13046da437353b7f7300.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // User: [email protected] // ExecutionRequest[P:'ElPactometro.cu',P:1,T:1,args:'',q:'cudalb'] // May 16 2019 17:39:20 #include "cputils.h" // Added by tablon /* * Simplified simulation of fire extinguishing * * Computacion Paralela, Grado en Informatica (Universidad de Valladolid) * 2018/2019 * * v1.4 * * (c) 2019 Arturo Gonzalez Escribano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <cputils.h> #define RADIUS_TYPE_1 3 #define RADIUS_TYPE_2_3 9 #define THRESHOLD 0.1f /* Structure to store data of an extinguishing team */ typedef struct { int x,y; int type; int target; } Team; /* Structure to store data of a fire focal point */ typedef struct { int x,y; int start; int heat; int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team } FocalPoint; /* Macro function to simplify accessing with two coordinates to a flattened array */ #define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ] /*__global__ void kernel_heat( float* deviceSurface , FocalPoint* deviceFocal , int num_focal , int columns) { int i; for( i=0; i<num_focal; i++ ) { if ( deviceFocal[i].active != 1 ) continue; int x = deviceFocal[i].x; int y = deviceFocal[i].y; accessMat( deviceSurface, x, y ) = deviceFocal[i].heat; } }*/ __global__ void kernel_heat( float* deviceSurface , FocalPoint* deviceFocal , int num_focal , int columns) { int i = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if(i<num_focal){ if ( deviceFocal[i].active != 1 ) return; int x = deviceFocal[i].x; int y = deviceFocal[i].y; accessMat( deviceSurface, x, y ) = deviceFocal[i].heat; } } __global__ void kernel_copy(float* deviceSurfaceCopy , float* deviceSurface , int columns ,int rows) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int pos =y*columns+x; if ( pos> -1 && pos < rows * columns ) { deviceSurfaceCopy [pos] = deviceSurface [pos]; } } template <unsigned int blockSize> __device__ void warpReduce(volatile float *sdata, int tid) { if (blockSize >= 64) sdata[tid] =fmax(sdata[tid],sdata[tid + 32]); if (blockSize >= 32) sdata[tid] =fmax(sdata[tid],sdata[tid + 16]); if (blockSize >= 16) sdata[tid] =fmax(sdata[tid],sdata[tid + 8]); if (blockSize >= 8) sdata[tid] =fmax(sdata[tid],sdata[tid + 4]); if (blockSize >= 4) sdata[tid] =fmax(sdata[tid],sdata[tid + 2]); if (blockSize >= 2)sdata[tid] =fmax(sdata[tid],sdata[tid + 1]); } template <unsigned int blockSize> __global__ void kernel_reduce(float *deviceSurface, float *deviceSurfaceCopy, int rows , int columns,float *glr){ extern __shared__ float data[]; int tid = threadIdx.x + threadIdx.y * blockDim.x; int gid = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; int y = gid / columns; int x = gid % columns; /*unsigned int tid = threadIdx.x + threadIdx.y * blockDim.x; i = blockIdx.x*(blockDim.x*2) + threadIdx.x; data[tid] = g_idata[i] + g_idata[i+blockDim.x]; __syncthreads();*/ if(gid < (rows*columns)){ float gl = deviceSurface[y*columns+x] - deviceSurfaceCopy[y*columns+x]; if(gl < 0.0) gl = gl * (-1.0); // con lo q pusimos en comun xd data[tid] =gl; } else { data[tid] = 0.0; } __syncthreads(); /*for (unsigned int s=blockDim.x * blockDim.y/2; s>0; s>>=1) { if (tid < s) { data[tid] = fmax(data[tid],data[tid + s]); } __syncthreads(); }*/ /* for (unsigned int s=1; s < 
blockDim.x*blockDim.y; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x*blockDim.x*blockDim.y) { data[index] = fmax(data[tid],data[tid + s]); } __syncthreads(); }*/ /*for(unsigned int s = 1;s < blockDim.x * blockDim.y; s *= 2){ if(tid % (2*s) == 0){ data[tid] = fmax(data[tid],data[tid + s]); } __syncthreads(); }*/ if (blockSize >= 512) { if (tid < 256) { data[tid] =fmax(data[tid],data[tid + 256]); } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { data[tid] =fmax(data[tid],data[tid + 128]); } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { data[tid] =fmax(data[tid],data[tid + 64]); } __syncthreads(); } if (tid < 32) warpReduce<blockSize>(data, tid); if(tid == 0) glr[blockIdx.x+blockIdx.y*gridDim.x] = data[0]; } __global__ void kernel_update(float* deviceSurfaceCopy ,float* deviceSurface , int columns ,int rows) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if( (x > 0) && (y < rows-1) && ( y > 0) && ( x< columns-1) ){ deviceSurface[y*columns+x] = ( deviceSurfaceCopy[(y-1)*columns+x] + deviceSurfaceCopy[(y+1)*columns+x] + deviceSurfaceCopy[y*columns+(x-1)]+deviceSurfaceCopy[y*columns+(x+1)])/4; } } __global__ void kernel_moves(Team *teams,FocalPoint *focal,int num_teams,int num_focal) { int j; int t=(threadIdx.x+blockIdx.x*blockDim.x)+(threadIdx.y+blockIdx.y*blockDim.y)*blockDim.x*gridDim.x; if(t < num_teams){ float distance = FLT_MAX; int target = -1; for( j=0; j<num_focal; j++ ) { if ( focal[j].active != 1 ) continue; float dx = focal[j].x - teams[t].x; float dy = focal[j].y - teams[t].y; float local_distance = ( dx*dx + dy*dy ); if ( local_distance < distance ) { distance = local_distance; target = j; } } teams[t].target = target; if ( target == -1 ) return; if ( teams[t].type == 1 ) { // Type 1: Can move in diagonal if ( focal[target].x < teams[t].x ) teams[t].x--; if ( focal[target].x > teams[t].x ) teams[t].x++; if ( focal[target].y < teams[t].y ) teams[t].y--; if ( focal[target].y > teams[t].y ) teams[t].y++; } else if ( teams[t].type == 2 ) { // Type 2: First in horizontal direction, then in vertical direction if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; else if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; } else { // Type 3: First in vertical direction, then in horizontal direction if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; else if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; } } // if t } //global /* * Function: Print usage line in stderr */ void show_usage( char *program_name ) { fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name ); fprintf(stderr,"\t<config_file> ::= -f <file_name>\n"); fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n"); fprintf(stderr,"\n"); } #ifdef DEBUG /* * Function: Print the current state of the simulation */ void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) { /* * You don't need to optimize this function, it is only for pretty printing and debugging purposes. * It is not compiled in the production versions of the program. 
* Thus, it is never used when measuring times in the leaderboard */ int i,j; printf("Iteration: %d\n", iteration ); printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); for( i=0; i<rows; i++ ) { printf("|"); for( j=0; j<columns; j++ ) { char symbol; if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*'; else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100); else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+'; else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.'; else symbol = '0'; int t; int flag_team = 0; for( t=0; t<num_teams; t++ ) if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; } if ( flag_team ) printf("[%c]", symbol ); else { int f; int flag_focal = 0; for( f=0; f<num_focal; f++ ) if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; } if ( flag_focal ) printf("(%c)", symbol ); else printf(" %c ", symbol ); } } printf("|\n"); } printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); printf("Global residual: %f\n\n", global_residual); } #endif /* * MAIN PROGRAM */ int main(int argc, char *argv[]) { int i,j,t; // Simulation data int rows, columns, max_iter; float *surface, *surfaceCopy; int num_teams, num_focal; Team *teams; FocalPoint *focal; /* 1. Read simulation arguments */ /* 1.1. Check minimum number of arguments */ if (argc<2) { fprintf(stderr,"-- Error in arguments: No arguments\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } int read_from_file = ! strcmp( argv[1], "-f" ); /* 1.2. Read configuration from file */ if ( read_from_file ) { /* 1.2.1. Open file */ if (argc<3) { fprintf(stderr,"-- Error in arguments: file-name argument missing\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } FILE *args = cp_abrir_fichero( argv[2] ); if ( args == NULL ) { fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]); exit( EXIT_FAILURE ); } /* 1.2.2. Read surface and maximum number of iterations */ int ok; ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); if ( surface == NULL || surfaceCopy == NULL ) { fprintf(stderr,"-- Error allocating: surface structures\n"); exit( EXIT_FAILURE ); } /* 1.2.3. Teams information */ ok = fscanf(args, "%d", &num_teams ); if ( ok != 1 ) { fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } } /* 1.2.4. 
Focal points information */ ok = fscanf(args, "%d", &num_focal ); if ( ok != 1 ) { fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal ); if ( focal == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat); if ( ok != 4 ) { fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } focal[i].active = 0; } } /* 1.3. Read configuration from arguments */ else { /* 1.3.1. Check minimum number of arguments */ if (argc<6) { fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } /* 1.3.2. Surface and maximum number of iterations */ rows = atoi( argv[1] ); columns = atoi( argv[2] ); max_iter = atoi( argv[3] ); surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); /* 1.3.3. Teams information */ num_teams = atoi( argv[4] ); teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } if ( argc < num_teams*3 + 5 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { teams[i].x = atoi( argv[5+i*3] ); teams[i].y = atoi( argv[6+i*3] ); teams[i].type = atoi( argv[7+i*3] ); } /* 1.3.4. Focal points information */ int focal_args = 5 + i*3; if ( argc < focal_args+1 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } num_focal = atoi( argv[focal_args] ); focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } if ( argc < focal_args + 1 + num_focal*4 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { focal[i].x = atoi( argv[focal_args+i*4+1] ); focal[i].y = atoi( argv[focal_args+i*4+2] ); focal[i].start = atoi( argv[focal_args+i*4+3] ); focal[i].heat = atoi( argv[focal_args+i*4+4] ); focal[i].active = 0; } /* 1.3.5. Sanity check: No extra arguments at the end of line */ if ( argc > focal_args+i*4+1 ) { fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } } #ifdef DEBUG /* 1.4. Print arguments */ printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter); printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal ); for( i=0; i<num_teams; i++ ) { printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type ); } for( i=0; i<num_focal; i++ ) { printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i, focal[i].x, focal[i].y, focal[i].start, focal[i].heat ); } #endif // DEBUG /* 2. 
Select GPU and start global timer */ hipSetDevice(0); hipDeviceSynchronize(); double ttotal = cp_Wtime(); /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * */ float *deviceSurface; float *deviceSurfaceCopy; float *deviceGlobalResidual; Team *deviceteam; FocalPoint *devicepf; dim3 dimBlock(32,8); dim3 dimGrid( ((columns + dimBlock.x - 1) / dimBlock.x), ((rows + dimBlock.y - 1) / dimBlock.y) ); //float num_bloq = ((columns + dimBlock.x - 1) / dimBlock.x) * ((rows + dimBlock.y - 1) / dimBlock.y); hipMalloc((void**) &deviceSurface , sizeof(float) * (size_t)rows * (size_t)columns); hipMalloc((void**) &deviceSurfaceCopy , sizeof(float) * (size_t)rows * (size_t)columns); hipMalloc((void**) &deviceGlobalResidual , sizeof(float)*dimGrid.x*dimGrid.y); hipMalloc((void**) &deviceteam ,sizeof(Team) * (size_t)num_teams ); hipMalloc( (void**) &devicepf , sizeof(FocalPoint) * (size_t)num_focal ); float *glr = (float *)malloc( sizeof(float) * dimGrid.x*dimGrid.y); //teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); /* 3. Initialize surface */ for( i=0; i<rows; i++ ){ for( j=0; j<columns; j++ ){ accessMat( surface, i, j ) = 0.0; accessMat( surfaceCopy, i, j ) = 0.0; } } /* 4. Simulation */ int iter; int flag_stability = 0; int first_activation = 0; for( iter=0; iter<max_iter && ! flag_stability; iter++ ) { /* 4.1. Activate focal points */ int num_deactivated = 0; for( i=0; i<num_focal; i++ ) { if ( focal[i].start == iter ) { focal[i].active = 1; //if ( ! first_activation ) first_activation = 1; } // Count focal points already deactivated by a team if ( focal[i].active == 2 ) num_deactivated++; } //hipMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , hipMemcpyHostToDevice ); /* 4.2. Propagate heat (10 steps per each team movement) */ float global_residual = 0.0f; int step; hipMemcpy( deviceSurface, surface, sizeof(float) * (size_t)rows * (size_t)columns , hipMemcpyHostToDevice ); hipMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , hipMemcpyHostToDevice ); for( step=0; step<10; step++ ) { //hipMemcpy( deviceSurfaceCopy, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns , hipMemcpyHostToDevice ); /* 4.2.1. Update heat on active focal points */ /*for( i=0; i<num_focal; i++ ) { if ( focal[i].active != 1 ) continue; int x = focal[i].x; int y = focal[i].y; accessMat( surface, x, y ) = focal[i].heat; }*/ hipLaunchKernelGGL(( kernel_heat), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceSurface , devicepf , num_focal , columns); /* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */ /* for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) accessMat( surfaceCopy, i, j ) = accessMat( surface, i, j );*/ // para copy //hipMemcpy( deviceSurface, surface, sizeof(float) * (size_t)rows * (size_t)columns , hipMemcpyHostToDevice ); //nocudaMemcpy( deviceSurfaceCopy, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns , hipMemcpyHostToDevice ); //kernel_copy<<<dimGrid,dimBlock>>> (deviceSurfaceCopy , deviceSurface , columns , rows); float *copia = deviceSurface; deviceSurface = deviceSurfaceCopy; deviceSurfaceCopy = copia; // para update //hipMemcpy( surfaceCopy, deviceSurfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost ); //hipMemcpy( surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost ); // 4.2.3. 
Update surface values (skip borders) */ /*for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) accessMat( surface, i, j ) = ( accessMat( surfaceCopy, i-1, j ) + accessMat( surfaceCopy, i+1, j ) + accessMat( surfaceCopy, i, j-1 ) + accessMat( surfaceCopy, i, j+1 ) ) / 4; */ hipLaunchKernelGGL(( kernel_update), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceSurfaceCopy , deviceSurface , columns , rows); //hipMemcpy(surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost ); //hipMemcpy( surfaceCopy, deviceSurfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost ); /*---------------------------------------------------------------------------------------------------------------------*/ //hipMemcpy( deviceSurface, surface, sizeof(float) * (size_t)rows * (size_t)columns , hipMemcpyHostToDevice ); //hipMemcpy( deviceSurfaceCopy, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns , hipMemcpyHostToDevice ); if(step == 0){ int tamSize = sizeof(float) * dimBlock.x * dimBlock.y; hipLaunchKernelGGL(( kernel_reduce<256>), dim3(dimGrid),dim3(dimBlock),tamSize, 0, deviceSurface , deviceSurfaceCopy , rows , columns , deviceGlobalResidual); hipMemcpy( glr, deviceGlobalResidual, sizeof(float) * dimGrid.x*dimGrid.y , hipMemcpyDeviceToHost ); //hipMemcpy(surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost ); //hipMemcpy( surfaceCopy, deviceSurfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost ); for(int i=0; i<dimGrid.x*dimGrid.y;i++){ if(glr[i]>global_residual){ global_residual=glr[i]; } } } /*for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) { global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ); } */ } // steps /* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */ hipMemcpy(surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost ); if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1; /* 4.3. Move teams */ /* 4.3. 
Move teams for( t=0; t<num_teams; t++ ) { float distance = FLT_MAX; int target = -1; for( j=0; j<num_focal; j++ ) { if ( focal[j].active != 1 ) continue; // Skip non-active focal points float dx = focal[j].x - teams[t].x; float dy = focal[j].y - teams[t].y; float local_distance = sqrtf( dx*dx + dy*dy ); if ( local_distance < distance ) { distance = local_distance; target = j; } } teams[t].target = target; if ( target == -1 ) continue; if ( teams[t].type == 1 ) { // Type 1: Can move in diagonal if ( focal[target].x < teams[t].x ) teams[t].x--; if ( focal[target].x > teams[t].x ) teams[t].x++; if ( focal[target].y < teams[t].y ) teams[t].y--; if ( focal[target].y > teams[t].y ) teams[t].y++; } else if ( teams[t].type == 2 ) { // Type 2: First in horizontal direction, then in vertical direction if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; else if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; } else { // Type 3: First in vertical direction, then in horizontal direction if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; else if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; } }*/ if( num_deactivated != num_focal){ hipMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , hipMemcpyHostToDevice ); hipMemcpy( deviceteam, teams, sizeof(Team) * (size_t)num_teams , hipMemcpyHostToDevice ); hipLaunchKernelGGL(( kernel_moves), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceteam,devicepf,num_teams,num_focal); hipMemcpy( teams, deviceteam, sizeof(Team) * (size_t)num_teams, hipMemcpyDeviceToHost ); } /* 4.4. Team actions */ for( t=0; t<num_teams; t++ ) { /* 4.4.1. Deactivate the target focal point when it is reached */ int target = teams[t].target; if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y && focal[target].active == 1 ) focal[target].active = 2; /* 4.4.2. Reduce heat in a circle around the team */ int radius; // Influence area of fixed radius depending on type if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1; else radius = RADIUS_TYPE_2_3; for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) { for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) { if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface float dx = teams[t].x - i; float dy = teams[t].y - j; float distance = ( dx*dx + dy*dy ); if ( distance <= radius*radius ) { accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor } } } } //hipMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , hipMemcpyHostToDevice ); #ifdef DEBUG /* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */ print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual ); #endif // DEBUG } /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ /* 5. Stop global time */ hipDeviceSynchronize(); ttotal = cp_Wtime() - ttotal; /* 6. Output for leaderboard */ printf("\n"); /* 6.1. Total computation time */ printf("Time: %lf\n", ttotal ); /* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */ printf("Result: %d", iter); /* for (i=0; i<num_teams; i++) printf(" %d %d", teams[i].x, teams[i].y ); */ for (i=0; i<num_focal; i++) printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) ); printf("\n"); /* 7. 
Free resources */ free( teams ); free( focal ); free( surface ); free( surfaceCopy ); /* 8. End */ return 0; }
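The propagation loop in the program above skips the commented-out kernel_copy entirely: before each update it swaps the deviceSurface and deviceSurfaceCopy pointers, then lets kernel_update read the old values and write the new four-neighbour averages. The standalone sketch below restates that ping-pong double-buffering pattern under hypothetical names (jacobi_step, bufA, bufB); it is illustrative only, leaves the grid at zero instead of injecting focal-point heat, and the HIP spelling would differ only in hip-prefixed runtime calls and the launch macro.

#include <cstdio>
#include <cuda_runtime.h>

// Four-neighbour (Jacobi) averaging step over the interior of the grid.
__global__ void jacobi_step(const float *in, float *out, int rows, int cols) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x > 0 && x < cols - 1 && y > 0 && y < rows - 1) {
        out[y * cols + x] = (in[(y - 1) * cols + x] + in[(y + 1) * cols + x] +
                             in[y * cols + (x - 1)] + in[y * cols + (x + 1)]) / 4.0f;
    }
}

int main() {
    const int rows = 256, cols = 256;
    const size_t bytes = sizeof(float) * rows * cols;
    float *bufA, *bufB;
    cudaMalloc((void **)&bufA, bytes);
    cudaMalloc((void **)&bufB, bytes);
    cudaMemset(bufA, 0, bytes);   // surfaces left at zero: the sketch shows the buffering, not the physics
    cudaMemset(bufB, 0, bytes);

    dim3 block(32, 8);            // same 32x8 block shape as the program above
    dim3 grid((cols + block.x - 1) / block.x, (rows + block.y - 1) / block.y);

    for (int step = 0; step < 10; ++step) {
        // Swap roles instead of copying surface -> surfaceCopy.
        float *tmp = bufA; bufA = bufB; bufB = tmp;
        // Read the previous values from bufB, write the new ones into bufA.
        jacobi_step<<<grid, block>>>(bufB, bufA, rows, cols);
    }
    cudaDeviceSynchronize();

    cudaFree(bufA);
    cudaFree(bufB);
    printf("done\n");
    return 0;
}

The swap costs nothing per step, whereas the copy kernel it replaces would touch every cell once more on every step.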
5e109c8f861cb65b3c7c13046da437353b7f7300.cu
// User: [email protected] // ExecutionRequest[P:'ElPactometro.cu',P:1,T:1,args:'',q:'cudalb'] // May 16 2019 17:39:20 #include "cputils.h" // Added by tablon /* * Simplified simulation of fire extinguishing * * Computacion Paralela, Grado en Informatica (Universidad de Valladolid) * 2018/2019 * * v1.4 * * (c) 2019 Arturo Gonzalez Escribano */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <float.h> #include <cputils.h> #define RADIUS_TYPE_1 3 #define RADIUS_TYPE_2_3 9 #define THRESHOLD 0.1f /* Structure to store data of an extinguishing team */ typedef struct { int x,y; int type; int target; } Team; /* Structure to store data of a fire focal point */ typedef struct { int x,y; int start; int heat; int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team } FocalPoint; /* Macro function to simplify accessing with two coordinates to a flattened array */ #define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ] /*__global__ void kernel_heat( float* deviceSurface , FocalPoint* deviceFocal , int num_focal , int columns) { int i; for( i=0; i<num_focal; i++ ) { if ( deviceFocal[i].active != 1 ) continue; int x = deviceFocal[i].x; int y = deviceFocal[i].y; accessMat( deviceSurface, x, y ) = deviceFocal[i].heat; } }*/ __global__ void kernel_heat( float* deviceSurface , FocalPoint* deviceFocal , int num_focal , int columns) { int i = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if(i<num_focal){ if ( deviceFocal[i].active != 1 ) return; int x = deviceFocal[i].x; int y = deviceFocal[i].y; accessMat( deviceSurface, x, y ) = deviceFocal[i].heat; } } __global__ void kernel_copy(float* deviceSurfaceCopy , float* deviceSurface , int columns ,int rows) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int pos =y*columns+x; if ( pos> -1 && pos < rows * columns ) { deviceSurfaceCopy [pos] = deviceSurface [pos]; } } template <unsigned int blockSize> __device__ void warpReduce(volatile float *sdata, int tid) { if (blockSize >= 64) sdata[tid] =fmax(sdata[tid],sdata[tid + 32]); if (blockSize >= 32) sdata[tid] =fmax(sdata[tid],sdata[tid + 16]); if (blockSize >= 16) sdata[tid] =fmax(sdata[tid],sdata[tid + 8]); if (blockSize >= 8) sdata[tid] =fmax(sdata[tid],sdata[tid + 4]); if (blockSize >= 4) sdata[tid] =fmax(sdata[tid],sdata[tid + 2]); if (blockSize >= 2)sdata[tid] =fmax(sdata[tid],sdata[tid + 1]); } template <unsigned int blockSize> __global__ void kernel_reduce(float *deviceSurface, float *deviceSurfaceCopy, int rows , int columns,float *glr){ extern __shared__ float data[]; int tid = threadIdx.x + threadIdx.y * blockDim.x; int gid = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; int y = gid / columns; int x = gid % columns; /*unsigned int tid = threadIdx.x + threadIdx.y * blockDim.x; i = blockIdx.x*(blockDim.x*2) + threadIdx.x; data[tid] = g_idata[i] + g_idata[i+blockDim.x]; __syncthreads();*/ if(gid < (rows*columns)){ float gl = deviceSurface[y*columns+x] - deviceSurfaceCopy[y*columns+x]; if(gl < 0.0) gl = gl * (-1.0); // con lo q pusimos en comun xd data[tid] =gl; } else { data[tid] = 0.0; } __syncthreads(); /*for (unsigned int s=blockDim.x * blockDim.y/2; s>0; s>>=1) { if (tid < s) { data[tid] = fmax(data[tid],data[tid + s]); } __syncthreads(); }*/ /* for (unsigned int s=1; s < blockDim.x*blockDim.y; s *= 2) { int index = 2 * s * tid; if (index < 
blockDim.x*blockDim.x*blockDim.y) { data[index] = fmax(data[tid],data[tid + s]); } __syncthreads(); }*/ /*for(unsigned int s = 1;s < blockDim.x * blockDim.y; s *= 2){ if(tid % (2*s) == 0){ data[tid] = fmax(data[tid],data[tid + s]); } __syncthreads(); }*/ if (blockSize >= 512) { if (tid < 256) { data[tid] =fmax(data[tid],data[tid + 256]); } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { data[tid] =fmax(data[tid],data[tid + 128]); } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { data[tid] =fmax(data[tid],data[tid + 64]); } __syncthreads(); } if (tid < 32) warpReduce<blockSize>(data, tid); if(tid == 0) glr[blockIdx.x+blockIdx.y*gridDim.x] = data[0]; } __global__ void kernel_update(float* deviceSurfaceCopy ,float* deviceSurface , int columns ,int rows) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if( (x > 0) && (y < rows-1) && ( y > 0) && ( x< columns-1) ){ deviceSurface[y*columns+x] = ( deviceSurfaceCopy[(y-1)*columns+x] + deviceSurfaceCopy[(y+1)*columns+x] + deviceSurfaceCopy[y*columns+(x-1)]+deviceSurfaceCopy[y*columns+(x+1)])/4; } } __global__ void kernel_moves(Team *teams,FocalPoint *focal,int num_teams,int num_focal) { int j; int t=(threadIdx.x+blockIdx.x*blockDim.x)+(threadIdx.y+blockIdx.y*blockDim.y)*blockDim.x*gridDim.x; if(t < num_teams){ float distance = FLT_MAX; int target = -1; for( j=0; j<num_focal; j++ ) { if ( focal[j].active != 1 ) continue; float dx = focal[j].x - teams[t].x; float dy = focal[j].y - teams[t].y; float local_distance = ( dx*dx + dy*dy ); if ( local_distance < distance ) { distance = local_distance; target = j; } } teams[t].target = target; if ( target == -1 ) return; if ( teams[t].type == 1 ) { // Type 1: Can move in diagonal if ( focal[target].x < teams[t].x ) teams[t].x--; if ( focal[target].x > teams[t].x ) teams[t].x++; if ( focal[target].y < teams[t].y ) teams[t].y--; if ( focal[target].y > teams[t].y ) teams[t].y++; } else if ( teams[t].type == 2 ) { // Type 2: First in horizontal direction, then in vertical direction if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; else if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; } else { // Type 3: First in vertical direction, then in horizontal direction if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; else if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; } } // if t } //global /* * Function: Print usage line in stderr */ void show_usage( char *program_name ) { fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name ); fprintf(stderr,"\t<config_file> ::= -f <file_name>\n"); fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n"); fprintf(stderr,"\n"); } #ifdef DEBUG /* * Function: Print the current state of the simulation */ void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) { /* * You don't need to optimize this function, it is only for pretty printing and debugging purposes. * It is not compiled in the production versions of the program. 
* Thus, it is never used when measuring times in the leaderboard */ int i,j; printf("Iteration: %d\n", iteration ); printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); for( i=0; i<rows; i++ ) { printf("|"); for( j=0; j<columns; j++ ) { char symbol; if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*'; else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100); else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+'; else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.'; else symbol = '0'; int t; int flag_team = 0; for( t=0; t<num_teams; t++ ) if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; } if ( flag_team ) printf("[%c]", symbol ); else { int f; int flag_focal = 0; for( f=0; f<num_focal; f++ ) if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; } if ( flag_focal ) printf("(%c)", symbol ); else printf(" %c ", symbol ); } } printf("|\n"); } printf("+"); for( j=0; j<columns; j++ ) printf("---"); printf("+\n"); printf("Global residual: %f\n\n", global_residual); } #endif /* * MAIN PROGRAM */ int main(int argc, char *argv[]) { int i,j,t; // Simulation data int rows, columns, max_iter; float *surface, *surfaceCopy; int num_teams, num_focal; Team *teams; FocalPoint *focal; /* 1. Read simulation arguments */ /* 1.1. Check minimum number of arguments */ if (argc<2) { fprintf(stderr,"-- Error in arguments: No arguments\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } int read_from_file = ! strcmp( argv[1], "-f" ); /* 1.2. Read configuration from file */ if ( read_from_file ) { /* 1.2.1. Open file */ if (argc<3) { fprintf(stderr,"-- Error in arguments: file-name argument missing\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } FILE *args = cp_abrir_fichero( argv[2] ); if ( args == NULL ) { fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]); exit( EXIT_FAILURE ); } /* 1.2.2. Read surface and maximum number of iterations */ int ok; ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); if ( surface == NULL || surfaceCopy == NULL ) { fprintf(stderr,"-- Error allocating: surface structures\n"); exit( EXIT_FAILURE ); } /* 1.2.3. Teams information */ ok = fscanf(args, "%d", &num_teams ); if ( ok != 1 ) { fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type); if ( ok != 3 ) { fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } } /* 1.2.4. 
Focal points information */ ok = fscanf(args, "%d", &num_focal ); if ( ok != 1 ) { fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]); exit( EXIT_FAILURE ); } focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal ); if ( focal == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat); if ( ok != 4 ) { fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]); exit( EXIT_FAILURE ); } focal[i].active = 0; } } /* 1.3. Read configuration from arguments */ else { /* 1.3.1. Check minimum number of arguments */ if (argc<6) { fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } /* 1.3.2. Surface and maximum number of iterations */ rows = atoi( argv[1] ); columns = atoi( argv[2] ); max_iter = atoi( argv[3] ); surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns ); /* 1.3.3. Teams information */ num_teams = atoi( argv[4] ); teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d teams\n", num_teams ); exit( EXIT_FAILURE ); } if ( argc < num_teams*3 + 5 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams ); exit( EXIT_FAILURE ); } for( i=0; i<num_teams; i++ ) { teams[i].x = atoi( argv[5+i*3] ); teams[i].y = atoi( argv[6+i*3] ); teams[i].type = atoi( argv[7+i*3] ); } /* 1.3.4. Focal points information */ int focal_args = 5 + i*3; if ( argc < focal_args+1 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } num_focal = atoi( argv[focal_args] ); focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal ); if ( teams == NULL ) { fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } if ( argc < focal_args + 1 + num_focal*4 ) { fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal ); exit( EXIT_FAILURE ); } for( i=0; i<num_focal; i++ ) { focal[i].x = atoi( argv[focal_args+i*4+1] ); focal[i].y = atoi( argv[focal_args+i*4+2] ); focal[i].start = atoi( argv[focal_args+i*4+3] ); focal[i].heat = atoi( argv[focal_args+i*4+4] ); focal[i].active = 0; } /* 1.3.5. Sanity check: No extra arguments at the end of line */ if ( argc > focal_args+i*4+1 ) { fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n"); show_usage( argv[0] ); exit( EXIT_FAILURE ); } } #ifdef DEBUG /* 1.4. Print arguments */ printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter); printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal ); for( i=0; i<num_teams; i++ ) { printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type ); } for( i=0; i<num_focal; i++ ) { printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i, focal[i].x, focal[i].y, focal[i].start, focal[i].heat ); } #endif // DEBUG /* 2. 
Select GPU and start global timer */ cudaSetDevice(0); cudaDeviceSynchronize(); double ttotal = cp_Wtime(); /* * * START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT * */ float *deviceSurface; float *deviceSurfaceCopy; float *deviceGlobalResidual; Team *deviceteam; FocalPoint *devicepf; dim3 dimBlock(32,8); dim3 dimGrid( ((columns + dimBlock.x - 1) / dimBlock.x), ((rows + dimBlock.y - 1) / dimBlock.y) ); //float num_bloq = ((columns + dimBlock.x - 1) / dimBlock.x) * ((rows + dimBlock.y - 1) / dimBlock.y); cudaMalloc((void**) &deviceSurface , sizeof(float) * (size_t)rows * (size_t)columns); cudaMalloc((void**) &deviceSurfaceCopy , sizeof(float) * (size_t)rows * (size_t)columns); cudaMalloc((void**) &deviceGlobalResidual , sizeof(float)*dimGrid.x*dimGrid.y); cudaMalloc((void**) &deviceteam ,sizeof(Team) * (size_t)num_teams ); cudaMalloc( (void**) &devicepf , sizeof(FocalPoint) * (size_t)num_focal ); float *glr = (float *)malloc( sizeof(float) * dimGrid.x*dimGrid.y); //teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams ); /* 3. Initialize surface */ for( i=0; i<rows; i++ ){ for( j=0; j<columns; j++ ){ accessMat( surface, i, j ) = 0.0; accessMat( surfaceCopy, i, j ) = 0.0; } } /* 4. Simulation */ int iter; int flag_stability = 0; int first_activation = 0; for( iter=0; iter<max_iter && ! flag_stability; iter++ ) { /* 4.1. Activate focal points */ int num_deactivated = 0; for( i=0; i<num_focal; i++ ) { if ( focal[i].start == iter ) { focal[i].active = 1; //if ( ! first_activation ) first_activation = 1; } // Count focal points already deactivated by a team if ( focal[i].active == 2 ) num_deactivated++; } //cudaMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , cudaMemcpyHostToDevice ); /* 4.2. Propagate heat (10 steps per each team movement) */ float global_residual = 0.0f; int step; cudaMemcpy( deviceSurface, surface, sizeof(float) * (size_t)rows * (size_t)columns , cudaMemcpyHostToDevice ); cudaMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , cudaMemcpyHostToDevice ); for( step=0; step<10; step++ ) { //cudaMemcpy( deviceSurfaceCopy, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns , cudaMemcpyHostToDevice ); /* 4.2.1. Update heat on active focal points */ /*for( i=0; i<num_focal; i++ ) { if ( focal[i].active != 1 ) continue; int x = focal[i].x; int y = focal[i].y; accessMat( surface, x, y ) = focal[i].heat; }*/ kernel_heat<<<dimGrid,dimBlock>>> (deviceSurface , devicepf , num_focal , columns); /* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */ /* for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) accessMat( surfaceCopy, i, j ) = accessMat( surface, i, j );*/ // para copy //cudaMemcpy( deviceSurface, surface, sizeof(float) * (size_t)rows * (size_t)columns , cudaMemcpyHostToDevice ); //nocudaMemcpy( deviceSurfaceCopy, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns , cudaMemcpyHostToDevice ); //kernel_copy<<<dimGrid,dimBlock>>> (deviceSurfaceCopy , deviceSurface , columns , rows); float *copia = deviceSurface; deviceSurface = deviceSurfaceCopy; deviceSurfaceCopy = copia; // para update //cudaMemcpy( surfaceCopy, deviceSurfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost ); //cudaMemcpy( surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost ); // 4.2.3. 
Update surface values (skip borders) */ /*for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) accessMat( surface, i, j ) = ( accessMat( surfaceCopy, i-1, j ) + accessMat( surfaceCopy, i+1, j ) + accessMat( surfaceCopy, i, j-1 ) + accessMat( surfaceCopy, i, j+1 ) ) / 4; */ kernel_update<<<dimGrid,dimBlock>>> (deviceSurfaceCopy , deviceSurface , columns , rows); //cudaMemcpy(surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost ); //cudaMemcpy( surfaceCopy, deviceSurfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost ); /*---------------------------------------------------------------------------------------------------------------------*/ //cudaMemcpy( deviceSurface, surface, sizeof(float) * (size_t)rows * (size_t)columns , cudaMemcpyHostToDevice ); //cudaMemcpy( deviceSurfaceCopy, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns , cudaMemcpyHostToDevice ); if(step == 0){ int tamSize = sizeof(float) * dimBlock.x * dimBlock.y; kernel_reduce<256><<<dimGrid,dimBlock,tamSize>>> (deviceSurface , deviceSurfaceCopy , rows , columns , deviceGlobalResidual); cudaMemcpy( glr, deviceGlobalResidual, sizeof(float) * dimGrid.x*dimGrid.y , cudaMemcpyDeviceToHost ); //cudaMemcpy(surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost ); //cudaMemcpy( surfaceCopy, deviceSurfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost ); for(int i=0; i<dimGrid.x*dimGrid.y;i++){ if(glr[i]>global_residual){ global_residual=glr[i]; } } } /*for( i=1; i<rows-1; i++ ) for( j=1; j<columns-1; j++ ) if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) { global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ); } */ } // steps /* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */ cudaMemcpy(surface, deviceSurface, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost ); if( num_deactivated == num_focal && global_residual < THRESHOLD ) flag_stability = 1; /* 4.3. Move teams */ /* 4.3. 
Move teams for( t=0; t<num_teams; t++ ) { float distance = FLT_MAX; int target = -1; for( j=0; j<num_focal; j++ ) { if ( focal[j].active != 1 ) continue; // Skip non-active focal points float dx = focal[j].x - teams[t].x; float dy = focal[j].y - teams[t].y; float local_distance = sqrtf( dx*dx + dy*dy ); if ( local_distance < distance ) { distance = local_distance; target = j; } } teams[t].target = target; if ( target == -1 ) continue; if ( teams[t].type == 1 ) { // Type 1: Can move in diagonal if ( focal[target].x < teams[t].x ) teams[t].x--; if ( focal[target].x > teams[t].x ) teams[t].x++; if ( focal[target].y < teams[t].y ) teams[t].y--; if ( focal[target].y > teams[t].y ) teams[t].y++; } else if ( teams[t].type == 2 ) { // Type 2: First in horizontal direction, then in vertical direction if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; else if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; } else { // Type 3: First in vertical direction, then in horizontal direction if ( focal[target].x < teams[t].x ) teams[t].x--; else if ( focal[target].x > teams[t].x ) teams[t].x++; else if ( focal[target].y < teams[t].y ) teams[t].y--; else if ( focal[target].y > teams[t].y ) teams[t].y++; } }*/ if( num_deactivated != num_focal){ cudaMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , cudaMemcpyHostToDevice ); cudaMemcpy( deviceteam, teams, sizeof(Team) * (size_t)num_teams , cudaMemcpyHostToDevice ); kernel_moves<<<dimGrid,dimBlock>>> (deviceteam,devicepf,num_teams,num_focal); cudaMemcpy( teams, deviceteam, sizeof(Team) * (size_t)num_teams, cudaMemcpyDeviceToHost ); } /* 4.4. Team actions */ for( t=0; t<num_teams; t++ ) { /* 4.4.1. Deactivate the target focal point when it is reached */ int target = teams[t].target; if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y && focal[target].active == 1 ) focal[target].active = 2; /* 4.4.2. Reduce heat in a circle around the team */ int radius; // Influence area of fixed radius depending on type if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1; else radius = RADIUS_TYPE_2_3; for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) { for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) { if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface float dx = teams[t].x - i; float dy = teams[t].y - j; float distance = ( dx*dx + dy*dy ); if ( distance <= radius*radius ) { accessMat( surface, i, j ) = accessMat( surface, i, j ) * ( 1 - 0.25 ); // Team efficiency factor } } } } //cudaMemcpy( devicepf, focal, sizeof(FocalPoint) * (size_t)num_focal , cudaMemcpyHostToDevice ); #ifdef DEBUG /* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */ print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual ); #endif // DEBUG } /* * * STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT * */ /* 5. Stop global time */ cudaDeviceSynchronize(); ttotal = cp_Wtime() - ttotal; /* 6. Output for leaderboard */ printf("\n"); /* 6.1. Total computation time */ printf("Time: %lf\n", ttotal ); /* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */ printf("Result: %d", iter); /* for (i=0; i<num_teams; i++) printf(" %d %d", teams[i].x, teams[i].y ); */ for (i=0; i<num_focal; i++) printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) ); printf("\n"); /* 7. 
Free resources */ free( teams ); free( focal ); free( surface ); free( surfaceCopy ); /* 8. End */ return 0; }
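Both versions above compute the convergence residual the same way: on step 0, kernel_reduce loads |surface − surfaceCopy| per cell into shared memory, max-reduces it within the block (finishing the last 32 threads in warpReduce), writes one partial maximum per block into glr, and the host then scans glr for the global maximum and compares it against THRESHOLD. The sketch below is a simplified restatement of that pattern with hypothetical names; it drops the templated block size and the hand-unrolled warp stage, and assumes the block size is a power of two.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Each block writes the maximum of |a[i] - b[i]| over its slice into block_max[blockIdx.x].
__global__ void block_max_absdiff(const float *a, const float *b, int n, float *block_max) {
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;

    sdata[tid] = (gid < n) ? fabsf(a[gid] - b[gid]) : 0.0f;
    __syncthreads();

    // Tree reduction in shared memory (blockDim.x assumed to be a power of two).
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] = fmaxf(sdata[tid], sdata[tid + s]);
        __syncthreads();
    }
    if (tid == 0) block_max[blockIdx.x] = sdata[0];
}

int main() {
    const int n = 1 << 20, threads = 256;
    const int blocks = (n + threads - 1) / threads;
    float *a, *b, *partial;
    cudaMalloc((void **)&a, n * sizeof(float));
    cudaMalloc((void **)&b, n * sizeof(float));
    cudaMalloc((void **)&partial, blocks * sizeof(float));
    cudaMemset(a, 0, n * sizeof(float));
    cudaMemset(b, 0, n * sizeof(float));

    block_max_absdiff<<<blocks, threads, threads * sizeof(float)>>>(a, b, n, partial);

    // Host finishes the reduction over the per-block partial maxima,
    // just like the loop over glr[] in the program above.
    float *h_partial = (float *)malloc(blocks * sizeof(float));
    cudaMemcpy(h_partial, partial, blocks * sizeof(float), cudaMemcpyDeviceToHost);
    float global_residual = 0.0f;
    for (int i = 0; i < blocks; ++i)
        if (h_partial[i] > global_residual) global_residual = h_partial[i];
    printf("global residual: %f\n", global_residual);

    free(h_partial);
    cudaFree(a); cudaFree(b); cudaFree(partial);
    return 0;
}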
fe05ed932d64d25e0eaacdd22366ad6d83785b70.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "UdpateEnergyTerm_time.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *energy = NULL; hipMalloc(&energy, XSIZE*YSIZE); int energy_dim = 1; int nPatches = 1; float *idFocuser_focused = NULL; hipMalloc(&idFocuser_focused, XSIZE*YSIZE); float par_time_increase_energy_on_focus = 1; float par_time_decrease_energy_in_time = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( UdpateEnergyTerm_time), dim3(gridBlock),dim3(threadBlock), 0, 0, energy,energy_dim,nPatches,idFocuser_focused,par_time_increase_energy_on_focus,par_time_decrease_energy_in_time); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( UdpateEnergyTerm_time), dim3(gridBlock),dim3(threadBlock), 0, 0, energy,energy_dim,nPatches,idFocuser_focused,par_time_increase_energy_on_focus,par_time_decrease_energy_in_time); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( UdpateEnergyTerm_time), dim3(gridBlock),dim3(threadBlock), 0, 0, energy,energy_dim,nPatches,idFocuser_focused,par_time_increase_energy_on_focus,par_time_decrease_energy_in_time); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
fe05ed932d64d25e0eaacdd22366ad6d83785b70.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "UdpateEnergyTerm_time.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *energy = NULL; cudaMalloc(&energy, XSIZE*YSIZE); int energy_dim = 1; int nPatches = 1; float *idFocuser_focused = NULL; cudaMalloc(&idFocuser_focused, XSIZE*YSIZE); float par_time_increase_energy_on_focus = 1; float par_time_decrease_energy_in_time = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); UdpateEnergyTerm_time<<<gridBlock,threadBlock>>>(energy,energy_dim,nPatches,idFocuser_focused,par_time_increase_energy_on_focus,par_time_decrease_energy_in_time); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { UdpateEnergyTerm_time<<<gridBlock,threadBlock>>>(energy,energy_dim,nPatches,idFocuser_focused,par_time_increase_energy_on_focus,par_time_decrease_energy_in_time); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { UdpateEnergyTerm_time<<<gridBlock,threadBlock>>>(energy,energy_dim,nPatches,idFocuser_focused,par_time_increase_energy_on_focus,par_time_decrease_energy_in_time); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
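The benchmark harness above warms the kernel up with untimed launches and then brackets 1,000 launches with steady_clock timestamps; since kernel launches are asynchronous and nothing synchronizes before the end timestamp is read, the reported figure may reflect launch/enqueue overhead as much as kernel execution time. The sketch below shows an event-based alternative that waits on the recorded stop event before reading the elapsed time. dummy_kernel is a placeholder: the real UdpateEnergyTerm_time kernel is defined in the included file, which is not part of this listing.

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder kernel standing in for UdpateEnergyTerm_time (definition not shown above).
__global__ void dummy_kernel(float *data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] = data[i] * 0.5f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float *data;
    cudaMalloc((void **)&data, n * sizeof(float));
    cudaMemset(data, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // Warm-up launches, mirroring the untimed iterations in the harness above.
    for (int i = 0; i < 10; ++i) dummy_kernel<<<grid, block>>>(data, n);
    cudaDeviceSynchronize();

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) dummy_kernel<<<grid, block>>>(data, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // block until all 1000 launches have actually finished

    float total_ms = 0.0f;
    cudaEventElapsedTime(&total_ms, start, stop);
    printf("total: %f ms, per launch: %f ms\n", total_ms, total_ms / 1000.0f);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(data);
    return 0;
}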
acb1649958eb557d91b5624ba39200c54307cedf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "shared.cuh" struct ParticleRef { Point pos; Point dir; double nextdist; }; inline __device__ ParticleRef make_ref(const ParticleView &view, int i) { return {view.get_pos(i), view.get_dir(i), view.get_nextdist(i)}; } __device__ inline void saxpy(double *__restrict__ x, double *__restrict__ y, double *__restrict__ z, const double *__restrict__ u, const double *__restrict__ v, const double *__restrict__ w, double distance) { *x += *u * distance; *y += *v * distance; *z += *w * distance; } __device__ inline void move_impl(const ParticleRef ref) { saxpy(ref.pos.x, ref.pos.y, ref.pos.z, ref.dir.x, ref.dir.y, ref.dir.z, ref.nextdist); } __global__ void move(ParticleView view) { int i = thread_id(); if (i >= view.size) return; move_impl(make_ref(view, i)); }
acb1649958eb557d91b5624ba39200c54307cedf.cu
#include "shared.cuh" struct ParticleRef { Point pos; Point dir; double nextdist; }; inline __device__ ParticleRef make_ref(const ParticleView &view, int i) { return {view.get_pos(i), view.get_dir(i), view.get_nextdist(i)}; } __device__ inline void saxpy(double *__restrict__ x, double *__restrict__ y, double *__restrict__ z, const double *__restrict__ u, const double *__restrict__ v, const double *__restrict__ w, double distance) { *x += *u * distance; *y += *v * distance; *z += *w * distance; } __device__ inline void move_impl(const ParticleRef ref) { saxpy(ref.pos.x, ref.pos.y, ref.pos.z, ref.dir.x, ref.dir.y, ref.dir.z, ref.nextdist); } __global__ void move(ParticleView view) { int i = thread_id(); if (i >= view.size) return; move_impl(make_ref(view, i)); }
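This pair is a useful boundary case for the conversion: the file contains only device-side code and no runtime API calls, so the hipified version differs from the CUDA original only in the generated banner comment and the added hip/hip_runtime.h include. The __restrict__ qualifiers on saxpy promise the compiler that the position and direction arrays never alias, giving it more freedom to schedule the loads and stores. The sketch below is hypothetical: ParticleView and Point come from the unseen shared.cuh, so a plain struct-of-arrays layout is assumed in their place, and move_particles is an illustrative name rather than the file's own kernel.

#include <cuda_runtime.h>

// Assumed struct-of-arrays layout; the real ParticleView in shared.cuh may differ.
struct Particles {
    double *x, *y, *z;        // positions
    double *u, *v, *w;        // direction components
    double *nextdist;         // distance to advance each particle
    int size;
};

__global__ void move_particles(Particles p) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= p.size) return;
    double d = p.nextdist[i];
    // Same saxpy-style update as move_impl above: pos += dir * distance.
    p.x[i] += p.u[i] * d;
    p.y[i] += p.v[i] * d;
    p.z[i] += p.w[i] * d;
}

int main() {
    const int n = 1024;
    Particles p;
    p.size = n;
    double **fields[] = {&p.x, &p.y, &p.z, &p.u, &p.v, &p.w, &p.nextdist};
    for (double **f : fields) {
        cudaMalloc((void **)f, n * sizeof(double));
        cudaMemset(*f, 0, n * sizeof(double));   // zero data: the sketch only exercises the launch
    }
    move_particles<<<(n + 255) / 256, 256>>>(p);
    cudaDeviceSynchronize();
    for (double **f : fields) cudaFree(*f);
    return 0;
}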
30f8d7b98bab7a3bf746ab4ce8abf396ff8e69a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // allocate memory float *h_A = (float *)malloc(size); float *h_B = (float *)malloc(size); float *h_C = (float *)malloc(size); // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // ALLOCATE DEVICE MEMORY float *d_A = NULL; hipMalloc((void **)&d_A, size); float *d_B = NULL; hipMalloc((void **)&d_B, size); float *d_C = NULL; hipMalloc((void **)&d_C, size); // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements); // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); printf("Test PASSED\n"); // Free device global memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
30f8d7b98bab7a3bf746ab4ce8abf396ff8e69a6.cu
#include <stdio.h> __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // allocate memory float *h_A = (float *)malloc(size); float *h_B = (float *)malloc(size); float *h_C = (float *)malloc(size); // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // ALLOCATE DEVICE MEMORY float *d_A = NULL; cudaMalloc((void **)&d_A, size); float *d_B = NULL; cudaMalloc((void **)&d_B, size); float *d_C = NULL; cudaMalloc((void **)&d_C, size); // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements); // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); printf("Test PASSED\n"); // Free device global memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
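The vectorAdd pair above is the most compact illustration of what the hipify conversion in this collection actually changes: runtime calls gain the hip prefix (cudaMalloc → hipMalloc, cudaMemcpy → hipMemcpy, cudaFree → hipFree) and the triple-chevron launch becomes the hipLaunchKernelGGL macro, while the kernel body and host logic are untouched. The annotated sketch below restates a trimmed vectorAdd in CUDA with the corresponding HIP spellings in comments; unlike the original sample it checks the result of cudaDeviceSynchronize and zero-fills the inputs instead of using rand().

#include <cstdio>
#include <cuda_runtime.h>            // HIP: #include "hip/hip_runtime.h"

__global__ void vectorAdd(const float *A, const float *B, float *C, int n) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) C[i] = A[i] + B[i];
}

int main() {
    const int n = 1 << 16;
    const size_t size = n * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, size); // HIP: hipMalloc((void **)&d_A, size)
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_C, size);
    cudaMemset(d_A, 0, size);        // HIP: hipMemset(d_A, 0, size)
    cudaMemset(d_B, 0, size);

    int threads = 256, blocks = (n + threads - 1) / threads;
    // CUDA triple-chevron launch; hipify rewrites it as
    //   hipLaunchKernelGGL(vectorAdd, dim3(blocks), dim3(threads), 0, 0, d_A, d_B, d_C, n);
    vectorAdd<<<blocks, threads>>>(d_A, d_B, d_C, n);

    cudaError_t err = cudaDeviceSynchronize();   // HIP: hipDeviceSynchronize()
    if (err != cudaSuccess)
        printf("kernel failed: %s\n", cudaGetErrorString(err));

    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // HIP: hipFree(d_A); ...
    return 0;
}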
6f1090f0b11e4e246616149dd6cf3805a21f2b3a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/class_arg_kernel.cuh" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ const char* str; \ hipGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ std::cout << "(CUDART) returned " << hipGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 
0) return;\n" " T data0 = data[0];\n" " for( int i=0; i<N-1; ++i ) {\n" " data[0] *= data0;\n" " }\n" "}\n"; TEST(JitifyTest, Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " "pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " }\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 
grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. 
CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program( constmem_program_source, 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = 
{inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( "example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = 
jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(hipMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( hipMemcpy(&h_out[0], d_out, n * sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v2 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v3 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v4("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v5("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v6("bad_program\nNOT CUDA C!"), std::runtime_error); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; #if TORCH_HIP_VERSION < 11000 const char* cppstd = "-std=c++98"; #else const char* cppstd = "-std=c++11"; #endif auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); auto program_v2 = jitify::experimental::Program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); } static const char* const cub_program_source = "cub_program\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef hipcub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float sum = 0; for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. 
h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(hipFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float unused_array[3];\n" "__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { hipFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. 
EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(hipFree(d_data)); } static const char* const curand_program_source = "curand_program\n" "#include <hiprand/hiprand_kernel.h>\n" "__global__ void my_kernel() {}\n" "\n"; TEST(JitifyTest, CuRandKernel) { auto program_v2 = jitify::experimental::Program( curand_program_source, {}, // Note: --remove-unused-globals is added to remove huge precomputed // arrays that come from CURAND. {"-I" CUDA_INC_DIR, "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); // TODO: Expand this test to actually call hiprand kernels and check outputs. } static const char* const linktest_program1_source = "linktest_program1\n" "__constant__ int c = 5;\n" "__device__ int d = 7;\n" "__device__ int f(int i) { return i + 11; }\n" "\n"; static const char* const linktest_program2_source = "linktest_program2\n" "extern __constant__ int c;\n" "extern __device__ int d;\n" "extern __device__ int f(int);\n" "__global__ void my_kernel(int* data) {\n" " *data = f(*data + c + d);\n" "}\n" "\n"; TEST(JitifyTest, LinkExternalFiles) { hipFree(0); // Ensure temporary file is deleted at the end. 
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(hipFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { hipFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(hipFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : public Base {};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { hipFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } static const char* const builtin_numeric_limits_program_source = "builtin_numeric_limits_program\n" "#include <limits>\n" "struct MyType 
{};\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = std::numeric_limits<T>::min();\n" " data[1] = std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericLimitsHeader) { hipFree(0); using namespace jitify::experimental; auto program = Program(builtin_numeric_limits_program_source); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } TEST(JitifyTest, ClassKernelArg) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; int h_data; int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); dim3 grid(1); dim3 block(1); jitify::Program program = kernel_cache.program("example_headers/class_arg_kernel.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); { // test that we can pass an arg object to a kernel Arg arg(-1); CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART(hipDeviceSynchronize()); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg.x, h_data); } { // test that we can pass an arg object rvalue to a kernel int value = -2; CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, Arg(value))); CHECK_CUDART(hipDeviceSynchronize()); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(value, h_data); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-3); // references are passed as pointers since refernces are just pointers from // an ABI point of view CHECK_CUDA(program.kernel("class_arg_ref_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-4); CHECK_CUDA(program.kernel("class_arg_ptr_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } CHECK_CUDART(hipFree(d_data)); } static const char* const assert_program_source = R"( #include <cassert> __global__ void my_assert_kernel() { assert(0 == 1); } )"; TEST(JitifyTest, AssertHeader) { // Checks that cassert works as expected jitify::JitCache kernel_cache; auto program = kernel_cache.program(assert_program_source, {}, {"-I" CUDA_INC_DIR}); dim3 grid(1); dim3 block(1); CHECK_CUDA((program.kernel("my_assert_kernel") .instantiate<>() .configure(grid, block) .launch())); } static const char* const get_attribute_program_source = R"( __global__ void get_attribute_kernel(int *out, int *in) { __shared__ int buffer[4096]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, GetAttribute) { // Checks that we can get function attributes jitify::JitCache kernel_cache; auto program = kernel_cache.program(get_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = 
program.kernel("get_attribute_kernel").instantiate(); EXPECT_EQ(4096 * sizeof(int), instance.get_func_attribute(hipFuncAttributeSharedSizeBytes)); } static const char* const set_attribute_program_source = R"( __global__ void set_attribute_kernel(int *out, int *in) { extern __shared__ int buffer[]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, SetAttribute) { // Checks that we can set function attributes jitify::JitCache kernel_cache; int* in; CHECK_CUDART(hipMalloc((void**)&in, sizeof(int))); int* out; CHECK_CUDART(hipMalloc((void**)&out, sizeof(int))); // query the maximum supported shared bytes per block hipDevice_t device; CHECK_CUDA(hipDeviceGet(&device, 0)); int shared_bytes; CHECK_CUDA(hipDeviceGetAttribute( &shared_bytes, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); auto program = kernel_cache.program(set_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("set_attribute_kernel").instantiate(); instance.set_func_attribute(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_bytes); dim3 grid(1); dim3 block(1); // this kernel will fail on Volta+ unless the set attribute succeeded CHECK_CUDA(instance.configure(grid, block, shared_bytes).launch(out, in)); CHECK_CUDART(hipFree(out)); CHECK_CUDART(hipFree(in)); } // NOTE: Keep this as the last test in the file, in case the env var is sticky. TEST(JitifyTest, EnvVarOptions) { setenv("JITIFY_OPTIONS", "-bad_option", true); EXPECT_THROW(jitify::JitCache kernel_cache; auto program = kernel_cache.program(simple_program_source), std::runtime_error); EXPECT_THROW(jitify::experimental::Program program(simple_program_source), std::runtime_error); setenv("JITIFY_OPTIONS", "", true); }
6f1090f0b11e4e246616149dd6cf3805a21f2b3a.cu
/* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/class_arg_kernel.cuh" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ CUresult status = call; \ if (status != CUDA_SUCCESS) { \ const char* str; \ cuGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, CUDA_SUCCESS); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ cudaError_t status = call; \ if (status != cudaSuccess) { \ std::cout << "(CUDART) returned " << cudaGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, cudaSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " T data0 = data[0];\n" " for( int i=0; i<N-1; ++i ) {\n" " data[0] 
*= data0;\n" " }\n" "}\n"; TEST(JitifyTest, Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " "pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " }\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); 
CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. 
CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program( constmem_program_source, 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int 
inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( "example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I" 
CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(cudaMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( cudaMemcpy(&h_out[0], d_out, n * sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v2 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v3 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v4("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v5("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v6("bad_program\nNOT CUDA C!"), std::runtime_error); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; #if CUDA_VERSION < 11000 const char* cppstd = "-std=c++98"; #else const char* cppstd = "-std=c++11"; #endif auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); auto program_v2 = jitify::experimental::Program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); } static const char* const cub_program_source = "cub_program\n" "#include <cub/block/block_load.cuh>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <cub/block/block_reduce.cuh>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef cub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float sum = 0; for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. 
h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(cudaFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float unused_array[3];\n" "__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { cudaFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. 
EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(cudaFree(d_data)); } static const char* const curand_program_source = "curand_program\n" "#include <curand_kernel.h>\n" "__global__ void my_kernel() {}\n" "\n"; TEST(JitifyTest, CuRandKernel) { auto program_v2 = jitify::experimental::Program( curand_program_source, {}, // Note: --remove-unused-globals is added to remove huge precomputed // arrays that come from CURAND. {"-I" CUDA_INC_DIR, "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); // TODO: Expand this test to actually call curand kernels and check outputs. } static const char* const linktest_program1_source = "linktest_program1\n" "__constant__ int c = 5;\n" "__device__ int d = 7;\n" "__device__ int f(int i) { return i + 11; }\n" "\n"; static const char* const linktest_program2_source = "linktest_program2\n" "extern __constant__ int c;\n" "extern __device__ int d;\n" "extern __device__ int f(int);\n" "__global__ void my_kernel(int* data) {\n" " *data = f(*data + c + d);\n" "}\n" "\n"; TEST(JitifyTest, LinkExternalFiles) { cudaFree(0); // Ensure temporary file is deleted at the end. 
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(cudaFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { cudaFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(cudaFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : public Base {};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { cudaFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } static const char* const builtin_numeric_limits_program_source = "builtin_numeric_limits_program\n" "#include <limits>\n" 
"struct MyType {};\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = std::numeric_limits<T>::min();\n" " data[1] = std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericLimitsHeader) { cudaFree(0); using namespace jitify::experimental; auto program = Program(builtin_numeric_limits_program_source); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } TEST(JitifyTest, ClassKernelArg) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; int h_data; int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); dim3 grid(1); dim3 block(1); jitify::Program program = kernel_cache.program("example_headers/class_arg_kernel.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); { // test that we can pass an arg object to a kernel Arg arg(-1); CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART(cudaDeviceSynchronize()); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg.x, h_data); } { // test that we can pass an arg object rvalue to a kernel int value = -2; CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, Arg(value))); CHECK_CUDART(cudaDeviceSynchronize()); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(value, h_data); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-3); // references are passed as pointers since refernces are just pointers from // an ABI point of view CHECK_CUDA(program.kernel("class_arg_ref_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-4); CHECK_CUDA(program.kernel("class_arg_ptr_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } CHECK_CUDART(cudaFree(d_data)); } static const char* const assert_program_source = R"( #include <cassert> __global__ void my_assert_kernel() { assert(0 == 1); } )"; TEST(JitifyTest, AssertHeader) { // Checks that cassert works as expected jitify::JitCache kernel_cache; auto program = kernel_cache.program(assert_program_source, {}, {"-I" CUDA_INC_DIR}); dim3 grid(1); dim3 block(1); CHECK_CUDA((program.kernel("my_assert_kernel") .instantiate<>() .configure(grid, block) .launch())); } static const char* const get_attribute_program_source = R"( __global__ void get_attribute_kernel(int *out, int *in) { __shared__ int buffer[4096]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, GetAttribute) { // Checks that we can get function attributes jitify::JitCache kernel_cache; auto program = kernel_cache.program(get_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto 
instance = program.kernel("get_attribute_kernel").instantiate(); EXPECT_EQ(4096 * sizeof(int), instance.get_func_attribute(CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES)); } static const char* const set_attribute_program_source = R"( __global__ void set_attribute_kernel(int *out, int *in) { extern __shared__ int buffer[]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, SetAttribute) { // Checks that we can set function attributes jitify::JitCache kernel_cache; int* in; CHECK_CUDART(cudaMalloc((void**)&in, sizeof(int))); int* out; CHECK_CUDART(cudaMalloc((void**)&out, sizeof(int))); // query the maximum supported shared bytes per block CUdevice device; CHECK_CUDA(cuDeviceGet(&device, 0)); int shared_bytes; CHECK_CUDA(cuDeviceGetAttribute( &shared_bytes, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); auto program = kernel_cache.program(set_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("set_attribute_kernel").instantiate(); instance.set_func_attribute(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_bytes); dim3 grid(1); dim3 block(1); // this kernel will fail on Volta+ unless the set attribute succeeded CHECK_CUDA(instance.configure(grid, block, shared_bytes).launch(out, in)); CHECK_CUDART(cudaFree(out)); CHECK_CUDART(cudaFree(in)); } // NOTE: Keep this as the last test in the file, in case the env var is sticky. TEST(JitifyTest, EnvVarOptions) { setenv("JITIFY_OPTIONS", "-bad_option", true); EXPECT_THROW(jitify::JitCache kernel_cache; auto program = kernel_cache.program(simple_program_source), std::runtime_error); EXPECT_THROW(jitify::experimental::Program program(simple_program_source), std::runtime_error); setenv("JITIFY_OPTIONS", "", true); }
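The my_kernel2 source used by both MultipleKernels tests chains pointless_func(identity(sqrt(square(negate(indata[0]))))). pointless_func is supplied through file_callback as my_header4.cuh, but my_header1.cuh through my_header3.cuh are only referenced by name in this dump. A minimal sketch of definitions consistent with that call chain follows; the bodies and the exact split across the three headers are assumptions.

// Hypothetical contents for example_headers/my_header1-3.cuh; only the function
// names appear in the test source, the bodies below are assumed.
#pragma once

template <typename T>
__host__ __device__ T negate(T x) { return -x; }      // assumed sign flip

template <typename T>
__host__ __device__ T square(T x) { return x * x; }   // assumed x*x

template <typename T>
__host__ __device__ T identity(T x) { return x; }     // assumed pass-through

With bodies like these the kernel evaluates sqrt((-x) * (-x)) = |x|, which is consistent with both tests expecting outval to equal the positive input 3.14159f.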
4f43e11d67f0943e5195502165616c1d41e89cc5.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "add.hip"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
          BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      float* d_A = NULL;
      hipMalloc(&d_A, XSIZE * YSIZE);
      float* d_B = NULL;
      hipMalloc(&d_B, XSIZE * YSIZE);
      float* d_C = NULL;
      hipMalloc(&d_C, XSIZE * YSIZE);
      int widthA = XSIZE;
      int widthB = XSIZE;
      int widthC = XSIZE;
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL((add), dim3(gridBlock), dim3(threadBlock), 0, 0, d_A, d_B, d_C, widthA, widthB, widthC);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL((add), dim3(gridBlock), dim3(threadBlock), 0, 0, d_A, d_B, d_C, widthA, widthB, widthC);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL((add), dim3(gridBlock), dim3(threadBlock), 0, 0, d_A, d_B, d_C, widthA, widthB, widthC);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
    }
  }
}
4f43e11d67f0943e5195502165616c1d41e89cc5.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "add.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // each matrix holds XSIZE*YSIZE floats
            float *d_A = NULL;
            cudaMalloc(&d_A, XSIZE * YSIZE * sizeof(float));
            float *d_B = NULL;
            cudaMalloc(&d_B, XSIZE * YSIZE * sizeof(float));
            float *d_C = NULL;
            cudaMalloc(&d_C, XSIZE * YSIZE * sizeof(float));
            int widthA = XSIZE;
            int widthB = XSIZE;
            int widthC = XSIZE;
            // round the launch bounds up to a multiple of the block size
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            add<<<gridBlock, threadBlock>>>(d_A, d_B, d_C, widthA, widthB, widthC);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                add<<<gridBlock, threadBlock>>>(d_A, d_B, d_C, widthA, widthB, widthC);
            }
            // timed launches (asynchronous; no synchronization before `end`, so this is host-side launch time)
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                add<<<gridBlock, threadBlock>>>(d_A, d_B, d_C, widthA, widthB, widthC);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
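// The harness above times 1000 kernel launches with std::chrono; launches are
// asynchronous, so without a device synchronization the host clock mostly sees enqueue
// time. Below is a minimal sketch of event-based timing, which measures elapsed device
// time between two stream markers. add_elem is a stand-in element-wise kernel, not the
// `add` kernel from add.cu/add.hip.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void add_elem(const float* A, const float* B, float* C, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) C[i] = A[i] + B[i];
}

int main() {
    const int n = 1 << 20;
    float *dA, *dB, *dC;
    cudaMalloc(&dA, n * sizeof(float));
    cudaMalloc(&dB, n * sizeof(float));
    cudaMalloc(&dC, n * sizeof(float));
    dim3 block(256), grid((n + 255) / 256);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);                      // marker enqueued before the timed launches
    for (int i = 0; i < 1000; ++i)
        add_elem<<<grid, block>>>(dA, dB, dC, n);
    cudaEventRecord(stop);                       // marker enqueued after the last launch
    cudaEventSynchronize(stop);                  // block until the stop marker has executed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);      // device time between the two markers
    printf("1000 launches: %.3f ms\n", ms);

    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}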
b3b4d7e406d9957e8203a108970e8355d78f567e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int warp_thread_id = threadIdx.x % 32; __shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS]; __shared__ void **tmp_ptr; __shared__ void *arr[SHARED_MEM_ELEMENTS]; if (threadIdx.x == 0) { for (i=0; i < SHARED_MEM_ELEMENTS; i++) { arr[i] = (void *)&sdata[i]; } for (i=0; i < (SHARED_MEM_ELEMENTS - 1); i++) { sdata[i] = (unsigned long long)arr[i+1]; } sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long) arr[0]; } __syncthreads(); tmp_ptr = (void **)(&(arr[(threadIdx.x + stride)%SHARED_MEM_ELEMENTS])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { for (int l = 0; l < iterations; l++) { tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } // init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); // hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); 
hipEventElapsedTime(&time, start, stop); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
b3b4d7e406d9957e8203a108970e8355d78f567e.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 4096 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int warp_thread_id = threadIdx.x % 32; __shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS]; __shared__ void **tmp_ptr; __shared__ void *arr[SHARED_MEM_ELEMENTS]; if (threadIdx.x == 0) { for (i=0; i < SHARED_MEM_ELEMENTS; i++) { arr[i] = (void *)&sdata[i]; } for (i=0; i < (SHARED_MEM_ELEMENTS - 1); i++) { sdata[i] = (unsigned long long)arr[i+1]; } sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long) arr[0]; } __syncthreads(); tmp_ptr = (void **)(&(arr[(threadIdx.x + stride)%SHARED_MEM_ELEMENTS])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { for (int l = 0; l < iterations; l++) { tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } // init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); // cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); error_id = cudaGetLastError(); if (error_id 
!= cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
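// shared_latency above measures shared-memory latency by chasing a chain of pointers,
// where each load's address depends on the previous load, and the harness reports the
// event-timed kernel duration. The sketch below isolates the core idea with a per-SM
// cycle counter (clock64) around a dependent chase. It is a simplified illustration,
// not the benchmark's kernel; the array size and iteration count are arbitrary.
#include <cstdio>
#include <cuda_runtime.h>

#define CHASE_ELEMS 1024

__global__ void chase_latency(const unsigned int* next, long long* cycles, int iters) {
    __shared__ unsigned int s_next[CHASE_ELEMS];
    for (int i = threadIdx.x; i < CHASE_ELEMS; i += blockDim.x)
        s_next[i] = next[i];                      // copy the chase pattern into shared memory
    __syncthreads();

    unsigned int j = threadIdx.x % CHASE_ELEMS;
    long long t0 = clock64();
    for (int i = 0; i < iters; ++i)
        j = s_next[j];                            // each load depends on the previous one
    long long t1 = clock64();

    if (threadIdx.x == 0)
        cycles[blockIdx.x] = (t1 - t0) / iters;   // average cycles per dependent load
    if (j == 0xFFFFFFFFu) cycles[0] = j;          // keep the chase from being optimized away
}

int main() {
    unsigned int h_next[CHASE_ELEMS];
    for (int i = 0; i < CHASE_ELEMS; ++i) h_next[i] = (i + 1) % CHASE_ELEMS;
    unsigned int* d_next; long long* d_cycles;
    cudaMalloc(&d_next, sizeof(h_next));
    cudaMalloc(&d_cycles, sizeof(long long));
    cudaMemcpy(d_next, h_next, sizeof(h_next), cudaMemcpyHostToDevice);
    chase_latency<<<1, 32>>>(d_next, d_cycles, 10000);
    long long cycles = 0;
    cudaMemcpy(&cycles, d_cycles, sizeof(cycles), cudaMemcpyDeviceToHost);
    printf("~%lld cycles per shared-memory load\n", cycles);
    cudaFree(d_next); cudaFree(d_cycles);
    return 0;
}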
821e40c192ac6d7e0f5e2c19bef49a6e4a1320f9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "denoise.h"

union Color // 4 bytes = 4 chars = 1 float
{
    float c;
    uchar4 components;
};

__global__ void denoise_kernel(OutputBuffer input, float* d_output)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x >= input.width || y >= input.height) return;

    float3 color = make_float3(input.buffer[x*input.width*14 + y*14 + 0],
                               input.buffer[x*input.width*14 + y*14 + 1],
                               input.buffer[x*input.width*14 + y*14 + 2]);

    // clamp to range 0-1
    color.x = min(max(color.x, 0.0f), 1.0f);
    color.y = min(max(color.y, 0.0f), 1.0f);
    color.z = min(max(color.z, 0.0f), 1.0f);

    Color formatColor;
    formatColor.components = make_uchar4((unsigned char)(color.x*255.0),
                                         (unsigned char)(color.y*255.0),
                                         (unsigned char)(color.z*255.0), 1);

    // x and y are mixed up because I modified the eye rays so that the image output would be correct...
    d_output[x*input.width*3 + y*3 + 0] = y;
    d_output[x*input.width*3 + y*3 + 1] = input.width - x;
    d_output[x*input.width*3 + y*3 + 2] = formatColor.c;
}
821e40c192ac6d7e0f5e2c19bef49a6e4a1320f9.cu
#include "denoise.h" union Color // 4 bytes = 4 chars = 1 float { float c; uchar4 components; }; __global__ void denoise_kernel(OutputBuffer input, float* d_output) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= input.width || y >= input.height) return; float3 color = make_float3(input.buffer[x*input.width*14 + y*14 + 0], input.buffer[x*input.width*14 + y*14 + 1], input.buffer[x*input.width*14 + y*14 + 2]); // clamp to range 0-1 color.x = min(max(color.x, 0.0f), 1.0f); color.y = min(max(color.y, 0.0f), 1.0f); color.z = min(max(color.z, 0.0f), 1.0f); Color formatColor; formatColor.components = make_uchar4((unsigned char)(color.x*255.0), (unsigned char)(color.y*255.0), (unsigned char)(color.z*255.0), 1); // x and y are mixed up because I modified the eye rays so that the image output would be correct... d_output[x*input.width*3 + y*3 + 0] = y; d_output[x*input.width*3 + y*3 + 1] = input.width - x; d_output[x*input.width*3 + y*3 + 2] = formatColor.c; }
9b571a5f29fcdcf7b2850fc23243a1650bfbcb1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../inc/parser.h" #include "../inc/helper.cuh" #include <errno.h> #include <math.h> __global__ void shared_approach_aos(Tuple* R, int rSize, Tuple* S, int sSize, int portionSize, Result* outputResults, int* outputCounter) { extern __shared__ int shared[]; int* sA = shared; // S.a int* sX = (int*)&sA[portionSize]; // S.x int rIndex = blockIdx.x * blockDim.x + threadIdx.x; // //(rSize /blockDim.x) * blockDim.x + portionSize int threadID = threadIdx.x; //local thread id, i.e. within block int rA = R[rIndex].a; // store R.a to registers int sIndex = 0; while (sIndex < sSize) // iterate portions of the second array { if (threadID < portionSize) { sA[threadID] = S[sIndex + threadID].a; sX[threadID] = S[sIndex + threadID].x; } __syncthreads(); //for (int i = 0; i < portionSize; ++i) for (int i = 0; i < (sSize - sIndex > portionSize ? portionSize : sSize - sIndex); ++i) { if (rA > sA[i] && rIndex < rSize) { outputResults[atomicAdd(outputCounter, 1)] = { rA, sA[i], sX[i] }; } } sIndex += portionSize; __syncthreads(); } } TimePair callKernel(Tuple* R, LLONG rSize, Tuple* S, LLONG sSize, int blockSize, int portionSize, int deviceMemorySize) { float executionTime = 0.0; float millis = 0.0; hipEvent_t start, stop; checkErrors(hipEventCreate(&start)); checkErrors(hipEventCreate(&stop)); hipEvent_t* intermediate_start; hipEvent_t* intermediate_stop; Tuple* deviceR; Tuple* deviceS; // allocate memory for relations checkErrors(hipMalloc((void**)&(deviceR), sizeof(Tuple) * rSize)); checkErrors(hipMalloc((void**)&(deviceS), sizeof(Tuple) * sSize)); // copy relations to gpu checkErrors(hipMemcpy(deviceR, R, sizeof(Tuple) * rSize, hipMemcpyHostToDevice)); checkErrors(hipMemcpy(deviceS, S, sizeof(Tuple) * sSize, hipMemcpyHostToDevice)); Result* hostOutputResults; Result* deviceOutputResults; int* outputCounter; LLONG maxSpace = 0; LLONG md = deviceMemorySize * 1024LL * 1024LL * 1024LL; int t = 0; int side = 0; LLONG rIndex; LLONG sIndex; int rPortion; int sPortion; int tempOffset = 0; int offset = 0; int rectangles = 0; int count = 0; checkErrors(hipMalloc((void**)&(outputCounter), sizeof(int))); dim3 threadBlock(blockSize); dim3 grid((rSize / threadBlock.x) + 1); maxSpace = rSize * sSize * sizeof(Result); t = (maxSpace / md) + 1; printf("Md: %d, ||O||max: %llu, t: %d\n", deviceMemorySize, (maxSpace / 1024LL / 1024LL / 1024LL),t); printf("Grid: %dx%d, Thread Block: %dx%d\n", grid.x, grid.y, threadBlock.x, threadBlock.y); printf("Max results per pass %llu / %d = %d\n", md, sizeof(Result), (int)(md / (LLONG)sizeof(Result))); // allocate memory for device result checkErrors(hipMalloc((void**)&(deviceOutputResults), sizeof(Result) * (int)(md / (LLONG)sizeof(Result)))); // allocate 10gb -> 894784853 host memory to store results from device (13gb -> 1163220310) hostOutputResults = (Result*) malloc(sizeof(Result) * 1163220310); checkErrors(hipEventRecord(start, 0)); if(t == 1) // call kernel once and finish { hipLaunchKernelGGL(( shared_approach_aos) , dim3(grid), dim3(threadBlock), portionSize * sizeof(int) + portionSize * sizeof(int), 0, deviceR, rSize, deviceS, sSize, portionSize, deviceOutputResults, outputCounter); checkErrors(hipEventRecord(stop, 0)); checkErrors(hipMemcpyAsync(&tempOffset, outputCounter, sizeof(int), hipMemcpyDeviceToHost, 0)); checkErrors(hipMemcpyAsync(hostOutputResults, deviceOutputResults, tempOffset * sizeof(Result), hipMemcpyDeviceToHost, 0)); checkErrors(hipEventElapsedTime(&executionTime, 
start, stop)); offset += tempOffset; } else // call kernel multiple times { side = (int)sqrt((double)((rSize * sSize) / t)); if((rSize % side == 0) && (sSize % side == 0) || ((rSize / t <= sSize ) && (sSize <= rSize))) { rPortion = side; sPortion = side; } else { rPortion = (int) rSize / t; sPortion = (int) sSize; } rectangles = getActualNumberOfRectangles(rPortion, sPortion, rSize, sSize); printf("Number of overall rectangles: %d\n", rectangles); intermediate_start = (hipEvent_t*)malloc(sizeof(hipEvent_t) * rectangles); intermediate_stop = (hipEvent_t*)malloc(sizeof(hipEvent_t) * rectangles); for(int i = 0; i < rectangles; i++) { checkErrors(hipEventCreate(&intermediate_start[i])); checkErrors(hipEventCreate(&intermediate_stop[i])); } int zero = 0; rIndex = 0; while(rIndex < rSize) { sIndex = 0; while(sIndex < sSize) { if(rPortion > rSize - rIndex) rPortion = rSize - rIndex; if(sPortion > sSize - sIndex) sPortion = sSize - sIndex; checkErrors(hipEventRecord(intermediate_start[count], 0)); hipLaunchKernelGGL(( shared_approach_aos) , dim3(grid), dim3(threadBlock), portionSize * sizeof(int) + portionSize * sizeof(int), 0, deviceR + rIndex, rPortion, deviceS + sIndex, sPortion, portionSize, deviceOutputResults, outputCounter); checkErrors(hipEventRecord(intermediate_stop[count], 0)); checkErrors(hipMemcpyAsync(&tempOffset, outputCounter, sizeof(int), hipMemcpyDeviceToHost, 0)); checkErrors(hipMemcpyAsync(outputCounter, &zero, sizeof(int), hipMemcpyHostToDevice, 0)); checkErrors(hipMemcpyAsync(count == 0 ? hostOutputResults : hostOutputResults + offset, deviceOutputResults, tempOffset * sizeof(Result), hipMemcpyDeviceToHost, 0)); offset += tempOffset; count++; sIndex += sPortion; } rIndex += rPortion; } } checkErrors(hipEventRecord(stop, 0)); checkErrors(hipPeekAtLastError()); checkErrors(hipDeviceSynchronize()); checkErrors(hipEventSynchronize(stop)); checkErrors(hipEventElapsedTime(&millis, start, stop)); for(int i = 0; i < rectangles; i++) { float temp = 0.0; checkErrors(hipEventElapsedTime(&temp, intermediate_start[i], intermediate_stop[i])); executionTime += temp; } checkErrors(hipEventDestroy(start)); checkErrors(hipEventDestroy(stop)); for(int i = 0; i < rectangles; i++) { checkErrors(hipEventDestroy(intermediate_start[i])); checkErrors(hipEventDestroy(intermediate_stop[i])); } checkErrors(hipFree(deviceR)); checkErrors(hipFree(deviceS)); checkErrors(hipFree(deviceOutputResults)); checkErrors(hipDeviceReset()); /* int res = assertResultsGenralAoS(R, rSize, S, sSize, hostOutputResults, offset); if(res == 0) printf("Success!\n"); else printf("Fail :/\n");*/ free(hostOutputResults); return {executionTime, millis}; } int main(int argc, char** argv) { char* rPath; LLONG rSize; char* sPath; LLONG sSize; int blockSize; int repeats; int deviceMemory; int portionSize; char* eptr; // read input arguments if (argc != 9) { printf("Not enough arguments\n---------------\n"); printf("1st:\t R path\n"); printf("2nd:\t |R| (R size)\n"); printf("3rd:\t S path\n"); printf("4th:\t |S| (S size)\n"); printf("5th:\t Thread block size (max 1024)\n"); printf("6th:\t Portion size (max 32)\n"); printf("7th:\t Available Device Memory (in GB)\n"); printf("8th:\t Number of repeats\n"); return 1; } rPath = argv[1]; sPath = argv[3]; rSize = strtoll(argv[2], &eptr, 10); sSize = strtoll(argv[4], &eptr, 10); blockSize = strtol(argv[5], &eptr, 10); portionSize = strtol(argv[6], &eptr, 10); deviceMemory = strtol(argv[7], &eptr, 10); repeats = strtol(argv[8], &eptr, 10); if(rSize == 0 || sSize == 0 || blockSize == 0 || 
repeats == 0 || deviceMemory == 0 || portionSize == 0) { printf("Wrong input arguments (error: %d)", errno); return 1; } // allocate memory Tuple* R; Tuple* S; R = (Tuple*)malloc(sizeof(Relation) * rSize); S = (Tuple*)malloc(sizeof(Relation) * sSize); readRelationAoS(rPath, R); readRelationAoS(sPath, S); printf("Shared Memory Approach (AoS)\n"); // call kernel multiple times TimePair* pairs = (TimePair*)malloc(sizeof(TimePair) * repeats); float executionTimeAggregate = 0.0; float overallTimeAggregate = 0.0; for(int i = 0; i < repeats; ++i) { pairs[i] = callKernel(R, rSize, S, sSize, blockSize, portionSize, deviceMemory); executionTimeAggregate += pairs[i].executionTime; overallTimeAggregate += pairs[i].overallTime; } // calculate and print average time printf("-----------------\n"); printf("Execution time (GPU): %f\n", (executionTimeAggregate / (float) repeats)); printf("Transfer & overhead time: %f\n", (overallTimeAggregate - executionTimeAggregate) / (float) repeats); printf("-----------------\n"); printf("Overall Execution time: %f\n", (overallTimeAggregate / (float) repeats)); free(R); free(S); free(pairs); return 0; }
9b571a5f29fcdcf7b2850fc23243a1650bfbcb1e.cu
#include "../inc/parser.h" #include "../inc/helper.cuh" #include <errno.h> #include <math.h> __global__ void shared_approach_aos(Tuple* R, int rSize, Tuple* S, int sSize, int portionSize, Result* outputResults, int* outputCounter) { extern __shared__ int shared[]; int* sA = shared; // S.a int* sX = (int*)&sA[portionSize]; // S.x int rIndex = blockIdx.x * blockDim.x + threadIdx.x; // //(rSize /blockDim.x) * blockDim.x + portionSize int threadID = threadIdx.x; //local thread id, i.e. within block int rA = R[rIndex].a; // store R.a to registers int sIndex = 0; while (sIndex < sSize) // iterate portions of the second array { if (threadID < portionSize) { sA[threadID] = S[sIndex + threadID].a; sX[threadID] = S[sIndex + threadID].x; } __syncthreads(); //for (int i = 0; i < portionSize; ++i) for (int i = 0; i < (sSize - sIndex > portionSize ? portionSize : sSize - sIndex); ++i) { if (rA > sA[i] && rIndex < rSize) { outputResults[atomicAdd(outputCounter, 1)] = { rA, sA[i], sX[i] }; } } sIndex += portionSize; __syncthreads(); } } TimePair callKernel(Tuple* R, LLONG rSize, Tuple* S, LLONG sSize, int blockSize, int portionSize, int deviceMemorySize) { float executionTime = 0.0; float millis = 0.0; cudaEvent_t start, stop; checkErrors(cudaEventCreate(&start)); checkErrors(cudaEventCreate(&stop)); cudaEvent_t* intermediate_start; cudaEvent_t* intermediate_stop; Tuple* deviceR; Tuple* deviceS; // allocate memory for relations checkErrors(cudaMalloc((void**)&(deviceR), sizeof(Tuple) * rSize)); checkErrors(cudaMalloc((void**)&(deviceS), sizeof(Tuple) * sSize)); // copy relations to gpu checkErrors(cudaMemcpy(deviceR, R, sizeof(Tuple) * rSize, cudaMemcpyHostToDevice)); checkErrors(cudaMemcpy(deviceS, S, sizeof(Tuple) * sSize, cudaMemcpyHostToDevice)); Result* hostOutputResults; Result* deviceOutputResults; int* outputCounter; LLONG maxSpace = 0; LLONG md = deviceMemorySize * 1024LL * 1024LL * 1024LL; int t = 0; int side = 0; LLONG rIndex; LLONG sIndex; int rPortion; int sPortion; int tempOffset = 0; int offset = 0; int rectangles = 0; int count = 0; checkErrors(cudaMalloc((void**)&(outputCounter), sizeof(int))); dim3 threadBlock(blockSize); dim3 grid((rSize / threadBlock.x) + 1); maxSpace = rSize * sSize * sizeof(Result); t = (maxSpace / md) + 1; printf("Md: %d, ||O||max: %llu, t: %d\n", deviceMemorySize, (maxSpace / 1024LL / 1024LL / 1024LL),t); printf("Grid: %dx%d, Thread Block: %dx%d\n", grid.x, grid.y, threadBlock.x, threadBlock.y); printf("Max results per pass %llu / %d = %d\n", md, sizeof(Result), (int)(md / (LLONG)sizeof(Result))); // allocate memory for device result checkErrors(cudaMalloc((void**)&(deviceOutputResults), sizeof(Result) * (int)(md / (LLONG)sizeof(Result)))); // allocate 10gb -> 894784853 host memory to store results from device (13gb -> 1163220310) hostOutputResults = (Result*) malloc(sizeof(Result) * 1163220310); checkErrors(cudaEventRecord(start, 0)); if(t == 1) // call kernel once and finish { shared_approach_aos <<<grid, threadBlock, portionSize * sizeof(int) + portionSize * sizeof(int), 0>>>(deviceR, rSize, deviceS, sSize, portionSize, deviceOutputResults, outputCounter); checkErrors(cudaEventRecord(stop, 0)); checkErrors(cudaMemcpyAsync(&tempOffset, outputCounter, sizeof(int), cudaMemcpyDeviceToHost, 0)); checkErrors(cudaMemcpyAsync(hostOutputResults, deviceOutputResults, tempOffset * sizeof(Result), cudaMemcpyDeviceToHost, 0)); checkErrors(cudaEventElapsedTime(&executionTime, start, stop)); offset += tempOffset; } else // call kernel multiple times { side = 
(int)sqrt((double)((rSize * sSize) / t)); if((rSize % side == 0) && (sSize % side == 0) || ((rSize / t <= sSize ) && (sSize <= rSize))) { rPortion = side; sPortion = side; } else { rPortion = (int) rSize / t; sPortion = (int) sSize; } rectangles = getActualNumberOfRectangles(rPortion, sPortion, rSize, sSize); printf("Number of overall rectangles: %d\n", rectangles); intermediate_start = (cudaEvent_t*)malloc(sizeof(cudaEvent_t) * rectangles); intermediate_stop = (cudaEvent_t*)malloc(sizeof(cudaEvent_t) * rectangles); for(int i = 0; i < rectangles; i++) { checkErrors(cudaEventCreate(&intermediate_start[i])); checkErrors(cudaEventCreate(&intermediate_stop[i])); } int zero = 0; rIndex = 0; while(rIndex < rSize) { sIndex = 0; while(sIndex < sSize) { if(rPortion > rSize - rIndex) rPortion = rSize - rIndex; if(sPortion > sSize - sIndex) sPortion = sSize - sIndex; checkErrors(cudaEventRecord(intermediate_start[count], 0)); shared_approach_aos <<<grid, threadBlock, portionSize * sizeof(int) + portionSize * sizeof(int), 0>>>(deviceR + rIndex, rPortion, deviceS + sIndex, sPortion, portionSize, deviceOutputResults, outputCounter); checkErrors(cudaEventRecord(intermediate_stop[count], 0)); checkErrors(cudaMemcpyAsync(&tempOffset, outputCounter, sizeof(int), cudaMemcpyDeviceToHost, 0)); checkErrors(cudaMemcpyAsync(outputCounter, &zero, sizeof(int), cudaMemcpyHostToDevice, 0)); checkErrors(cudaMemcpyAsync(count == 0 ? hostOutputResults : hostOutputResults + offset, deviceOutputResults, tempOffset * sizeof(Result), cudaMemcpyDeviceToHost, 0)); offset += tempOffset; count++; sIndex += sPortion; } rIndex += rPortion; } } checkErrors(cudaEventRecord(stop, 0)); checkErrors(cudaPeekAtLastError()); checkErrors(cudaDeviceSynchronize()); checkErrors(cudaEventSynchronize(stop)); checkErrors(cudaEventElapsedTime(&millis, start, stop)); for(int i = 0; i < rectangles; i++) { float temp = 0.0; checkErrors(cudaEventElapsedTime(&temp, intermediate_start[i], intermediate_stop[i])); executionTime += temp; } checkErrors(cudaEventDestroy(start)); checkErrors(cudaEventDestroy(stop)); for(int i = 0; i < rectangles; i++) { checkErrors(cudaEventDestroy(intermediate_start[i])); checkErrors(cudaEventDestroy(intermediate_stop[i])); } checkErrors(cudaFree(deviceR)); checkErrors(cudaFree(deviceS)); checkErrors(cudaFree(deviceOutputResults)); checkErrors(cudaDeviceReset()); /* int res = assertResultsGenralAoS(R, rSize, S, sSize, hostOutputResults, offset); if(res == 0) printf("Success!\n"); else printf("Fail :/\n");*/ free(hostOutputResults); return {executionTime, millis}; } int main(int argc, char** argv) { char* rPath; LLONG rSize; char* sPath; LLONG sSize; int blockSize; int repeats; int deviceMemory; int portionSize; char* eptr; // read input arguments if (argc != 9) { printf("Not enough arguments\n---------------\n"); printf("1st:\t R path\n"); printf("2nd:\t |R| (R size)\n"); printf("3rd:\t S path\n"); printf("4th:\t |S| (S size)\n"); printf("5th:\t Thread block size (max 1024)\n"); printf("6th:\t Portion size (max 32)\n"); printf("7th:\t Available Device Memory (in GB)\n"); printf("8th:\t Number of repeats\n"); return 1; } rPath = argv[1]; sPath = argv[3]; rSize = strtoll(argv[2], &eptr, 10); sSize = strtoll(argv[4], &eptr, 10); blockSize = strtol(argv[5], &eptr, 10); portionSize = strtol(argv[6], &eptr, 10); deviceMemory = strtol(argv[7], &eptr, 10); repeats = strtol(argv[8], &eptr, 10); if(rSize == 0 || sSize == 0 || blockSize == 0 || repeats == 0 || deviceMemory == 0 || portionSize == 0) { printf("Wrong input arguments 
(error: %d)", errno); return 1; } // allocate memory Tuple* R; Tuple* S; R = (Tuple*)malloc(sizeof(Relation) * rSize); S = (Tuple*)malloc(sizeof(Relation) * sSize); readRelationAoS(rPath, R); readRelationAoS(sPath, S); printf("Shared Memory Approach (AoS)\n"); // call kernel multiple times TimePair* pairs = (TimePair*)malloc(sizeof(TimePair) * repeats); float executionTimeAggregate = 0.0; float overallTimeAggregate = 0.0; for(int i = 0; i < repeats; ++i) { pairs[i] = callKernel(R, rSize, S, sSize, blockSize, portionSize, deviceMemory); executionTimeAggregate += pairs[i].executionTime; overallTimeAggregate += pairs[i].overallTime; } // calculate and print average time printf("-----------------\n"); printf("Execution time (GPU): %f\n", (executionTimeAggregate / (float) repeats)); printf("Transfer & overhead time: %f\n", (overallTimeAggregate - executionTimeAggregate) / (float) repeats); printf("-----------------\n"); printf("Overall Execution time: %f\n", (overallTimeAggregate / (float) repeats)); free(R); free(S); free(pairs); return 0; }
7caa4073d12315746df5e66cced2c53990c09fb4.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Filename : implementation.cu Author : Martino Milani / Sbastien Gachoud SCIPER : 286204 / 250083 ============================================================================ */ #include <iostream> #include <iomanip> #include <sys/time.h> #include <hip/hip_runtime.h> using namespace std; // CPU Baseline void array_process(double *input, double *output, int length, int iterations) { double *temp; for(int n=0; n<(int) iterations; n++) { for(int i=1; i<length-1; i++) { for(int j=1; j<length-1; j++) { output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] + input[(i-1)*(length)+(j)] + input[(i-1)*(length)+(j+1)] + input[(i)*(length)+(j-1)] + input[(i)*(length)+(j)] + input[(i)*(length)+(j+1)] + input[(i+1)*(length)+(j-1)] + input[(i+1)*(length)+(j)] + input[(i+1)*(length)+(j+1)] ) / 9; } } output[(length/2-1)*length+(length/2-1)] = 1000; output[(length/2)*length+(length/2-1)] = 1000; output[(length/2-1)*length+(length/2)] = 1000; output[(length/2)*length+(length/2)] = 1000; temp = input; input = output; output = temp; } } __global__ void gpu_computation(double* input, double* output, int length); // GPU Optimized function void GPU_array_process(double *input, double *output, int length, int iterations) { //Cuda events for calculating elapsed time hipEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end; hipEventCreate(&cpy_H2D_start); hipEventCreate(&cpy_H2D_end); hipEventCreate(&cpy_D2H_start); hipEventCreate(&cpy_D2H_end); hipEventCreate(&comp_start); hipEventCreate(&comp_end); /* Preprocessing goes here */ /*----- What I did -----*/ const long SIZE = length * length * sizeof(double); double* gpu_input; double* gpu_output; dim3 threadsPerBlock(32,32); dim3 nbBlocks(length / threadsPerBlock.x + 1, length / threadsPerBlock.y + 1); const long PADDED_SIZE = (nbBlocks.x+1) * threadsPerBlock.x * (nbBlocks.y+1) * threadsPerBlock.y * sizeof(double); //+1 to avoid going out of the input hipSetDevice(0); if(hipMalloc((void**)&gpu_input, PADDED_SIZE) != hipSuccess){ cerr << "Error allocating input" << endl; } if(hipMalloc((void**)&gpu_output, PADDED_SIZE) != hipSuccess){ cerr << "Error allocating output" << endl; } /*----------------------*/ hipEventRecord(cpy_H2D_start); /* Copying array from host to device goes here */ /*----- What I did -----*/ if(hipMemcpy(gpu_input, input, SIZE, hipMemcpyHostToDevice) != hipSuccess){ cerr << "Error copying input to gpu" << endl; } if(hipMemcpy(gpu_output, output, SIZE, hipMemcpyHostToDevice) != hipSuccess){ cerr << "Error copying output to gpu" << endl; } /*----------------------*/ hipEventRecord(cpy_H2D_end); hipEventSynchronize(cpy_H2D_end); //Copy array from host to device hipEventRecord(comp_start); /* GPU calculation goes here */ /*----- What I did -----*/ for(int iter(0); iter < iterations; iter++){ if(iter%2){ hipLaunchKernelGGL(( gpu_computation) , dim3(nbBlocks), dim3(threadsPerBlock) , 0, 0, gpu_output, gpu_input, length); } else{ hipLaunchKernelGGL(( gpu_computation) , dim3(nbBlocks), dim3(threadsPerBlock) , 0, 0, gpu_input, gpu_output, length); } hipDeviceSynchronize(); } /*----------------------*/ hipEventRecord(comp_end); hipEventSynchronize(comp_end); hipEventRecord(cpy_D2H_start); /* Copying array from device to host goes here */ /*----- What I did -----*/ if(iterations%2==0) { if(hipMemcpy(output, gpu_input, SIZE, hipMemcpyDeviceToHost) != hipSuccess){ cerr << "failed to retrieve 
gpu_input" << endl; } } else{ if(hipMemcpy(output, gpu_output, SIZE, hipMemcpyDeviceToHost) != hipSuccess){ cerr << "failed to retrieve gpu_output" << endl; } } /*----------------------*/ hipEventRecord(cpy_D2H_end); hipEventSynchronize(cpy_D2H_end); /* Postprocessing goes here */ /*----- What I did -----*/ hipFree(&gpu_input); hipFree(&gpu_output); /*----------------------*/ float time; hipEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end); cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; hipEventElapsedTime(&time, comp_start, comp_end); cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl; hipEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end); cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; } __global__ void gpu_computation(double* input, double* output, int length){ int x_glob = (blockIdx.x * blockDim.x) + threadIdx.x + 1; //+1 to avoid first column int y_glob = (blockIdx.y * blockDim.y) + threadIdx.y + 1; //+1 to avoid first row int element_id = (y_glob * length) + x_glob; if ( ((x_glob == length/2-1) || (x_glob == length/2)) && ((y_glob == length/2-1) || (y_glob == length/2)) || x_glob >= length - 1 || y_glob >= length-1) { return; } output[element_id] = (input[(y_glob-1)*(length)+(x_glob-1)] + input[(y_glob-1)*(length)+(x_glob)] + input[(y_glob-1)*(length)+(x_glob+1)] + input[(y_glob)*(length)+(x_glob-1)] + input[(y_glob)*(length)+(x_glob)] + input[(y_glob)*(length)+(x_glob+1)] + input[(y_glob+1)*(length)+(x_glob-1)] + input[(y_glob+1)*(length)+(x_glob)] + input[(y_glob+1)*(length)+(x_glob+1)] ) / 9; }
7caa4073d12315746df5e66cced2c53990c09fb4.cu
/* ============================================================================ Filename : implementation.cu Author : Martino Milani / Sébastien Gachoud SCIPER : 286204 / 250083 ============================================================================ */ #include <iostream> #include <iomanip> #include <sys/time.h> #include <cuda_runtime.h> using namespace std; // CPU Baseline void array_process(double *input, double *output, int length, int iterations) { double *temp; for(int n=0; n<(int) iterations; n++) { for(int i=1; i<length-1; i++) { for(int j=1; j<length-1; j++) { output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] + input[(i-1)*(length)+(j)] + input[(i-1)*(length)+(j+1)] + input[(i)*(length)+(j-1)] + input[(i)*(length)+(j)] + input[(i)*(length)+(j+1)] + input[(i+1)*(length)+(j-1)] + input[(i+1)*(length)+(j)] + input[(i+1)*(length)+(j+1)] ) / 9; } } output[(length/2-1)*length+(length/2-1)] = 1000; output[(length/2)*length+(length/2-1)] = 1000; output[(length/2-1)*length+(length/2)] = 1000; output[(length/2)*length+(length/2)] = 1000; temp = input; input = output; output = temp; } } __global__ void gpu_computation(double* input, double* output, int length); // GPU Optimized function void GPU_array_process(double *input, double *output, int length, int iterations) { //Cuda events for calculating elapsed time cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end; cudaEventCreate(&cpy_H2D_start); cudaEventCreate(&cpy_H2D_end); cudaEventCreate(&cpy_D2H_start); cudaEventCreate(&cpy_D2H_end); cudaEventCreate(&comp_start); cudaEventCreate(&comp_end); /* Preprocessing goes here */ /*----- What I did -----*/ const long SIZE = length * length * sizeof(double); double* gpu_input; double* gpu_output; dim3 threadsPerBlock(32,32); dim3 nbBlocks(length / threadsPerBlock.x + 1, length / threadsPerBlock.y + 1); const long PADDED_SIZE = (nbBlocks.x+1) * threadsPerBlock.x * (nbBlocks.y+1) * threadsPerBlock.y * sizeof(double); //+1 to avoid going out of the input cudaSetDevice(0); if(cudaMalloc((void**)&gpu_input, PADDED_SIZE) != cudaSuccess){ cerr << "Error allocating input" << endl; } if(cudaMalloc((void**)&gpu_output, PADDED_SIZE) != cudaSuccess){ cerr << "Error allocating output" << endl; } /*----------------------*/ cudaEventRecord(cpy_H2D_start); /* Copying array from host to device goes here */ /*----- What I did -----*/ if(cudaMemcpy(gpu_input, input, SIZE, cudaMemcpyHostToDevice) != cudaSuccess){ cerr << "Error copying input to gpu" << endl; } if(cudaMemcpy(gpu_output, output, SIZE, cudaMemcpyHostToDevice) != cudaSuccess){ cerr << "Error copying output to gpu" << endl; } /*----------------------*/ cudaEventRecord(cpy_H2D_end); cudaEventSynchronize(cpy_H2D_end); //Copy array from host to device cudaEventRecord(comp_start); /* GPU calculation goes here */ /*----- What I did -----*/ for(int iter(0); iter < iterations; iter++){ if(iter%2){ gpu_computation <<< nbBlocks, threadsPerBlock >>> (gpu_output, gpu_input, length); } else{ gpu_computation <<< nbBlocks, threadsPerBlock >>> (gpu_input, gpu_output, length); } cudaThreadSynchronize(); } /*----------------------*/ cudaEventRecord(comp_end); cudaEventSynchronize(comp_end); cudaEventRecord(cpy_D2H_start); /* Copying array from device to host goes here */ /*----- What I did -----*/ if(iterations%2==0) { if(cudaMemcpy(output, gpu_input, SIZE, cudaMemcpyDeviceToHost) != cudaSuccess){ cerr << "failed to retrieve gpu_input" << endl; } } else{ if(cudaMemcpy(output, gpu_output, SIZE, cudaMemcpyDeviceToHost) != 
cudaSuccess){ cerr << "failed to retrieve gpu_output" << endl; } } /*----------------------*/ cudaEventRecord(cpy_D2H_end); cudaEventSynchronize(cpy_D2H_end); /* Postprocessing goes here */ /*----- What I did -----*/ cudaFree(&gpu_input); cudaFree(&gpu_output); /*----------------------*/ float time; cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end); cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; cudaEventElapsedTime(&time, comp_start, comp_end); cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl; cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end); cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl; } __global__ void gpu_computation(double* input, double* output, int length){ int x_glob = (blockIdx.x * blockDim.x) + threadIdx.x + 1; //+1 to avoid first column int y_glob = (blockIdx.y * blockDim.y) + threadIdx.y + 1; //+1 to avoid first row int element_id = (y_glob * length) + x_glob; if ( ((x_glob == length/2-1) || (x_glob == length/2)) && ((y_glob == length/2-1) || (y_glob == length/2)) || x_glob >= length - 1 || y_glob >= length-1) { return; } output[element_id] = (input[(y_glob-1)*(length)+(x_glob-1)] + input[(y_glob-1)*(length)+(x_glob)] + input[(y_glob-1)*(length)+(x_glob+1)] + input[(y_glob)*(length)+(x_glob-1)] + input[(y_glob)*(length)+(x_glob)] + input[(y_glob)*(length)+(x_glob+1)] + input[(y_glob+1)*(length)+(x_glob-1)] + input[(y_glob+1)*(length)+(x_glob)] + input[(y_glob+1)*(length)+(x_glob+1)] ) / 9; }
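// GPU_array_process above ping-pongs between gpu_input and gpu_output by testing the
// iteration parity twice (once per launch, once when copying back). Below is a sketch
// of the same iteration with an explicit pointer swap, mirroring the CPU baseline's
// temp/input/output rotation; names follow the function above, and the per-iteration
// synchronization is replaced by a single one after the dependent launch chain, since
// launches in the same stream already execute in order.
#include <utility>
#include <cuda_runtime.h>

__global__ void gpu_computation(double* input, double* output, int length); // defined above

void iterate(double* gpu_in, double* gpu_out, int length, int iterations,
             dim3 nbBlocks, dim3 threadsPerBlock) {
    for (int iter = 0; iter < iterations; ++iter) {
        gpu_computation<<<nbBlocks, threadsPerBlock>>>(gpu_in, gpu_out, length);
        std::swap(gpu_in, gpu_out);      // output of this step becomes input of the next
    }
    cudaDeviceSynchronize();             // one sync after the whole dependent chain
    // after the loop, gpu_in points at the most recently written buffer
}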
422a8ec96f7f7d467cb8e9eb9f07aad4113b2e4b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> /* * This example is a clone of replace-rand.cu that uses CUDA streams to overlap * the generation of random numbers using cuSPARSE with any host computation. */ /* * initialize_state initializes cuRAND device state */ __global__ void initialize_state(hiprandState_t *states) { int tid = blockIdx.x * blockDim.x + threadIdx.x; hiprand_init(9384, tid, 0, states + tid); } /* * refill_randoms uses the cuRAND device API to generate N random values using * the states passed to the kernel. */ __global__ void refill_randoms(float *dRand, int N, hiprandState_t *states) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nthreads = gridDim.x * blockDim.x; hiprandState_t *state = states + tid; for (i = tid; i < N; i += nthreads) { dRand[i] = hiprand_uniform(state); } } /* * An implementation of rand() that uses the cuRAND device API. */ float cuda_device_rand() { static hipStream_t stream = 0; static hiprandState_t *states = NULL; static float *dRand = NULL; static float *hRand = NULL; static int dRand_length = 1000000; static int dRand_used = dRand_length; int threads_per_block = 256; int blocks_per_grid = 30; if (dRand == NULL) { /* * If the cuRAND state hasn't been initialized yet, create a CUDA stream * to execute operations in, pre-allocate device memory to store the * generated random values in, and asynchronously launch a * refill_randoms kernel to begin generating random numbers. */ hipStreamCreate(&stream); hipMalloc((void **)&dRand, sizeof(float) * dRand_length); hipMalloc((void **)&states, sizeof(hiprandState_t) * threads_per_block * blocks_per_grid); hRand = (float *)malloc(sizeof(float) * dRand_length); hipLaunchKernelGGL(( initialize_state) , dim3(blocks_per_grid), dim3(threads_per_block), 0, stream , states); hipLaunchKernelGGL(( refill_randoms) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, dRand, dRand_length, states); } if (dRand_used == dRand_length) { /* * If all pre-generated random numbers have been consumed, wait for the * last launch of refill_randoms to complete, transfer those newly * generated random numbers back, and launch another batch random number * generation kernel asynchronously. */ hipStreamSynchronize(stream); hipMemcpy(hRand, dRand, sizeof(float) * dRand_length, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( refill_randoms) , dim3(blocks_per_grid), dim3(threads_per_block), 0, stream , dRand, dRand_length, states); dRand_used = 0; } // Return the next pre-generated random number return hRand[dRand_used++]; } /* * An implementation of rand() that uses the cuRAND host API. */ float cuda_host_rand() { static hipStream_t stream = 0; static float *dRand = NULL; static float *hRand = NULL; hiprandGenerator_t randGen; static int dRand_length = 1000000; static int dRand_used = 1000000; if (dRand == NULL) { /* * If the cuRAND state hasn't been initialized yet, construct a cuRAND * generator and configure it to use a CUDA stream. Pre-allocate device * memory to store the output random numbers and asynchronously launch * hiprandGenerateUniform. Because hiprandGenerateUniform uses the randGen * handle, it will execute in the set stream. 
*/ hiprandCreateGenerator(&randGen, HIPRAND_RNG_PSEUDO_DEFAULT); hipStreamCreate(&stream); hiprandSetStream(randGen, stream); hipMalloc((void **)&dRand, sizeof(float) * dRand_length); hRand = (float *)malloc(sizeof(float) * dRand_length); hiprandGenerateUniform(randGen, dRand, dRand_length); } if (dRand_used == dRand_length) { /* * If all pre-generated random numbers have been consumed, wait for the * last asynchronous hiprandGenerateUniform to complete, transfer the new * batch of random numbers back to the host, and relaunch * hiprandGenerateUniform. */ hipStreamSynchronize(stream); hipMemcpy(hRand, dRand, sizeof(float) * dRand_length, hipMemcpyDeviceToHost); hiprandGenerateUniform(randGen, dRand, dRand_length); dRand_used = 0; } // Return the next pre-generated random number return hRand[dRand_used++]; } float host_rand() { return (float)rand() / (float)RAND_MAX; } int main(int argc, char **argv) { int i; int N = 8388608; for (i = 0; i < N; i++) { float h = host_rand(); float d = cuda_host_rand(); float dd = cuda_device_rand(); printf("%2.4f %2.4f %2.4f\n", h, d, dd); getchar(); } return 0; }
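A stripped-down sketch of the cuRAND device-API pattern used above, written against the CUDA names (the hipified file uses the hiprand equivalents); the seed, sizes, and launch shape are arbitrary. Each thread owns one state, initialized once and then reused across batches via a grid-stride loop.

#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <cstdio>

__global__ void init_states(curandState *states, unsigned long long seed) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  curand_init(seed, tid, 0, &states[tid]);   // one independent sequence per thread
}

__global__ void fill_uniform(float *out, int n, curandState *states) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  curandState local = states[tid];           // work on a register copy of the state
  for (int i = tid; i < n; i += stride) out[i] = curand_uniform(&local);
  states[tid] = local;                       // save the state for the next batch
}

int main() {
  const int n = 1 << 20, threads = 256, blocks = 30;
  float *d_out;
  curandState *d_states;
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMalloc(&d_states, blocks * threads * sizeof(curandState));
  init_states<<<blocks, threads>>>(d_states, 9384ULL);
  fill_uniform<<<blocks, threads>>>(d_out, n, d_states);
  float h;
  cudaMemcpy(&h, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("first value: %f\n", h);
  cudaFree(d_out);
  cudaFree(d_states);
  return 0;
}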
422a8ec96f7f7d467cb8e9eb9f07aad4113b2e4b.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> /* * This example is a clone of replace-rand.cu that uses CUDA streams to overlap * the generation of random numbers using cuSPARSE with any host computation. */ /* * initialize_state initializes cuRAND device state */ __global__ void initialize_state(curandState *states) { int tid = blockIdx.x * blockDim.x + threadIdx.x; curand_init(9384, tid, 0, states + tid); } /* * refill_randoms uses the cuRAND device API to generate N random values using * the states passed to the kernel. */ __global__ void refill_randoms(float *dRand, int N, curandState *states) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; int nthreads = gridDim.x * blockDim.x; curandState *state = states + tid; for (i = tid; i < N; i += nthreads) { dRand[i] = curand_uniform(state); } } /* * An implementation of rand() that uses the cuRAND device API. */ float cuda_device_rand() { static cudaStream_t stream = 0; static curandState *states = NULL; static float *dRand = NULL; static float *hRand = NULL; static int dRand_length = 1000000; static int dRand_used = dRand_length; int threads_per_block = 256; int blocks_per_grid = 30; if (dRand == NULL) { /* * If the cuRAND state hasn't been initialized yet, create a CUDA stream * to execute operations in, pre-allocate device memory to store the * generated random values in, and asynchronously launch a * refill_randoms kernel to begin generating random numbers. */ cudaStreamCreate(&stream); cudaMalloc((void **)&dRand, sizeof(float) * dRand_length); cudaMalloc((void **)&states, sizeof(curandState) * threads_per_block * blocks_per_grid); hRand = (float *)malloc(sizeof(float) * dRand_length); initialize_state <<<blocks_per_grid, threads_per_block, 0, stream >>>(states); refill_randoms <<<blocks_per_grid, threads_per_block >>>(dRand, dRand_length, states); } if (dRand_used == dRand_length) { /* * If all pre-generated random numbers have been consumed, wait for the * last launch of refill_randoms to complete, transfer those newly * generated random numbers back, and launch another batch random number * generation kernel asynchronously. */ cudaStreamSynchronize(stream); cudaMemcpy(hRand, dRand, sizeof(float) * dRand_length, cudaMemcpyDeviceToHost); refill_randoms <<<blocks_per_grid, threads_per_block, 0, stream >>>(dRand, dRand_length, states); dRand_used = 0; } // Return the next pre-generated random number return hRand[dRand_used++]; } /* * An implementation of rand() that uses the cuRAND host API. */ float cuda_host_rand() { static cudaStream_t stream = 0; static float *dRand = NULL; static float *hRand = NULL; curandGenerator_t randGen; static int dRand_length = 1000000; static int dRand_used = 1000000; if (dRand == NULL) { /* * If the cuRAND state hasn't been initialized yet, construct a cuRAND * generator and configure it to use a CUDA stream. Pre-allocate device * memory to store the output random numbers and asynchronously launch * curandGenerateUniform. Because curandGenerateUniform uses the randGen * handle, it will execute in the set stream. 
*/ curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT); cudaStreamCreate(&stream); curandSetStream(randGen, stream); cudaMalloc((void **)&dRand, sizeof(float) * dRand_length); hRand = (float *)malloc(sizeof(float) * dRand_length); curandGenerateUniform(randGen, dRand, dRand_length); } if (dRand_used == dRand_length) { /* * If all pre-generated random numbers have been consumed, wait for the * last asynchronous curandGenerateUniform to complete, transfer the new * batch of random numbers back to the host, and relaunch * curandGenerateUniform. */ cudaStreamSynchronize(stream); cudaMemcpy(hRand, dRand, sizeof(float) * dRand_length, cudaMemcpyDeviceToHost); curandGenerateUniform(randGen, dRand, dRand_length); dRand_used = 0; } // Return the next pre-generated random number return hRand[dRand_used++]; } float host_rand() { return (float)rand() / (float)RAND_MAX; } int main(int argc, char **argv) { int i; int N = 8388608; for (i = 0; i < N; i++) { float h = host_rand(); float d = cuda_host_rand(); float dd = cuda_device_rand(); printf("%2.4f %2.4f %2.4f\n", h, d, dd); getchar(); } return 0; }
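On the host-API side, the generator handle is what carries the stream binding, so it has to outlive the first call; in the code above randGen is a non-static local, which means refills after the first batch appear to go through an uninitialized handle, and keeping it static (or file-scope) avoids that. Note also that the header comment mentions cuSPARSE, but the generation here is done with cuRAND. A minimal sketch of the host-API pattern, with illustrative seed, size, and names:

#include <cuda_runtime.h>
#include <curand.h>
#include <cstdio>
#include <cstdlib>

int main() {
  const size_t n = 1 << 20;
  float *d_rand, *h_rand = (float *)malloc(n * sizeof(float));
  cudaStream_t stream;
  curandGenerator_t gen;                       // keep this handle alive across batches

  cudaStreamCreate(&stream);
  cudaMalloc(&d_rand, n * sizeof(float));

  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetPseudoRandomGeneratorSeed(gen, 9384ULL);
  curandSetStream(gen, stream);                // generation is enqueued on this stream

  curandGenerateUniform(gen, d_rand, n);       // asynchronous with respect to the host
  /* ... unrelated host work can overlap here ... */
  cudaStreamSynchronize(stream);               // wait for the batch to finish
  cudaMemcpy(h_rand, d_rand, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("first value: %f\n", h_rand[0]);

  curandDestroyGenerator(gen);
  cudaStreamDestroy(stream);
  cudaFree(d_rand);
  free(h_rand);
  return 0;
}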
772672148c58feae84444c2ef3cf671938d0909e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseTensorUtils.h> #include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <THH/THHThrustAllocator.cuh> #include <THH/THHTensorSort.cuh> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/gather.h> #include <thrust/generate.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ #include <thrust/system/hip/execution_policy.h> #endif namespace at { namespace native { using namespace at::sparse; SparseTensor coalesce_sparse_cuda(const SparseTensor& self) { int64_t nnz = self._nnz(); if (self.is_coalesced()) { return self; } // NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false, // we should keep the original tensor intact and do coalesce on a copy of the tensor if (nnz < 2) { SparseTensor dst = self.clone(); dst._coalesced_(true); return dst; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); // Replace instances with // For indices, a simple sort + unique suffices // For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection). Tensor values = self._values(); int64_t sparse_dim = self.sparse_dim(); // indices will be modified by Thrust, so we have to clone or use new storage // here. LongTensor indices1D = flatten_indices(self._indices(), self.sizes(), true); LongTensor origIndices = at::empty({nnz}, self._indices().options()); LongTensor uniqueOffsets = at::empty({nnz}, self._indices().options()); typedef thrust::device_ptr<int64_t> thrust_ptr; thrust_ptr indicesIter(indices1D.data<int64_t>()); thrust_ptr origIndicesIter(origIndices.data<int64_t>()); thrust_ptr uniqueOffsetsIter(uniqueOffsets.data<int64_t>()); // Fill sortedOrigIndices with sequential indices thrust::counting_iterator<int64_t> countIterI(0); thrust::counting_iterator<int64_t> countIterO(0); thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter); thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter); thrust::sort_by_key(policy, indicesIter, indicesIter + nnz, origIndicesIter, ThrustLTOp<int64_t>() ); // this forces device-host synchronization! thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy, indicesIter, indicesIter + nnz, uniqueOffsetsIter ); int64_t newNnz = newEnd.first - indicesIter; indices1D.resize_({1, newNnz}); auto newValues_size = values.sizes().vec(); newValues_size[0] = newNnz; Tensor newValues = at::empty(newValues_size, values.options()); // If there is no values to copy, save running the kernel. 
if (newValues.numel() > 0) { values = values.contiguous(); int64_t stride = at::prod_intlist(values.sizes().slice(1)); dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half,values.type(), "coalesce_sparse_cuda", [&] { using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>; hipLaunchKernelGGL(( apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t>), dim3(grid), dim3(block), 0, stream, uniqueOffsets.data<int64_t>(), origIndices.data<int64_t>(), values.data<scalar_t>(), newValues.data<scalar_t>(), nnz, newNnz, stride ); }); } // this grid-strided version is slower but probably more flexible // to different sizes // int64_t blockX = min(stride, (int64_t) 512); // dim3 block(blockX, 512 / blockX); // int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y)); // THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>( // THCIndexTensor_(data)(state, uniqueOffsets), // THCIndexTensor_(data)(state, origIndices), // THCTensor_(data)(state, values), // THCTensor_(data)(state, newValues), // nnz, // newNnz, // stride // ); //////////////////////////////////////////////////////////// // unflatten indices if necessary LongTensor newIndices; if (sparse_dim == 1) { newIndices = indices1D; } else { newIndices = at::empty({sparse_dim, newNnz}, origIndices.options()); for (int64_t d = sparse_dim - 1; d >= 0; d--) { // NB: Not a select, so I can preserve the outer dimension LongTensor indicesSlice = newIndices.narrow(0, d, 1); // Note for the porting guide: THCTensor_(copy) does NOT do normal // broadcasting logic; instead, it will blast the elements from one // to the other so long as the numel is the same indicesSlice.copy_(indices1D); indices1D.div_(self.size(d)); indicesSlice.add_(indices1D, -self.size(d)); } } //////////////////////////////////////////////////////////// SparseTensor dst = ::at::native::sparse_coo_tensor(newIndices, newValues, self.sizes())._coalesced_(true); THCudaCheck(hipGetLastError()); return dst; } }} // namespace at::native
772672148c58feae84444c2ef3cf671938d0909e.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseTensorUtils.h> #include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <THC/THCThrustAllocator.cuh> #include <THC/THCTensorSort.cuh> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/gather.h> #include <thrust/generate.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/unique.h> #if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__ #include <thrust/system/cuda/execution_policy.h> #endif namespace at { namespace native { using namespace at::sparse; SparseTensor coalesce_sparse_cuda(const SparseTensor& self) { int64_t nnz = self._nnz(); if (self.is_coalesced()) { return self; } // NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false, // we should keep the original tensor intact and do coalesce on a copy of the tensor if (nnz < 2) { SparseTensor dst = self.clone(); dst._coalesced_(true); return dst; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); // Replace instances with // For indices, a simple sort + unique suffices // For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection). Tensor values = self._values(); int64_t sparse_dim = self.sparse_dim(); // indices will be modified by Thrust, so we have to clone or use new storage // here. LongTensor indices1D = flatten_indices(self._indices(), self.sizes(), true); LongTensor origIndices = at::empty({nnz}, self._indices().options()); LongTensor uniqueOffsets = at::empty({nnz}, self._indices().options()); typedef thrust::device_ptr<int64_t> thrust_ptr; thrust_ptr indicesIter(indices1D.data<int64_t>()); thrust_ptr origIndicesIter(origIndices.data<int64_t>()); thrust_ptr uniqueOffsetsIter(uniqueOffsets.data<int64_t>()); // Fill sortedOrigIndices with sequential indices thrust::counting_iterator<int64_t> countIterI(0); thrust::counting_iterator<int64_t> countIterO(0); thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter); thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter); thrust::sort_by_key(policy, indicesIter, indicesIter + nnz, origIndicesIter, ThrustLTOp<int64_t>() ); // this forces device-host synchronization! thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy, indicesIter, indicesIter + nnz, uniqueOffsetsIter ); int64_t newNnz = newEnd.first - indicesIter; indices1D.resize_({1, newNnz}); auto newValues_size = values.sizes().vec(); newValues_size[0] = newNnz; Tensor newValues = at::empty(newValues_size, values.options()); // If there is no values to copy, save running the kernel. 
if (newValues.numel() > 0) { values = values.contiguous(); int64_t stride = at::prod_intlist(values.sizes().slice(1)); dim3 grid(THCCeilDiv(newNnz, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128)); dim3 block(32, 4); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half,values.type(), "coalesce_sparse_cuda", [&] { using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>; apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t><<<grid, block, 0, stream>>>( uniqueOffsets.data<int64_t>(), origIndices.data<int64_t>(), values.data<scalar_t>(), newValues.data<scalar_t>(), nnz, newNnz, stride ); }); } // this grid-strided version is slower but probably more flexible // to different sizes // int64_t blockX = min(stride, (int64_t) 512); // dim3 block(blockX, 512 / blockX); // int64_t grid = min((int64_t) 1024, THCCeilDiv((int64_t) newNnz * stride, (int64_t) block.x * block.y)); // THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream>>>( // THCIndexTensor_(data)(state, uniqueOffsets), // THCIndexTensor_(data)(state, origIndices), // THCTensor_(data)(state, values), // THCTensor_(data)(state, newValues), // nnz, // newNnz, // stride // ); //////////////////////////////////////////////////////////// // unflatten indices if necessary LongTensor newIndices; if (sparse_dim == 1) { newIndices = indices1D; } else { newIndices = at::empty({sparse_dim, newNnz}, origIndices.options()); for (int64_t d = sparse_dim - 1; d >= 0; d--) { // NB: Not a select, so I can preserve the outer dimension LongTensor indicesSlice = newIndices.narrow(0, d, 1); // Note for the porting guide: THCTensor_(copy) does NOT do normal // broadcasting logic; instead, it will blast the elements from one // to the other so long as the numel is the same indicesSlice.copy_(indices1D); indices1D.div_(self.size(d)); indicesSlice.add_(indices1D, -self.size(d)); } } //////////////////////////////////////////////////////////// SparseTensor dst = ::at::native::sparse_coo_tensor(newIndices, newValues, self.sizes())._coalesced_(true); THCudaCheck(cudaGetLastError()); return dst; } }} // namespace at::native
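The index deduplication above boils down to a sort_by_key followed by unique_by_key. A toy, self-contained sketch of that pattern (plain Thrust on made-up keys, not the ATen tensors used in the file):

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/sequence.h>
#include <cstdio>

int main() {
  // Toy flattened sparse indices with duplicates.
  thrust::device_vector<long long> keys(6);
  keys[0] = 5; keys[1] = 2; keys[2] = 5; keys[3] = 9; keys[4] = 2; keys[5] = 5;

  // Remember where each entry came from so values can be gathered later.
  thrust::device_vector<long long> origPos(6);
  thrust::sequence(origPos.begin(), origPos.end());

  // Sort the keys, carrying the original positions along with them.
  thrust::sort_by_key(keys.begin(), keys.end(), origPos.begin());

  // For each surviving key, keep the offset of its first occurrence in the
  // sorted order; this is what drives the segmented value reduction.
  thrust::device_vector<long long> offsets(6);
  thrust::sequence(offsets.begin(), offsets.end());
  auto newEnd = thrust::unique_by_key(keys.begin(), keys.end(), offsets.begin());

  int newNnz = (int)(newEnd.first - keys.begin());
  printf("unique keys: %d\n", newNnz);  // expected: 3 (the keys 2, 5, 9)
  return 0;
}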
f5835fcecda116cd463e28f7a6d0ea4d8529c26a.hip
// !!! This is a file automatically generated by hipify!!! //#include "processing.cuh" #include "utils.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <string> #include <iostream> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <thrust/extrema.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/sequence.h> #include "loadSaveImage.h" #include <stdio.h> // simple cross correlation kernel copied from Mike's IPython Notebook __global__ void naive_normalized_cross_correlation ( float* d_response, unsigned char* d_original, unsigned char* d_template, int num_pixels_y, int num_pixels_x, int template_half_height, int template_height, int template_half_width, int template_width, int template_size, float template_mean ) { int ny = num_pixels_y; int nx = num_pixels_x; int knx = template_width; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y); int image_index_1d = (nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { // // compute image mean // float image_sum = 0.0f; for (int y = -template_half_height; y <= template_half_height; y++) { for (int x = - template_half_width; x <= template_half_width; x++) { int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y); int2 image_offset_index_2d_clamped = make_int2( min ( nx -1, max( 0, image_offset_index_2d.x)), min( ny - 1, max( 0, image_offset_index_2d.y))); int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x; unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ]; image_sum += (float) image_offset_value; } } float image_mean = image_sum / (float)template_size; // // compute sums // float sum_of_image_template_diff_products = 0.0f; float sum_of_squared_image_diffs = 0.0f; float sum_of_squared_template_diffs = 0.0f; for (int y = -template_half_height; y <= template_half_height; y++) { for (int x = - template_half_width; x <= template_half_width; x++) { int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y); int2 image_offset_index_2d_clamped = make_int2( min( nx -1, max(0, image_offset_index_2d.x) ), min( ny - 1, max(0, image_offset_index_2d.y ) ) ); int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x; unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ]; float image_diff = (float) image_offset_value - image_mean; int2 template_index_2d = make_int2( x + template_half_width, y + template_half_height ); int template_index_1d = ( knx * template_index_2d.y ) + template_index_2d.x; unsigned char template_value = d_template[ template_index_1d ]; float template_diff = template_value - template_mean; float image_template_diff_product = image_offset_value * template_diff; float squared_image_diff = image_diff * image_diff; float squared_template_diff = template_diff * template_diff; sum_of_image_template_diff_products += image_template_diff_product; sum_of_squared_image_diffs += squared_image_diff; sum_of_squared_template_diffs += squared_template_diff; } } // // compute final result // float result_value = 0.0f; if ( sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0 ) { result_value = sum_of_image_template_diff_products / sqrt( sum_of_squared_image_diffs * 
sum_of_squared_template_diffs ); } d_response[ image_index_1d ] = result_value; } } __global__ void remove_redness_from_coordinates ( const unsigned int* d_coordinates, unsigned char* d_r, unsigned char* d_b, unsigned char* d_g, unsigned char* d_r_output, int num_coordinates, int num_pixels_y, int num_pixels_x, int template_half_height, int template_half_width ) { int ny = num_pixels_y; int nx = num_pixels_x; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; int imgSize = num_pixels_x * num_pixels_y; if ( global_index_1d < num_coordinates ) { unsigned int image_index_1d = d_coordinates[ imgSize - global_index_1d - 1]; ushort2 image_index_2d = make_ushort2( image_index_1d % num_pixels_x, image_index_1d / num_pixels_x); for (int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++) { for (int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++) { int2 image_offset_index_2d = make_int2( x, y); int2 image_offset_index_2d_clamped = make_int2( min(nx -1, max(0, image_offset_index_2d.x)), min( ny - 1, max( 0, image_offset_index_2d.y) ) ); int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x; unsigned char g_value = d_g[ image_offset_index_1d_clamped ]; unsigned char b_value = d_b[ image_offset_index_1d_clamped ]; unsigned int gb_average = ( g_value + b_value ) / 2; d_r_output[ image_offset_index_1d_clamped ] = (unsigned char) gb_average; } } } } // Note std::unary_function and thrust::unary_function define the types of input and output only // // Because C++11 language support makes the functionality of unary_function obsolete, // its use is optional if C++11 language features are enabled. struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> >{ __host__ __device__ thrust::tuple<unsigned char, unsigned char, unsigned char> operator() (uchar4 pixel) { return thrust::make_tuple(pixel.x, pixel.y, pixel.z); } }; struct combineChannels : thrust::unary_function<thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4>{ __host__ __device__ uchar4 operator() (thrust::tuple<unsigned char, unsigned char, unsigned char> t){ return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255); } }; struct combineResponses : thrust::unary_function<float, thrust::tuple<float, float, float> >{ __host__ __device__ float operator() (thrust::tuple<float, float, float> t){ return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t); } }; // we need to save the input so we can remove the redeye for the output static thrust::device_vector<unsigned char> d_red; static thrust::device_vector<unsigned char> d_blue; static thrust::device_vector<unsigned char> d_green; static size_t numRowsImg; static size_t numColsImg; static size_t templateHalfWidth; static size_t templateHalfHeight; //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
/* * - read image and template image * - transform array of structures (AoS) into structure of arrays (SoA) * - normalize each color channel * - calculate correlation between image and template image * - */ void preProcess(unsigned int **inputVals, unsigned int **inputPos, unsigned int **outputVals, unsigned int **outputPos, size_t &numElem, const std::string& filename, const std::string& templateFilename) { //make sure the context initializes ok checkCudaErrors(hipFree(0)); uchar4 *inImg; uchar4 *eyeTemplate; size_t numRowsTemplate, numColsTemplate; loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg); loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate, &numColsTemplate); templateHalfWidth = (numColsTemplate - 1) / 2; templateHalfHeight = (numRowsTemplate - 1) / 2; // we need to split each image into its separate channels // use thrust library to demonstrate basic uses numElem = numRowsImg * numColsImg; size_t templateSize = numRowsTemplate * numColsTemplate; thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg); thrust::device_vector<uchar4> d_Template(eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate); d_red.resize(numElem); d_blue.resize(numElem); d_green.resize(numElem); thrust::device_vector<unsigned char> d_red_template(templateSize); thrust::device_vector<unsigned char> d_blue_template(templateSize); thrust::device_vector<unsigned char> d_green_template(templateSize); // split the image, and store results into (d_red, d_green, d_blue) // d_red[0], d_green[0], d_blue[0] = d_Img[0].x, d_Img[0].y, d_Img[0].z thrust::transform(d_Img.begin(), d_Img.end(), thrust::make_zip_iterator( thrust::make_tuple(d_red.begin(), d_blue.begin(), d_green.begin())), splitChannels()); // split the template thrust::transform(d_Template.begin(), d_Template.end(), thrust::make_zip_iterator(thrust::make_tuple(d_red_template.begin(), d_blue_template.begin(), d_green_template.begin())), splitChannels()); thrust::device_vector<float> d_red_response(numElem); thrust::device_vector<float> d_blue_response(numElem); thrust::device_vector<float> d_green_response(numElem); // need to compute the mean for each template channel unsigned int r_sum = thrust::reduce(d_red_template.begin(), d_red_template.end(), 0); unsigned int b_sum = thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0); unsigned int g_sum = thrust::reduce(d_green_template.begin(), d_green_template.end(), 0); float r_mean = (double)r_sum / templateSize; float b_mean = (double)b_sum / templateSize; float g_mean = (double)g_sum / templateSize; const dim3 blockSize(32, 8, 1); const dim3 gridSize( (numColsImg + blockSize.x - 1) / blockSize.x, (numRowsImg + blockSize.y - 1) / blockSize.y, 1); // now compute the cross-correlations for each channel hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(d_red_response.data()), thrust::raw_pointer_cast(d_red.data()), thrust::raw_pointer_cast(d_red_template.data()), numRowsImg, numColsImg, templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate, numRowsTemplate * numColsTemplate, r_mean); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(d_blue_response.data()), thrust::raw_pointer_cast(d_blue.data()), thrust::raw_pointer_cast(d_blue_template.data()), numRowsImg, numColsImg, templateHalfHeight, numRowsTemplate, templateHalfWidth, 
numColsTemplate, numRowsTemplate * numColsTemplate, b_mean); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( naive_normalized_cross_correlation), dim3(gridSize), dim3(blockSize), 0, 0, thrust::raw_pointer_cast(d_green_response.data()), thrust::raw_pointer_cast(d_green.data()), thrust::raw_pointer_cast(d_green_template.data()), numRowsImg, numColsImg, templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate, numRowsTemplate * numColsTemplate, g_mean); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // generate combined response - multiply all channels together thrust::device_vector<float> d_combined_response(numElem); // combine data: d_combined_response = (d_red, d_blue, d_green) thrust::transform(thrust::make_zip_iterator(thrust::make_tuple( d_red_response.begin(), d_blue_response.begin(), d_green_response.begin())), thrust::make_zip_iterator(thrust::make_tuple( d_red_response.end(), d_blue_response.end(), d_green_response.end())), d_combined_response.begin(), combineResponses()); // find max/min of response typedef thrust::device_vector<float>::iterator floatIt; thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(d_combined_response.begin(), d_combined_response.end()); float bias = *minmax.first; // we need to make all the numbers positive so that the students can sort them without any bit twiddling thrust::transform(d_combined_response.begin(), d_combined_response.end(), thrust::make_constant_iterator(-bias), d_combined_response.begin(), thrust::plus<float>()); // now we need to create the 1-D coordinates that will be attached to the keys // allocate device mem with value = [0..numElem] thrust::device_vector<unsigned int> coords(numElem); thrust::sequence(coords.begin(), coords.end()); // coords=[0..numElem -1] // allocate memory for output and copy since our device vectors will go out of scope //and be deleted checkCudaErrors(hipMalloc(inputVals, sizeof(unsigned int) * numElem)); checkCudaErrors(hipMalloc(inputPos, sizeof(unsigned int) * numElem)); checkCudaErrors(hipMalloc(outputVals, sizeof(unsigned int) * numElem)); checkCudaErrors(hipMalloc(outputPos, sizeof(unsigned int) * numElem)); hipMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()), sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice); hipMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()), sizeof(unsigned int) * numElem, hipMemcpyDeviceToDevice); checkCudaErrors(hipMemset(*outputVals, 0, sizeof(unsigned int) * numElem)); checkCudaErrors(hipMemset(*outputPos, 0, sizeof(unsigned int) * numElem)); } void postProcess(const unsigned int* const outputVals, const unsigned int* const outputPos, const size_t numElems, const std::string& output_file){ thrust::device_vector<unsigned char> d_output_red = d_red; const dim3 blockSize(256, 1, 1); const dim3 gridSize( (40 + blockSize.x - 1) / blockSize.x, 1, 1); hipLaunchKernelGGL(( remove_redness_from_coordinates), dim3(gridSize), dim3(blockSize), 0, 0, outputPos, thrust::raw_pointer_cast(d_red.data()), thrust::raw_pointer_cast(d_blue.data()), thrust::raw_pointer_cast(d_green.data()), thrust::raw_pointer_cast(d_output_red.data()), 40, numRowsImg, numColsImg, 9, 9); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // combine the new red channel with original blue and green for output thrust::device_vector<uchar4> d_outputImg(numElems); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple( d_output_red.begin(), d_blue.begin(), d_green.begin())), 
thrust::make_zip_iterator(thrust::make_tuple( d_output_red.end(), d_blue.end(), d_green.end())), d_outputImg.begin(), combineChannels()); thrust::host_vector<uchar4> h_Img = d_outputImg; saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file); /* clear the global vectors otherwise something goes wrong trying to free them * Note: std::vector.clear() will remove all elements in the vector * std::vector.shrink_to_fit() will delete pre-allocated memory */ d_red.clear(); d_red.shrink_to_fit(); d_blue.clear(); d_blue.shrink_to_fit(); d_green.clear(); d_green.shrink_to_fit(); }
f5835fcecda116cd463e28f7a6d0ea4d8529c26a.cu
//#include "processing.cuh" #include "utils.h" #include <cuda.h> #include <cuda_runtime.h> #include <string> #include <iostream> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/reduce.h> #include <thrust/extrema.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/sequence.h> #include "loadSaveImage.h" #include <stdio.h> // simple cross correlation kernel copied from Mike's IPython Notebook __global__ void naive_normalized_cross_correlation ( float* d_response, unsigned char* d_original, unsigned char* d_template, int num_pixels_y, int num_pixels_x, int template_half_height, int template_height, int template_half_width, int template_width, int template_size, float template_mean ) { int ny = num_pixels_y; int nx = num_pixels_x; int knx = template_width; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y); int image_index_1d = (nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { // // compute image mean // float image_sum = 0.0f; for (int y = -template_half_height; y <= template_half_height; y++) { for (int x = - template_half_width; x <= template_half_width; x++) { int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y); int2 image_offset_index_2d_clamped = make_int2( min ( nx -1, max( 0, image_offset_index_2d.x)), min( ny - 1, max( 0, image_offset_index_2d.y))); int image_offset_index_1d_clamped = ( nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x; unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ]; image_sum += (float) image_offset_value; } } float image_mean = image_sum / (float)template_size; // // compute sums // float sum_of_image_template_diff_products = 0.0f; float sum_of_squared_image_diffs = 0.0f; float sum_of_squared_template_diffs = 0.0f; for (int y = -template_half_height; y <= template_half_height; y++) { for (int x = - template_half_width; x <= template_half_width; x++) { int2 image_offset_index_2d = make_int2( image_index_2d.x + x, image_index_2d.y + y); int2 image_offset_index_2d_clamped = make_int2( min( nx -1, max(0, image_offset_index_2d.x) ), min( ny - 1, max(0, image_offset_index_2d.y ) ) ); int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x; unsigned char image_offset_value = d_original[ image_offset_index_1d_clamped ]; float image_diff = (float) image_offset_value - image_mean; int2 template_index_2d = make_int2( x + template_half_width, y + template_half_height ); int template_index_1d = ( knx * template_index_2d.y ) + template_index_2d.x; unsigned char template_value = d_template[ template_index_1d ]; float template_diff = template_value - template_mean; float image_template_diff_product = image_offset_value * template_diff; float squared_image_diff = image_diff * image_diff; float squared_template_diff = template_diff * template_diff; sum_of_image_template_diff_products += image_template_diff_product; sum_of_squared_image_diffs += squared_image_diff; sum_of_squared_template_diffs += squared_template_diff; } } // // compute final result // float result_value = 0.0f; if ( sum_of_squared_image_diffs != 0 && sum_of_squared_template_diffs != 0 ) { result_value = sum_of_image_template_diff_products / sqrt( sum_of_squared_image_diffs * sum_of_squared_template_diffs ); } d_response[ image_index_1d ] = result_value; } } 
__global__ void remove_redness_from_coordinates ( const unsigned int* d_coordinates, unsigned char* d_r, unsigned char* d_b, unsigned char* d_g, unsigned char* d_r_output, int num_coordinates, int num_pixels_y, int num_pixels_x, int template_half_height, int template_half_width ) { int ny = num_pixels_y; int nx = num_pixels_x; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; int imgSize = num_pixels_x * num_pixels_y; if ( global_index_1d < num_coordinates ) { unsigned int image_index_1d = d_coordinates[ imgSize - global_index_1d - 1]; ushort2 image_index_2d = make_ushort2( image_index_1d % num_pixels_x, image_index_1d / num_pixels_x); for (int y = image_index_2d.y - template_half_height; y <= image_index_2d.y + template_half_height; y++) { for (int x = image_index_2d.x - template_half_width; x <= image_index_2d.x + template_half_width; x++) { int2 image_offset_index_2d = make_int2( x, y); int2 image_offset_index_2d_clamped = make_int2( min(nx -1, max(0, image_offset_index_2d.x)), min( ny - 1, max( 0, image_offset_index_2d.y) ) ); int image_offset_index_1d_clamped = (nx * image_offset_index_2d_clamped.y ) + image_offset_index_2d_clamped.x; unsigned char g_value = d_g[ image_offset_index_1d_clamped ]; unsigned char b_value = d_b[ image_offset_index_1d_clamped ]; unsigned int gb_average = ( g_value + b_value ) / 2; d_r_output[ image_offset_index_1d_clamped ] = (unsigned char) gb_average; } } } } // Note std::unary_function and thrust::unary_function define the types of input and output only // // Because C++11 language support makes the functionality of unary_function obsolete, // its use is optional if C++11 language features are enabled. struct splitChannels : thrust::unary_function<uchar4, thrust::tuple<unsigned char, unsigned char, unsigned char> >{ __host__ __device__ thrust::tuple<unsigned char, unsigned char, unsigned char> operator() (uchar4 pixel) { return thrust::make_tuple(pixel.x, pixel.y, pixel.z); } }; struct combineChannels : thrust::unary_function<thrust::tuple<unsigned char, unsigned char, unsigned char>, uchar4>{ __host__ __device__ uchar4 operator() (thrust::tuple<unsigned char, unsigned char, unsigned char> t){ return make_uchar4(thrust::get<0>(t), thrust::get<1>(t), thrust::get<2>(t), 255); } }; struct combineResponses : thrust::unary_function<float, thrust::tuple<float, float, float> >{ __host__ __device__ float operator() (thrust::tuple<float, float, float> t){ return thrust::get<0>(t) * thrust::get<1>(t) * thrust::get<2>(t); } }; // we need to save the input so we can remove the redeye for the output static thrust::device_vector<unsigned char> d_red; static thrust::device_vector<unsigned char> d_blue; static thrust::device_vector<unsigned char> d_green; static size_t numRowsImg; static size_t numColsImg; static size_t templateHalfWidth; static size_t templateHalfHeight; //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
/* * - read image and template image * - transform array of structures (AoS) into structure of arrays (SoA) * - normalize each color channel * - calculate correlation between image and template image * - */ void preProcess(unsigned int **inputVals, unsigned int **inputPos, unsigned int **outputVals, unsigned int **outputPos, size_t &numElem, const std::string& filename, const std::string& templateFilename) { //make sure the context initializes ok checkCudaErrors(cudaFree(0)); uchar4 *inImg; uchar4 *eyeTemplate; size_t numRowsTemplate, numColsTemplate; loadImageRGBA(filename, &inImg, &numRowsImg, &numColsImg); loadImageRGBA(templateFilename, &eyeTemplate, &numRowsTemplate, &numColsTemplate); templateHalfWidth = (numColsTemplate - 1) / 2; templateHalfHeight = (numRowsTemplate - 1) / 2; // we need to split each image into its separate channels // use thrust library to demonstrate basic uses numElem = numRowsImg * numColsImg; size_t templateSize = numRowsTemplate * numColsTemplate; thrust::device_vector<uchar4> d_Img(inImg, inImg + numRowsImg * numColsImg); thrust::device_vector<uchar4> d_Template(eyeTemplate, eyeTemplate + numRowsTemplate * numColsTemplate); d_red.resize(numElem); d_blue.resize(numElem); d_green.resize(numElem); thrust::device_vector<unsigned char> d_red_template(templateSize); thrust::device_vector<unsigned char> d_blue_template(templateSize); thrust::device_vector<unsigned char> d_green_template(templateSize); // split the image, and store results into (d_red, d_green, d_blue) // d_red[0], d_green[0], d_blue[0] = d_Img[0].x, d_Img[0].y, d_Img[0].z thrust::transform(d_Img.begin(), d_Img.end(), thrust::make_zip_iterator( thrust::make_tuple(d_red.begin(), d_blue.begin(), d_green.begin())), splitChannels()); // split the template thrust::transform(d_Template.begin(), d_Template.end(), thrust::make_zip_iterator(thrust::make_tuple(d_red_template.begin(), d_blue_template.begin(), d_green_template.begin())), splitChannels()); thrust::device_vector<float> d_red_response(numElem); thrust::device_vector<float> d_blue_response(numElem); thrust::device_vector<float> d_green_response(numElem); // need to compute the mean for each template channel unsigned int r_sum = thrust::reduce(d_red_template.begin(), d_red_template.end(), 0); unsigned int b_sum = thrust::reduce(d_blue_template.begin(), d_blue_template.end(), 0); unsigned int g_sum = thrust::reduce(d_green_template.begin(), d_green_template.end(), 0); float r_mean = (double)r_sum / templateSize; float b_mean = (double)b_sum / templateSize; float g_mean = (double)g_sum / templateSize; const dim3 blockSize(32, 8, 1); const dim3 gridSize( (numColsImg + blockSize.x - 1) / blockSize.x, (numRowsImg + blockSize.y - 1) / blockSize.y, 1); // now compute the cross-correlations for each channel naive_normalized_cross_correlation<<<gridSize, blockSize>>>(thrust::raw_pointer_cast(d_red_response.data()), thrust::raw_pointer_cast(d_red.data()), thrust::raw_pointer_cast(d_red_template.data()), numRowsImg, numColsImg, templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate, numRowsTemplate * numColsTemplate, r_mean); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); naive_normalized_cross_correlation<<<gridSize, blockSize>>>(thrust::raw_pointer_cast(d_blue_response.data()), thrust::raw_pointer_cast(d_blue.data()), thrust::raw_pointer_cast(d_blue_template.data()), numRowsImg, numColsImg, templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate, numRowsTemplate * numColsTemplate, b_mean); 
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); naive_normalized_cross_correlation<<<gridSize, blockSize>>>(thrust::raw_pointer_cast(d_green_response.data()), thrust::raw_pointer_cast(d_green.data()), thrust::raw_pointer_cast(d_green_template.data()), numRowsImg, numColsImg, templateHalfHeight, numRowsTemplate, templateHalfWidth, numColsTemplate, numRowsTemplate * numColsTemplate, g_mean); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // generate combined response - multiply all channels together thrust::device_vector<float> d_combined_response(numElem); // combine data: d_combined_response = (d_red, d_blue, d_green) thrust::transform(thrust::make_zip_iterator(thrust::make_tuple( d_red_response.begin(), d_blue_response.begin(), d_green_response.begin())), thrust::make_zip_iterator(thrust::make_tuple( d_red_response.end(), d_blue_response.end(), d_green_response.end())), d_combined_response.begin(), combineResponses()); // find max/min of response typedef thrust::device_vector<float>::iterator floatIt; thrust::pair<floatIt, floatIt> minmax = thrust::minmax_element(d_combined_response.begin(), d_combined_response.end()); float bias = *minmax.first; // we need to make all the numbers positive so that the students can sort them without any bit twiddling thrust::transform(d_combined_response.begin(), d_combined_response.end(), thrust::make_constant_iterator(-bias), d_combined_response.begin(), thrust::plus<float>()); // now we need to create the 1-D coordinates that will be attached to the keys // allocate device mem with value = [0..numElem] thrust::device_vector<unsigned int> coords(numElem); thrust::sequence(coords.begin(), coords.end()); // coords=[0..numElem -1] // allocate memory for output and copy since our device vectors will go out of scope //and be deleted checkCudaErrors(cudaMalloc(inputVals, sizeof(unsigned int) * numElem)); checkCudaErrors(cudaMalloc(inputPos, sizeof(unsigned int) * numElem)); checkCudaErrors(cudaMalloc(outputVals, sizeof(unsigned int) * numElem)); checkCudaErrors(cudaMalloc(outputPos, sizeof(unsigned int) * numElem)); cudaMemcpy(*inputVals, thrust::raw_pointer_cast(d_combined_response.data()), sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice); cudaMemcpy(*inputPos, thrust::raw_pointer_cast(coords.data()), sizeof(unsigned int) * numElem, cudaMemcpyDeviceToDevice); checkCudaErrors(cudaMemset(*outputVals, 0, sizeof(unsigned int) * numElem)); checkCudaErrors(cudaMemset(*outputPos, 0, sizeof(unsigned int) * numElem)); } void postProcess(const unsigned int* const outputVals, const unsigned int* const outputPos, const size_t numElems, const std::string& output_file){ thrust::device_vector<unsigned char> d_output_red = d_red; const dim3 blockSize(256, 1, 1); const dim3 gridSize( (40 + blockSize.x - 1) / blockSize.x, 1, 1); remove_redness_from_coordinates<<<gridSize, blockSize>>>(outputPos, thrust::raw_pointer_cast(d_red.data()), thrust::raw_pointer_cast(d_blue.data()), thrust::raw_pointer_cast(d_green.data()), thrust::raw_pointer_cast(d_output_red.data()), 40, numRowsImg, numColsImg, 9, 9); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // combine the new red channel with original blue and green for output thrust::device_vector<uchar4> d_outputImg(numElems); thrust::transform(thrust::make_zip_iterator(thrust::make_tuple( d_output_red.begin(), d_blue.begin(), d_green.begin())), thrust::make_zip_iterator(thrust::make_tuple( d_output_red.end(), d_blue.end(), d_green.end())), d_outputImg.begin(), combineChannels()); 
thrust::host_vector<uchar4> h_Img = d_outputImg; saveImageRGBA(&h_Img[0], numRowsImg, numColsImg, output_file); /* clear the global vectors otherwise something goes wrong trying to free them * Note: std::vector.clear() will remove all elements in the vector * std::vector.shrink_to_fit() will delete pre-allocated memory */ d_red.clear(); d_red.shrink_to_fit(); d_blue.clear(); d_blue.shrink_to_fit(); d_green.clear(); d_green.shrink_to_fit(); }
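The AoS-to-SoA split in preProcess is a single thrust::transform writing through a zip_iterator. A small, self-contained sketch of that idea on a made-up four-pixel image; the functor mirrors the splitChannels idea but drops the unary_function base, which the file's own comment notes is optional under C++11:

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <cuda_runtime.h>
#include <cstdio>

// Functor: split one RGBA pixel into a tuple of (r, g, b) bytes.
struct SplitRGBA {
  __host__ __device__
  thrust::tuple<unsigned char, unsigned char, unsigned char>
  operator()(uchar4 p) const { return thrust::make_tuple(p.x, p.y, p.z); }
};

int main() {
  thrust::device_vector<uchar4> img(4, make_uchar4(10, 20, 30, 255));
  thrust::device_vector<unsigned char> r(4), g(4), b(4);
  // AoS -> SoA: write each channel into its own vector in one pass.
  thrust::transform(img.begin(), img.end(),
                    thrust::make_zip_iterator(
                        thrust::make_tuple(r.begin(), g.begin(), b.begin())),
                    SplitRGBA());
  printf("r[0]=%d g[0]=%d b[0]=%d\n", (int)r[0], (int)g[0], (int)b[0]);
  return 0;
}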
531b223b712cf932b5bbfc78a54345db5685af1b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "stdio.h" #include <stdlib.h> #include <sys/time.h> void sumMatrixOnHost(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { float *ia = MatA; float *ib = MatB; float *ic = MatC; float *end = ic + nx * ny; while (ic != end) { *ic = *ia + *ib; ++ia; ++ib; ++ic; } } __global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) { MatC[idx] = MatA[idx] + MatB[idx]; } } __global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx) { unsigned end = nx * ny; unsigned idx = ix; while (idx < end) { MatC[idx] = MatA[idx] + MatB[idx]; idx += nx; } } } __global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) { MatC[idx] = MatA[idx] + MatB[idx]; } } double cpuSecond(); char are_equal(float *a, float *b, unsigned n); int main() { /* CUDA task2 block/thread speed comparison */ const unsigned nx = 1 << 14; const unsigned ny = 1 << 14; const unsigned N = nx * ny; float *MatA = (float *)malloc(N * sizeof(float)); float *MatB = (float *)malloc(N * sizeof(float)); float *MatC = (float *)malloc(N * sizeof(float)); float *Res = (float *)malloc(N * sizeof(float)); sumMatrixOnHost(MatA, MatB, Res, nx, ny); dim3 block; dim3 grid; double time; float *dev_MatA, *dev_MatB, *dev_MatC; hipMalloc(&dev_MatA, N * sizeof(float)); hipMalloc(&dev_MatB, N * sizeof(float)); hipMalloc(&dev_MatC, N * sizeof(float)); hipMemcpy(dev_MatA, MatA, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_MatB, MatB, N * sizeof(float), hipMemcpyHostToDevice); // 2D grid - 2D block block = {32, 16}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); hipLaunchKernelGGL(( sumMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, dev_MatA, dev_MatB, dev_MatC, nx, ny); hipDeviceSynchronize(); time = cpuSecond() - time; for (unsigned i = 0; i < N; ++i) { MatC[i] = -1; } hipMemcpy(MatC, dev_MatC, N * sizeof(float), hipMemcpyDeviceToHost); printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y, time, are_equal(MatC, Res, N) ? 
"correct":"BAD"); // 1D grid - 1D block block = {128, 1}; grid = {(nx + block.x - 1) / block.x, 1}; time = cpuSecond(); hipLaunchKernelGGL(( sumMatrixOnGPU1D), dim3(grid), dim3(block), 0, 0, dev_MatA, dev_MatB, dev_MatC, nx, ny); hipDeviceSynchronize(); time = cpuSecond() - time; for (unsigned i = 0; i < N; ++i) { MatC[i] = -1; } hipMemcpy(MatC, dev_MatC, N * sizeof(float), hipMemcpyDeviceToHost); printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y, time, are_equal(MatC, Res, N) ? "correct":"BAD"); // 2D grid - 1D block block = {256, 1}; grid = {(nx + block.x - 1) / block.x, ny}; time = cpuSecond(); hipLaunchKernelGGL(( sumMatrixOnGPUMix), dim3(grid), dim3(block), 0, 0, dev_MatA, dev_MatB, dev_MatC, nx, ny); hipDeviceSynchronize(); time = cpuSecond() - time; for (unsigned i = 0; i < N; ++i) { MatC[i] = -1; } hipMemcpy(MatC, dev_MatC, N * sizeof(float), hipMemcpyDeviceToHost); printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y, time, are_equal(MatC, Res, N) ? "correct":"BAD"); // custom configurations block = {1, 1}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); hipLaunchKernelGGL(( sumMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, dev_MatA, dev_MatB, dev_MatC, nx, ny); hipDeviceSynchronize(); time = cpuSecond() - time; printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time); block = {128, 128}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); hipLaunchKernelGGL(( sumMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, dev_MatA, dev_MatB, dev_MatC, nx, ny); hipDeviceSynchronize(); time = cpuSecond() - time; printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time); block = {nx, ny}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); hipLaunchKernelGGL(( sumMatrixOnGPU2D), dim3(grid), dim3(block), 0, 0, dev_MatA, dev_MatB, dev_MatC, nx, ny); hipDeviceSynchronize(); time = cpuSecond() - time; printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time); return 0; } double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } char are_equal(float *a, float *b, unsigned n) { for (unsigned i = 0; i < n; ++i) { if (a[i] != b[i]) { return 0; } } return 1; }
531b223b712cf932b5bbfc78a54345db5685af1b.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "stdio.h" #include <stdlib.h> #include <sys/time.h> void sumMatrixOnHost(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { float *ia = MatA; float *ib = MatB; float *ic = MatC; float *end = ic + nx * ny; while (ic != end) { *ic = *ia + *ib; ++ia; ++ib; ++ic; } } __global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) { MatC[idx] = MatA[idx] + MatB[idx]; } } __global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx) { unsigned end = nx * ny; unsigned idx = ix; while (idx < end) { MatC[idx] = MatA[idx] + MatB[idx]; idx += nx; } } } __global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, const unsigned nx, const unsigned ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny) { MatC[idx] = MatA[idx] + MatB[idx]; } } double cpuSecond(); char are_equal(float *a, float *b, unsigned n); int main() { /* CUDA task2 block/thread speed comparison */ const unsigned nx = 1 << 14; const unsigned ny = 1 << 14; const unsigned N = nx * ny; float *MatA = (float *)malloc(N * sizeof(float)); float *MatB = (float *)malloc(N * sizeof(float)); float *MatC = (float *)malloc(N * sizeof(float)); float *Res = (float *)malloc(N * sizeof(float)); sumMatrixOnHost(MatA, MatB, Res, nx, ny); dim3 block; dim3 grid; double time; float *dev_MatA, *dev_MatB, *dev_MatC; cudaMalloc(&dev_MatA, N * sizeof(float)); cudaMalloc(&dev_MatB, N * sizeof(float)); cudaMalloc(&dev_MatC, N * sizeof(float)); cudaMemcpy(dev_MatA, MatA, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_MatB, MatB, N * sizeof(float), cudaMemcpyHostToDevice); // 2D grid - 2D block block = {32, 16}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny); cudaDeviceSynchronize(); time = cpuSecond() - time; for (unsigned i = 0; i < N; ++i) { MatC[i] = -1; } cudaMemcpy(MatC, dev_MatC, N * sizeof(float), cudaMemcpyDeviceToHost); printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y, time, are_equal(MatC, Res, N) ? 
"correct":"BAD"); // 1D grid - 1D block block = {128, 1}; grid = {(nx + block.x - 1) / block.x, 1}; time = cpuSecond(); sumMatrixOnGPU1D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny); cudaDeviceSynchronize(); time = cpuSecond() - time; for (unsigned i = 0; i < N; ++i) { MatC[i] = -1; } cudaMemcpy(MatC, dev_MatC, N * sizeof(float), cudaMemcpyDeviceToHost); printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y, time, are_equal(MatC, Res, N) ? "correct":"BAD"); // 2D grid - 1D block block = {256, 1}; grid = {(nx + block.x - 1) / block.x, ny}; time = cpuSecond(); sumMatrixOnGPUMix<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny); cudaDeviceSynchronize(); time = cpuSecond() - time; for (unsigned i = 0; i < N; ++i) { MatC[i] = -1; } cudaMemcpy(MatC, dev_MatC, N * sizeof(float), cudaMemcpyDeviceToHost); printf("(%d, %d) (%d %d): %f s - %s\n", grid.x, grid.y, block.x, block.y, time, are_equal(MatC, Res, N) ? "correct":"BAD"); // custom configurations block = {1, 1}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny); cudaDeviceSynchronize(); time = cpuSecond() - time; printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time); block = {128, 128}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny); cudaDeviceSynchronize(); time = cpuSecond() - time; printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time); block = {nx, ny}; grid = {(nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y}; time = cpuSecond(); sumMatrixOnGPU2D<<<grid, block>>>(dev_MatA, dev_MatB, dev_MatC, nx, ny); cudaDeviceSynchronize(); time = cpuSecond() - time; printf("(%d, %d) (%d %d): %f s\n", grid.x, grid.y, block.x, block.y, time); return 0; } double cpuSecond() { struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6); } char are_equal(float *a, float *b, unsigned n) { for (unsigned i = 0; i < n; ++i) { if (a[i] != b[i]) { return 0; } } return 1; }
5985f3a63c21f434154ff16bed6221d935ef53bb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<cuda_runtime.h>

#include "../common-lib/num_defs.h"
#include "../common-lib/rt_engine_errors.h"

#include "rt_cuda_engine_common.h"

#include <cstdlib>
#include <iostream>

/********************************************
 *                                          *
 *  nVIDIA CUDA front-end implementation    *
 *  for TabulatedFunction class functions   *
 *                                          *
 ********************************************/

__global__ static void spline_init_kernel(size_t N, real_t *dev_x, real_t *dev_y, real_t *dev_dy2, real_t *dev_u)
{
    size_t idx = threadIdx.x + blockIdx.x*blockDim.x;
    real_t sig, p;

    idx += 1;
    // NOTE: the recurrence below reads dev_dy2[idx-1] and dev_u[idx-1], so iterations are
    // not independent; the result is only well defined when the elements are processed in
    // order (effectively a single thread), otherwise stale neighbour values may be read.
    while ( idx < (N-1) ) { // skip the first and last element computations
        sig = (dev_x[idx]-dev_x[idx-1])/(dev_x[idx+1]-dev_x[idx-1]);
        p = sig*dev_dy2[idx-1] + 2.0;
        dev_dy2[idx] = (sig-1.0)/p;
        dev_u[idx] = (dev_y[idx+1]-dev_y[idx])/(dev_x[idx+1]-dev_x[idx]) - (dev_y[idx]-dev_y[idx-1])/(dev_x[idx]-dev_x[idx-1]);
        dev_u[idx] = (6.0*dev_u[idx]/(dev_x[idx+1]-dev_x[idx-1]) - sig*dev_u[idx-1])/p;

        idx += blockDim.x*gridDim.x;
    }
}

// callable host function
// The function computes the second derivatives of the interpolating function at the tabulated x-points
// The algorithm is adapted from Numerical Recipes in C
__host__
RT_engine_error spline_init(size_t N, real_t *x, real_t *y, real_t *dy2)
{
    hipError_t cuda_err;
    RT_engine_error ret_err;

    int N_cuda_blocks, N_cuda_threads;
    size_t dev_N, N_chunks, rest_N, start_elem;

    real_t *u, *dev_x, *dev_y, *dev_dy2, *dev_u;

    u = (real_t*) malloc(N*sizeof(real_t));
    if ( u == NULL ) {
        return ENGINE_ERROR_BAD_ALLOC;
    }

//    cuda_err = cuda_malloc_vectors(N,&dev_N,XYcXcY_vect_flags,&dev_x,&dev_y,&dev_dy2,&dev_u); // allocate memory for 4 vectors
    cuda_err = cudaMallocChunk(N,&dev_N,4,false,&dev_x,&dev_y,&dev_dy2,&dev_u);
    if ( cuda_err != hipSuccess ) {
        free(u);
        return ENGINE_ERROR_BAD_ALLOC;
    }

    cuda_err = cuda_kernel_props(dev_N-2,&N_cuda_blocks,&N_cuda_threads); // "dev_N-2" because the first and last elements are not computed
    if ( cuda_err != hipSuccess ) {
        free(u);
//        cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
        cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
        return ENGINE_ERROR_FAILED;
    }

    N_chunks = N/dev_N;

    ret_err = ENGINE_ERROR_OK;

    // natural spline
    dy2[0] = 0.0;
    dy2[N-1] = 0.0;

    u[0] = 0.0;

    start_elem = 0;
    for ( size_t i = 0; i < N_chunks; ++i) {
//        cuda_err = cuda_copy_mem(dev_N,start_elem,hipMemcpyHostToDevice,XYcXcY_vect_flags,x,dev_x,y,dev_y,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(dev_N,hipMemcpyHostToDevice,4,false,dev_x,x+start_elem,dev_y,y+start_elem,dev_dy2,dy2+start_elem,dev_u,u+start_elem);
        if ( cuda_err != hipSuccess ) {
            ret_err = ENGINE_ERROR_FAILED; // record the failure so the tail chunk is skipped and the caller is informed
            break;
        }

        hipLaunchKernelGGL(( spline_init_kernel), dim3(N_cuda_blocks),dim3(N_cuda_threads), 0, 0, dev_N,dev_x,dev_y,dev_dy2,dev_u);

//        cuda_err = cuda_copy_mem(dev_N,start_elem,hipMemcpyDeviceToHost,X_vect_flag | Y_vect_flag,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(dev_N,hipMemcpyDeviceToHost,4,false,dy2+start_elem,dev_dy2,u+start_elem,dev_u);
        if ( cuda_err != hipSuccess ) {
            ret_err = ENGINE_ERROR_FAILED; // record the failure so the tail chunk is skipped and the caller is informed
            break;
        }

        start_elem += dev_N;
    }

    // process the remaining elements (tail chunk)
    rest_N = N % dev_N;
    if ( rest_N && (ret_err == ENGINE_ERROR_OK) ) {
        cuda_err = cuda_kernel_props(rest_N-2,&N_cuda_blocks,&N_cuda_threads); // "rest_N-2" because the first and last elements are not computed
        if ( cuda_err != hipSuccess ) {
            free(u);
//            cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
            cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
            return ENGINE_ERROR_FAILED;
        }

//        cuda_err = cuda_copy_mem(rest_N,start_elem,hipMemcpyHostToDevice,XYcXcY_vect_flags,x,dev_x,y,dev_y,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(rest_N,hipMemcpyHostToDevice,4,false,dev_x,x+start_elem,dev_y,y+start_elem,dev_dy2,dy2+start_elem,dev_u,u+start_elem);
        if ( cuda_err != hipSuccess ) {
            free(u);
//            cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
            cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
            return ENGINE_ERROR_FAILED;
        }

        hipLaunchKernelGGL(( spline_init_kernel), dim3(N_cuda_blocks),dim3(N_cuda_threads), 0, 0, rest_N,dev_x,dev_y,dev_dy2,dev_u);

//        cuda_err = cuda_copy_mem(rest_N,start_elem,hipMemcpyDeviceToHost,X_vect_flag | Y_vect_flag,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(rest_N,hipMemcpyDeviceToHost,4,false,dy2+start_elem,dev_dy2,u+start_elem,dev_u);
        if ( cuda_err != hipSuccess ) {
            free(u);
//            cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
            cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
            return ENGINE_ERROR_FAILED;
        }
    }

    // final backsubstitution loop
    for (size_t k = N-2; k > 0; --k) dy2[k] = dy2[k]*dy2[k+1] + u[k];

    free(u);
//    cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
    cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);

    return ret_err;
}

// callable host function
// The function computes the interpolated value of the tabulated function at point xx (dy2 comes from a previous call of spline_init)
//__host__
//RT_engine_error spline_inter(size_t N, real_t *x, real_t *y, real_t dy2, real_t xx, real_t *yy)
//{
//}
5985f3a63c21f434154ff16bed6221d935ef53bb.cu
#include<cuda.h>
#include<cuda_runtime.h>

#include "../common-lib/num_defs.h"
#include "../common-lib/rt_engine_errors.h"

#include "rt_cuda_engine_common.h"

#include <cstdlib>
#include <iostream>

/********************************************
 *                                          *
 *  nVIDIA CUDA front-end implementation    *
 *  for TabulatedFunction class functions   *
 *                                          *
 ********************************************/

__global__ static void spline_init_kernel(size_t N, real_t *dev_x, real_t *dev_y, real_t *dev_dy2, real_t *dev_u)
{
    size_t idx = threadIdx.x + blockIdx.x*blockDim.x;
    real_t sig, p;

    idx += 1;
    // NOTE: the recurrence below reads dev_dy2[idx-1] and dev_u[idx-1], so iterations are
    // not independent; the result is only well defined when the elements are processed in
    // order (effectively a single thread), otherwise stale neighbour values may be read.
    while ( idx < (N-1) ) { // skip the first and last element computations
        sig = (dev_x[idx]-dev_x[idx-1])/(dev_x[idx+1]-dev_x[idx-1]);
        p = sig*dev_dy2[idx-1] + 2.0;
        dev_dy2[idx] = (sig-1.0)/p;
        dev_u[idx] = (dev_y[idx+1]-dev_y[idx])/(dev_x[idx+1]-dev_x[idx]) - (dev_y[idx]-dev_y[idx-1])/(dev_x[idx]-dev_x[idx-1]);
        dev_u[idx] = (6.0*dev_u[idx]/(dev_x[idx+1]-dev_x[idx-1]) - sig*dev_u[idx-1])/p;

        idx += blockDim.x*gridDim.x;
    }
}

// callable host function
// The function computes the second derivatives of the interpolating function at the tabulated x-points
// The algorithm is adapted from Numerical Recipes in C
__host__
RT_engine_error spline_init(size_t N, real_t *x, real_t *y, real_t *dy2)
{
    cudaError_t cuda_err;
    RT_engine_error ret_err;

    int N_cuda_blocks, N_cuda_threads;
    size_t dev_N, N_chunks, rest_N, start_elem;

    real_t *u, *dev_x, *dev_y, *dev_dy2, *dev_u;

    u = (real_t*) malloc(N*sizeof(real_t));
    if ( u == NULL ) {
        return ENGINE_ERROR_BAD_ALLOC;
    }

//    cuda_err = cuda_malloc_vectors(N,&dev_N,XYcXcY_vect_flags,&dev_x,&dev_y,&dev_dy2,&dev_u); // allocate memory for 4 vectors
    cuda_err = cudaMallocChunk(N,&dev_N,4,false,&dev_x,&dev_y,&dev_dy2,&dev_u);
    if ( cuda_err != cudaSuccess ) {
        free(u);
        return ENGINE_ERROR_BAD_ALLOC;
    }

    cuda_err = cuda_kernel_props(dev_N-2,&N_cuda_blocks,&N_cuda_threads); // "dev_N-2" because the first and last elements are not computed
    if ( cuda_err != cudaSuccess ) {
        free(u);
//        cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
        cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
        return ENGINE_ERROR_FAILED;
    }

    N_chunks = N/dev_N;

    ret_err = ENGINE_ERROR_OK;

    // natural spline
    dy2[0] = 0.0;
    dy2[N-1] = 0.0;

    u[0] = 0.0;

    start_elem = 0;
    for ( size_t i = 0; i < N_chunks; ++i) {
//        cuda_err = cuda_copy_mem(dev_N,start_elem,cudaMemcpyHostToDevice,XYcXcY_vect_flags,x,dev_x,y,dev_y,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(dev_N,cudaMemcpyHostToDevice,4,false,dev_x,x+start_elem,dev_y,y+start_elem,dev_dy2,dy2+start_elem,dev_u,u+start_elem);
        if ( cuda_err != cudaSuccess ) {
            ret_err = ENGINE_ERROR_FAILED; // record the failure so the tail chunk is skipped and the caller is informed
            break;
        }

        spline_init_kernel<<<N_cuda_blocks,N_cuda_threads>>>(dev_N,dev_x,dev_y,dev_dy2,dev_u);

//        cuda_err = cuda_copy_mem(dev_N,start_elem,cudaMemcpyDeviceToHost,X_vect_flag | Y_vect_flag,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(dev_N,cudaMemcpyDeviceToHost,4,false,dy2+start_elem,dev_dy2,u+start_elem,dev_u);
        if ( cuda_err != cudaSuccess ) {
            ret_err = ENGINE_ERROR_FAILED; // record the failure so the tail chunk is skipped and the caller is informed
            break;
        }

        start_elem += dev_N;
    }

    // process the remaining elements (tail chunk)
    rest_N = N % dev_N;
    if ( rest_N && (ret_err == ENGINE_ERROR_OK) ) {
        cuda_err = cuda_kernel_props(rest_N-2,&N_cuda_blocks,&N_cuda_threads); // "rest_N-2" because the first and last elements are not computed
        if ( cuda_err != cudaSuccess ) {
            free(u);
//            cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
            cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
            return ENGINE_ERROR_FAILED;
        }

//        cuda_err = cuda_copy_mem(rest_N,start_elem,cudaMemcpyHostToDevice,XYcXcY_vect_flags,x,dev_x,y,dev_y,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(rest_N,cudaMemcpyHostToDevice,4,false,dev_x,x+start_elem,dev_y,y+start_elem,dev_dy2,dy2+start_elem,dev_u,u+start_elem);
        if ( cuda_err != cudaSuccess ) {
            free(u);
//            cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
            cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
            return ENGINE_ERROR_FAILED;
        }

        spline_init_kernel<<<N_cuda_blocks,N_cuda_threads>>>(rest_N,dev_x,dev_y,dev_dy2,dev_u);

//        cuda_err = cuda_copy_mem(rest_N,start_elem,cudaMemcpyDeviceToHost,X_vect_flag | Y_vect_flag,dy2,dev_dy2,u,dev_u);
        cuda_err = cudaMemcpyChunk(rest_N,cudaMemcpyDeviceToHost,4,false,dy2+start_elem,dev_dy2,u+start_elem,dev_u);
        if ( cuda_err != cudaSuccess ) {
            free(u);
//            cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
            cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);
            return ENGINE_ERROR_FAILED;
        }
    }

    // final backsubstitution loop
    for (size_t k = N-2; k > 0; --k) dy2[k] = dy2[k]*dy2[k+1] + u[k];

    free(u);
//    cuda_free_mem(XYcXcY_vect_flags,dev_x,dev_y,dev_dy2,dev_u);
    cudaFreeChunk(4,dev_x,dev_y,dev_dy2,dev_u);

    return ret_err;
}

// callable host function
// The function computes the interpolated value of the tabulated function at point xx (dy2 comes from a previous call of spline_init)
//__host__
//RT_engine_error spline_inter(size_t N, real_t *x, real_t *y, real_t dy2, real_t xx, real_t *yy)
//{
//}
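Note: because spline_init_kernel carries the dy2/u recurrence from element idx-1 into element idx, the device results are easiest to validate against a plain serial implementation of the same Numerical Recipes scheme. A minimal host-side sketch under that assumption (function and parameter names are illustrative and not part of the engine; double is used in place of the engine's real_t):

#include <stdlib.h>

/* Serial reference: second derivatives of a natural cubic spline (y2[0] = y2[n-1] = 0). */
static void spline_second_derivs_ref(size_t n, const double *x, const double *y, double *y2)
{
    if (n < 3) return;
    double *u = (double *) malloc(n * sizeof(double));
    if (u == NULL) return;

    y2[0] = 0.0;
    u[0]  = 0.0;
    for (size_t i = 1; i < n - 1; ++i) {               /* forward decomposition sweep */
        double sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1]);
        double p   = sig * y2[i-1] + 2.0;
        y2[i] = (sig - 1.0) / p;
        u[i]  = (y[i+1] - y[i]) / (x[i+1] - x[i]) - (y[i] - y[i-1]) / (x[i] - x[i-1]);
        u[i]  = (6.0 * u[i] / (x[i+1] - x[i-1]) - sig * u[i-1]) / p;
    }
    y2[n-1] = 0.0;
    for (size_t k = n - 2; k > 0; --k)                 /* backsubstitution */
        y2[k] = y2[k] * y2[k+1] + u[k];

    free(u);
}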
d5f8914375296d1e61ef93ef219a3fd63577bcc8.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <cuPrintf.cu> #include <shrUtils.h> #include "cutil_inline.h" #define CUPRINTF cuPrintf // ok __device__ bool isColissionForVehicle(TColor *dst, int id, int imageW, int imageH, int x, int y, int dx, int dy){ //return false; int nx=x+dx; int ny=y+dy; if (nx >= imageW || nx < 1) return true; if (ny >= imageH || ny < 1) return true; TColor color = dst[imageW * ny + nx]; int r = (color >> 0) & 0xFF; int g = (color >> 8) & 0xFF; int b = (color >> 16) & 0xFF; int a = (color >> 24) & 0xFF; int area= r & 0xE0; if ( (area >> 5) == 7) //hay un edificio alli return true; if ( (area >> 5) == 6) //hay una estacion alli return true; if ( (area >> 5) == 5) //via peatonal return true; if ( ((area >> 5) != 4) && (area >> 5) != 2) //no es una via de transmilenio/carro return true; if (a != id && b==255) //hay un vehiculo, peaton o transmilenio ocupando el sitio. return true; bool up = ((g & 0x80) >> 7) == 1; bool down = ((g & 0x40) >> 6) == 1; bool left = ((g & 0x20) >> 5) == 1; bool right = ((g & 0x10) >> 4) == 1; if ((dy>-1 && up) || (dy<1 && down) || (dx>-1 && right) || (dx<1 && left)) //if (up || down || left || right) return false; return true; } //ok __device__ void getFirstStepForVehicle(TColor *dst, int id, int imageW, int imageH, int x, int y, int &px, int &py){ if (isColissionForVehicle(dst,id,imageW,imageH, px, py, x, y) ){// de frente if (x==0){ //para direccion arriba-abajo //asumiendo direcion hacia arriba if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,y,y) ){ // (+,+) - derecha de frente px+=y; py+=y; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,-y,y)){ // (-,+) - izquierda de frente px-=y; py+=y; } }else if (y==0){ //para direccion izquierda-derecha //asumiendo direccion hacia la derecha if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,x,-x) ){ // (+,-) - diagonal derecha px+=x; py-=x; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,x,x)){ // (+,+) - diagonal izquierda px+=x; py+=x; } }else if (x==y){ //para diagonal so-ne // tomando como direccion (1,1) derecha-arriba if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,x,0) ){ // (+,0) - miro diagonal derecha px+=x; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,0,y)){ // (0,+) - miro diagonal izquierda py+=y; } }else if (x==-y){ //para diagonal se-no //asumiendo como direccion (1,-1) derecha-abajo if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,0,y) ){ // (0,-) - miro diagonal derecha (asumo y=-1) py+=y; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,x,0)){ // (0,+) - miro diagonal izquierda (asumo x=1) px+=x; } } }else{ px+=x; py+=y; } } //OK __device__ void frontSidersForVehicle(int id, int rx, int ry, int &dx, int &dy){ dy=0; dx=ry; return; if (rx==0){ //para direccion arriba-abajo dy=0; dx=ry; }else if (ry==0){ //para direccion izquierda-derecha dy=-rx; dx=0; }else if (rx==ry){ //para diagonal so-ne dy=-ry; dx=rx; }else if (rx==-ry){ //para diagonal se-no dy=ry; dx=-rx; } } //OK __device__ bool isFrontCollisionForVehicle(TColor *dst, int id, int imageW, int imageH, int px, int py, int x, int y, int dx, int dy, int rightSize, int leftSize){ if (isColissionForVehicle(dst,id,imageW,imageH, px, py, x, y)) return true; /*for(int n=1; n<rightSize+1; n++){ if(isColissionForVehicle(dst,id,imageW,imageH, px+x, py+y, n*dx, n*dy)) return true; } for(int n=1; n<leftSize+1; n++){ if(isColissionForVehicle(dst,id,imageW,imageH, px+x, py+y, -n*dx, -n*dy)) return 
true; }*/ return false; } __device__ void getNextStepForVehicle(TColor *dst, int id, int imageW, int imageH, int x, int y, int &px, int &py, int *devTraceX, int *devTraceY, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout){ if (devTraceX[0]<0 && devTraceY[0]<0){ getFirstStepForVehicle(dst, id, imageW, imageH, x, y, px, py); return; } int size = (dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[0]-1)/2; int res = (( (float)dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[0] - 1.f )/2.f - (float)size) * 2; int sizeZ = dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[1]; int leftSize=size; int rightSize=size+res; int dx=0; int dy=0; frontSidersForVehicle(id, x, y, dx, dy); if ( isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, y, dx, dy, rightSize, leftSize) ){// de frente if (x==0){ //para direccion arriba-abajo //asumiendo direcion hacia arriba frontSidersForVehicle(id, y, y, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, y, y, dx, dy, rightSize, leftSize) ){ // (+,+) - derecha de frente px+=y; py+=y; }else{ frontSidersForVehicle(id, -y, y, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, -y, y, dx, dy, rightSize, leftSize) ){ // (-,+) - izquierda de frente px-=y; py+=y; } } }else if (y==0){ //para direccion izquierda-derecha //asumiendo direccion hacia la derecha frontSidersForVehicle(id, x, -x, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, -x, dx, dy, rightSize, leftSize) ){ // (+,-) - diagonal derecha px+=x; py-=x; }else{ frontSidersForVehicle(id, x, x, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, x, dx, dy, rightSize, leftSize) ){ // (+,+) - diagonal izquierda px+=x; py+=x; } } }else if (x==y){ //para diagonal so-ne // tomando como direccion (1,1) derecha-arriba frontSidersForVehicle(id, x, 0, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, 0, dx, dy, rightSize, leftSize) ){ // (+,0) - miro diagonal derecha px+=x; }else{ frontSidersForVehicle(id, 0, y, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, 0, y, dx, dy, rightSize, leftSize) ){ // (0,+) - miro diagonal izquierda py+=y; } } }else if (x==-y){ //para diagonal se-no //asumiendo como direccion (1,-1) derecha-abajo frontSidersForVehicle(id, 0, y, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, 0, y, dx, dy, rightSize, leftSize) ){ // (0,-) - miro diagonal derecha (asumo y=-1) py+=y; }else{ frontSidersForVehicle(id, x, 0, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, 0, dx, dy, rightSize, leftSize)){ // (0,+) - miro diagonal izquierda (asumo x=1) px+=x; } } } }else{ px+=x; py+=y; } } // // // AQUI EMPIEZAN LAS FASES DEL PASO DE SIMULACION // // /********* SPEED MANAGER *******/ __global__ void VehiclePhase0( int maxVehicle, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < maxVehicle) { if(devTimeOut[id]==-10) //solo entra la primera vez. 
{ int cellSize=maxVehicle*dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2]; int cellNumber=id/cellSize; devTimeOut[id]=cellNumber; return; } if(devTimeOut[id]<0) { devTimeOut[id]=1.f/dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2]-1; }else{ devTimeOut[id]--; } } } /********* PHASE 1: P'=f(p) *******/ __global__ void VehiclePhase1( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int **devLocalX, int **devLocalY, int *devLocalStep, int *devMaxLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devConflicted, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < maxVehicle){ if(devTimeOut[id]!=0) return; if (devLocalStep[id]<0){ devLocalStep[id]++; return; } int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay==-1) return; //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f) // , semaphore, 2) ){ // return; //} if (devLocalStep[id]==0){ devPreviousX[id]=devLocalX[id][5*devLocalStep[id] + 4]; devPreviousY[id]=devLocalY[id][5*devLocalStep[id] + 4]; devNextX[id]=devPreviousX[id]; devNextY[id]=devPreviousY[id]; devLocalStep[id]++; return; //comentar? } if (devLocalStep[id]==devMaxLocalStep[id]){ float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); devLocalStep[id]=0; return; //comentar? } int x=0; int y=0; if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 2)) return; int px=devNextX[id]; int py=devNextY[id]; getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); if (px != devNextX[id] || py != devNextY[id]){ //nueva posicion devPreviousX[id]=devNextX[id]; devPreviousY[id]=devNextY[id]; devNextX[id]=px; devNextY[id]=py; dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); float disX=(float)(devLocalX[id][5*(devLocalStep[id]) + 4]-devNextX[id]); float disY=(float)(devLocalY[id][5*(devLocalStep[id]) + 4]-devNextY[id]); float hyp=sqrt(disX*disX+disY*disY); if ( hyp < 2.f ){ devLocalStep[id]++; if (devLocalStep[id]!=devMaxLocalStep[id]){ devTimeOut[id]+=dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[4]; // comentar las dos lineas de abajo y observar float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); } } devConflicted[imageW * devNextY[id] + devNextX[id]] = id; //no es necesario hacerlo con todas las partes dado que solo se mueve el frente, el resto queda quieto. 
} } } /********* PHASE 2: Se intenta solucionar conflictos en paralelo********/ __global__ void VehiclePhase2( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int **devLocalX, int **devLocalY, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devConflicted, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if((id < maxVehicle) && (devLocalStep[id] >= 0)){ if(devTimeOut[id]!=0) return; int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay==-1) return; //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f) // , semaphore, 2) ){ // return; //} if (devConflicted[imageW * devNextY[id] + devNextX[id]] == id){ //talves tenga conflicto, pero tiene prioridad sobre los demas //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); return; } if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) ) //esta en conflicto pero no se ha movido (no deberia pasar nunca) return; int x=0; int y=0; if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 2)) return; int px=devPreviousX[id]; int py=devPreviousY[id]; getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //borro la posicion siguiente ya que el id de mas prioridad lo ocupo. //float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); //dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); //guardo mi nueva posicion, no sobreescribo la anterior. devNextX[id]=px; devNextY[id]=py; if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){ // si me pude mover, me muevo a mi nueva coordenada, aunque este movimiento puede generar colisiones. float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); devConflicted[imageW * devNextY[id] + devNextX[id]] = id; }else{ //el peaton no se pudo mover. no hago nada. 
} } } /********* PHASE 3: Se solucionan los conflictos (serial) que no se pudo resolver en la fase 3********/ __global__ void VehiclePhase3( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devConflicted, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if((id < maxVehicle) && (devLocalStep[id] >= 0)){ if(devTimeOut[id]!=0) return; if (devLocalStep[id]<=0) return; int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay!=-1) devTimeOut[id]+=delay; if (devConflicted[imageW * devNextY[id] + devNextX[id]] == id){ //talves tenga conflicto, pero tiene prioridad sobre los demas // no se ha iniciado o no tiene conflicto //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); return; } devNextX[id]=devPreviousX[id]; devNextY[id]=devPreviousY[id]; } } /********* PHASE 2: Detecto Colisiones (serial)********/ __global__ void VehicleCollision( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int *devClass, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int *devConflicted, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devTimeOut, float *devSpeed, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ //const int id = blockDim.x * blockIdx.x + threadIdx.x; for (int id=0; id < maxVehicle; id++){ devConflicted[id]=-1; if(devTimeOut[id]!=0) continue; if (devLocalStep[id] >= 0){ if (devClass[imageW*devNextY[id] + devNextX[id]]==-1) devClass[imageW*devNextY[id] + devNextX[id]]=id; else devConflicted[id]=devClass[imageW*devNextY[id] + devNextX[id]]; } } } /********* PHASE 3: Se intenta solucionar conflictos en paralelo********/ __global__ void VehiclePhase2OLD( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int **devLocalX, int **devLocalY, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int *devConflicted, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devClass, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if((id < maxVehicle) && (devLocalStep[id] >= 0)){ if(devTimeOut[id]!=0) return; int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay==-1) return; //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f) // , semaphore, 2) ){ // return; //} if (devConflicted[id]==-1 || devConflicted[id]==id){ //no tiene conflicto, borro con confianza el paso anterior //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 
0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); return; } if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) ) //esta en conflicto pero no se ha movido (no deberia pasar nunca) return; int x=0; int y=0; if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 2)) return; int px=devPreviousX[id]; int py=devPreviousY[id]; getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //borro la posicion siguiente ya que el id de mas prioridad lo ocupo. //float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); //dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); //guardo mi nueva posicion, no sobreescribo la anterior. devNextX[id]=px; devNextY[id]=py; if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){ // si me pude mover, me muevo a mi nueva coordenada, aunque este movimiento puede generar colisiones. float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); devConflicted[imageW * devNextY[id] + devNextX[id]] = id; devClass[0]=100; //esta variable no la uso mas, asi que tomo la primera posicion para indicar que al menos hubo un movimiento }else{ //el peaton no se pudo mover. no hago nada. } } } /********* PHASE 4: Se solucionan los conflictos (serial) que no se pudo resolver en la fase 3********/ __global__ void VehiclePhase3OLD( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int *devConflicted, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devClass, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ if(devClass[0]!=100) return; //no hubo conflictos en la fase 2, asi que esta fase sobra. 
for (int id=0; id < maxVehicle; id++){ if(devTimeOut[id]!=0) continue; if (devLocalStep[id]<=0) continue; int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay==-1) return; else devTimeOut[id]+=delay; //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f) // , semaphore, 2) ){ // continue; //} if (devConflicted[id]==-1 || devConflicted[id]==id){ // no se ha iniciado o no tiene conflicto //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); continue; } if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) ) //esta en conflicto pero no se ha movido (no deberia pasar nunca) continue; int x=devNextX[id]-devPreviousX[id]; int y=devNextY[id]-devPreviousY[id]; int px=devPreviousX[id]; int py=devPreviousY[id]; getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //borro la posicion siguiente ya que el id de mas prioridad lo ocupo. //float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); //dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); //guardo mi nueva posicion, no sobreescribo la anterior. devNextX[id]=px; devNextY[id]=py; if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){ // si me pude mover, me muevo a mi nueva coordenada, aunque este movimiento puede generar colisiones. //float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); //dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 0.f, 1.f, ((float)id)/255.0f); //float4 preFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(preFresult.x, preFresult.y, preFresult.z, 0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); }else{ //el peaton no se pudo mover. no hago nada. 
} } } extern "C" void run_Vehicle( TColor *d_dst, int *devClass, int imageW, int imageH, int maxVehicle, bool parallelDetection, bool semaphore, int **devLocalX, int **devLocalY, int *devLocalStep, int *devMaxLocalStep, int *devCurrentX, //para phase 1 int *devCurrentY, int *devPreviousX, int *devPreviousY, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devConflicted, //para phase 2 y 4 int **devRelated, //para phase 3 int *devTimeOut, float *devSpeed, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ /******PARA IMPRIMIR EN CONSOLA******/ //cudaPrintfInit(); //descomentar esta linea para activar la impresion por consola //CUPRINTF("aqui va el mensaje, funciona igual que un printf"); //copiar este comando en los kernels donde se desee imprimir //hipMemset(devConflicted,-1,imageW*imageH*sizeof(int)); //REVISAR ESTO !!!! :OOOOOOOOOOOOOO hipMemset(devClass,-1,imageW*imageH*sizeof(int)); //REVISAR ESTO !!!! :OOOOOOOOOOOOOO dim3 dimGrid(maxVehicle, 1); //define en total cuantas veces se ejecuta. (multiplicacion de ambos) dim3 dimBlock(1, 1, 1); // para saber cuantos se ejecuta, solo multiplique todos los valores, no use Z. if(parallelDetection){ //Fase 0: Control de velocidad, determino si en este paso de simulacion intento moverme o no. hipLaunchKernelGGL(( VehiclePhase0), dim3(dimGrid), dim3(dimBlock), 0, 0, maxVehicle, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase 1: realizo movimientos as hayan colisiones. hipLaunchKernelGGL(( VehiclePhase1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devMaxLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase 2: detecto y corrigo movimientos de peatones en conflicto con nuevos movimientos, aun asi pueden haber conflictos en los nuevos movimientos hipLaunchKernelGGL(( VehiclePhase2), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase 3: detecto y corrigo movimientos de peatones en conflicto, pero no genero movimientos nuevos, me devuelvo mejor a un estado estable. hipLaunchKernelGGL(( VehiclePhase3), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted,devTimeOut,dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); }else{ //Fase 0: Control de velocidad, determino si en este paso de simulacion intento moverme o no. hipLaunchKernelGGL(( VehiclePhase0), dim3(dimGrid), dim3(dimBlock), 0, 0, maxVehicle, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase 1: realizo movimientos as hayan colisiones. hipLaunchKernelGGL(( VehiclePhase1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devMaxLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase intermedia: detecto colisiones. 
(solo se detectan, no se arreglan) hipMemset(devClass,-1,imageW*imageH*sizeof(int)); hipLaunchKernelGGL(( VehicleCollision), dim3(1),dim3(1), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devClass, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devTimeOut, devSpeed, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase 2: detecto y corrigo movimientos de peatones en conflicto con nuevos movimientos, aun asi pueden haber conflictos en los nuevos movimientos hipLaunchKernelGGL(( VehiclePhase2OLD), dim3(dimGrid), dim3(dimBlock), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devClass, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase intermedia: detecto nuevas colisiones, que quedaron de la fase anterior. hipMemset(devClass,-1,imageW*imageH*sizeof(int)); hipLaunchKernelGGL(( VehicleCollision), dim3(1),dim3(1), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devClass, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devTimeOut, devSpeed, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //Fase 3: arreglo las colisiones resultantes de manera serial. hipLaunchKernelGGL(( VehiclePhase3OLD), dim3(1),dim3(1), 0, 0, d_dst, imageW, imageH, maxVehicle, semaphore, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devClass, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); } /***********PARA IMPRIMIR EN CONSOLA********/ //cudaPrintfDisplay(stdout, true); //descomentar esta linea para imprimer por consola, no modificar los atributos. //cudaPrintfEnd(); //descomentar esta linea para finalizar la impresion por consola. }
d5f8914375296d1e61ef93ef219a3fd63577bcc8.cu
#include <cuda.h> #include <stdio.h> #include <cuPrintf.cu> #include <shrUtils.h> #include "cutil_inline.h" #define CUPRINTF cuPrintf // ok __device__ bool isColissionForVehicle(TColor *dst, int id, int imageW, int imageH, int x, int y, int dx, int dy){ //return false; int nx=x+dx; int ny=y+dy; if (nx >= imageW || nx < 1) return true; if (ny >= imageH || ny < 1) return true; TColor color = dst[imageW * ny + nx]; int r = (color >> 0) & 0xFF; int g = (color >> 8) & 0xFF; int b = (color >> 16) & 0xFF; int a = (color >> 24) & 0xFF; int area= r & 0xE0; if ( (area >> 5) == 7) //hay un edificio alli return true; if ( (area >> 5) == 6) //hay una estacion alli return true; if ( (area >> 5) == 5) //via peatonal return true; if ( ((area >> 5) != 4) && (area >> 5) != 2) //no es una via de transmilenio/carro return true; if (a != id && b==255) //hay un vehiculo, peaton o transmilenio ocupando el sitio. return true; bool up = ((g & 0x80) >> 7) == 1; bool down = ((g & 0x40) >> 6) == 1; bool left = ((g & 0x20) >> 5) == 1; bool right = ((g & 0x10) >> 4) == 1; if ((dy>-1 && up) || (dy<1 && down) || (dx>-1 && right) || (dx<1 && left)) //if (up || down || left || right) return false; return true; } //ok __device__ void getFirstStepForVehicle(TColor *dst, int id, int imageW, int imageH, int x, int y, int &px, int &py){ if (isColissionForVehicle(dst,id,imageW,imageH, px, py, x, y) ){// de frente if (x==0){ //para direccion arriba-abajo //asumiendo direcion hacia arriba if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,y,y) ){ // (+,+) - derecha de frente px+=y; py+=y; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,-y,y)){ // (-,+) - izquierda de frente px-=y; py+=y; } }else if (y==0){ //para direccion izquierda-derecha //asumiendo direccion hacia la derecha if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,x,-x) ){ // (+,-) - diagonal derecha px+=x; py-=x; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,x,x)){ // (+,+) - diagonal izquierda px+=x; py+=x; } }else if (x==y){ //para diagonal so-ne // tomando como direccion (1,1) derecha-arriba if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,x,0) ){ // (+,0) - miro diagonal derecha px+=x; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,0,y)){ // (0,+) - miro diagonal izquierda py+=y; } }else if (x==-y){ //para diagonal se-no //asumiendo como direccion (1,-1) derecha-abajo if ( !isColissionForVehicle(dst,id,imageW,imageH,px,py,0,y) ){ // (0,-) - miro diagonal derecha (asumo y=-1) py+=y; }else if (!isColissionForVehicle(dst,id,imageW,imageH,px,py,x,0)){ // (0,+) - miro diagonal izquierda (asumo x=1) px+=x; } } }else{ px+=x; py+=y; } } //OK __device__ void frontSidersForVehicle(int id, int rx, int ry, int &dx, int &dy){ dy=0; dx=ry; return; if (rx==0){ //para direccion arriba-abajo dy=0; dx=ry; }else if (ry==0){ //para direccion izquierda-derecha dy=-rx; dx=0; }else if (rx==ry){ //para diagonal so-ne dy=-ry; dx=rx; }else if (rx==-ry){ //para diagonal se-no dy=ry; dx=-rx; } } //OK __device__ bool isFrontCollisionForVehicle(TColor *dst, int id, int imageW, int imageH, int px, int py, int x, int y, int dx, int dy, int rightSize, int leftSize){ if (isColissionForVehicle(dst,id,imageW,imageH, px, py, x, y)) return true; /*for(int n=1; n<rightSize+1; n++){ if(isColissionForVehicle(dst,id,imageW,imageH, px+x, py+y, n*dx, n*dy)) return true; } for(int n=1; n<leftSize+1; n++){ if(isColissionForVehicle(dst,id,imageW,imageH, px+x, py+y, -n*dx, -n*dy)) return true; }*/ return false; } __device__ void 
getNextStepForVehicle(TColor *dst, int id, int imageW, int imageH, int x, int y, int &px, int &py, int *devTraceX, int *devTraceY, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout){ if (devTraceX[0]<0 && devTraceY[0]<0){ getFirstStepForVehicle(dst, id, imageW, imageH, x, y, px, py); return; } int size = (dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[0]-1)/2; int res = (( (float)dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[0] - 1.f )/2.f - (float)size) * 2; int sizeZ = dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[1]; int leftSize=size; int rightSize=size+res; int dx=0; int dy=0; frontSidersForVehicle(id, x, y, dx, dy); if ( isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, y, dx, dy, rightSize, leftSize) ){// de frente if (x==0){ //para direccion arriba-abajo //asumiendo direcion hacia arriba frontSidersForVehicle(id, y, y, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, y, y, dx, dy, rightSize, leftSize) ){ // (+,+) - derecha de frente px+=y; py+=y; }else{ frontSidersForVehicle(id, -y, y, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, -y, y, dx, dy, rightSize, leftSize) ){ // (-,+) - izquierda de frente px-=y; py+=y; } } }else if (y==0){ //para direccion izquierda-derecha //asumiendo direccion hacia la derecha frontSidersForVehicle(id, x, -x, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, -x, dx, dy, rightSize, leftSize) ){ // (+,-) - diagonal derecha px+=x; py-=x; }else{ frontSidersForVehicle(id, x, x, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, x, dx, dy, rightSize, leftSize) ){ // (+,+) - diagonal izquierda px+=x; py+=x; } } }else if (x==y){ //para diagonal so-ne // tomando como direccion (1,1) derecha-arriba frontSidersForVehicle(id, x, 0, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, 0, dx, dy, rightSize, leftSize) ){ // (+,0) - miro diagonal derecha px+=x; }else{ frontSidersForVehicle(id, 0, y, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, 0, y, dx, dy, rightSize, leftSize) ){ // (0,+) - miro diagonal izquierda py+=y; } } }else if (x==-y){ //para diagonal se-no //asumiendo como direccion (1,-1) derecha-abajo frontSidersForVehicle(id, 0, y, dx, dy); if ( !isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, 0, y, dx, dy, rightSize, leftSize) ){ // (0,-) - miro diagonal derecha (asumo y=-1) py+=y; }else{ frontSidersForVehicle(id, x, 0, dx, dy); if (!isFrontCollisionForVehicle(dst, id, imageW, imageH, px, py, x, 0, dx, dy, rightSize, leftSize)){ // (0,+) - miro diagonal izquierda (asumo x=1) px+=x; } } } }else{ px+=x; py+=y; } } // // // AQUI EMPIEZAN LAS FASES DEL PASO DE SIMULACION // // /********* SPEED MANAGER *******/ __global__ void VehiclePhase0( int maxVehicle, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < maxVehicle) { if(devTimeOut[id]==-10) //solo entra la primera vez. 
{ int cellSize=maxVehicle*dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2]; int cellNumber=id/cellSize; devTimeOut[id]=cellNumber; return; } if(devTimeOut[id]<0) { devTimeOut[id]=1.f/dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[2]-1; }else{ devTimeOut[id]--; } } } /********* PHASE 1: P'=f(p) *******/ __global__ void VehiclePhase1( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int **devLocalX, int **devLocalY, int *devLocalStep, int *devMaxLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devConflicted, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < maxVehicle){ if(devTimeOut[id]!=0) return; if (devLocalStep[id]<0){ devLocalStep[id]++; return; } int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay==-1) return; //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f) // , semaphore, 2) ){ // return; //} if (devLocalStep[id]==0){ devPreviousX[id]=devLocalX[id][5*devLocalStep[id] + 4]; devPreviousY[id]=devLocalY[id][5*devLocalStep[id] + 4]; devNextX[id]=devPreviousX[id]; devNextY[id]=devPreviousY[id]; devLocalStep[id]++; return; //comentar? } if (devLocalStep[id]==devMaxLocalStep[id]){ float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); devLocalStep[id]=0; return; //comentar? } int x=0; int y=0; if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 2)) return; int px=devNextX[id]; int py=devNextY[id]; getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); if (px != devNextX[id] || py != devNextY[id]){ //nueva posicion devPreviousX[id]=devNextX[id]; devPreviousY[id]=devNextY[id]; devNextX[id]=px; devNextY[id]=py; dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); float disX=(float)(devLocalX[id][5*(devLocalStep[id]) + 4]-devNextX[id]); float disY=(float)(devLocalY[id][5*(devLocalStep[id]) + 4]-devNextY[id]); float hyp=sqrt(disX*disX+disY*disY); if ( hyp < 2.f ){ devLocalStep[id]++; if (devLocalStep[id]!=devMaxLocalStep[id]){ devTimeOut[id]+=dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout[4]; // comentar las dos lineas de abajo y observar float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); } } devConflicted[imageW * devNextY[id] + devNextX[id]] = id; //no es necesario hacerlo con todas las partes dado que solo se mueve el frente, el resto queda quieto. 
} } } /********* PHASE 2: Se intenta solucionar conflictos en paralelo********/ __global__ void VehiclePhase2( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int **devLocalX, int **devLocalY, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devConflicted, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if((id < maxVehicle) && (devLocalStep[id] >= 0)){ if(devTimeOut[id]!=0) return; int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay==-1) return; //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f) // , semaphore, 2) ){ // return; //} if (devConflicted[imageW * devNextY[id] + devNextX[id]] == id){ //talves tenga conflicto, pero tiene prioridad sobre los demas //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); return; } if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) ) //esta en conflicto pero no se ha movido (no deberia pasar nunca) return; int x=0; int y=0; if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 2)) return; int px=devPreviousX[id]; int py=devPreviousY[id]; getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //borro la posicion siguiente ya que el id de mas prioridad lo ocupo. //float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); //dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); //guardo mi nueva posicion, no sobreescribo la anterior. devNextX[id]=px; devNextY[id]=py; if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){ // si me pude mover, me muevo a mi nueva coordenada, aunque este movimiento puede generar colisiones. float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); devConflicted[imageW * devNextY[id] + devNextX[id]] = id; }else{ //el peaton no se pudo mover. no hago nada. 
} } } /********* PHASE 3: Se solucionan los conflictos (serial) que no se pudo resolver en la fase 3********/ __global__ void VehiclePhase3( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devConflicted, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if((id < maxVehicle) && (devLocalStep[id] >= 0)){ if(devTimeOut[id]!=0) return; if (devLocalStep[id]<=0) return; int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay!=-1) devTimeOut[id]+=delay; if (devConflicted[imageW * devNextY[id] + devNextX[id]] == id){ //talves tenga conflicto, pero tiene prioridad sobre los demas // no se ha iniciado o no tiene conflicto //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); return; } devNextX[id]=devPreviousX[id]; devNextY[id]=devPreviousY[id]; } } /********* PHASE 2: Detecto Colisiones (serial)********/ __global__ void VehicleCollision( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int *devClass, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int *devConflicted, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devTimeOut, float *devSpeed, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ //const int id = blockDim.x * blockIdx.x + threadIdx.x; for (int id=0; id < maxVehicle; id++){ devConflicted[id]=-1; if(devTimeOut[id]!=0) continue; if (devLocalStep[id] >= 0){ if (devClass[imageW*devNextY[id] + devNextX[id]]==-1) devClass[imageW*devNextY[id] + devNextX[id]]=id; else devConflicted[id]=devClass[imageW*devNextY[id] + devNextX[id]]; } } } /********* PHASE 3: Se intenta solucionar conflictos en paralelo********/ __global__ void VehiclePhase2OLD( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int **devLocalX, int **devLocalY, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int *devConflicted, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devClass, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ const int id = blockDim.x * blockIdx.x + threadIdx.x; if((id < maxVehicle) && (devLocalStep[id] >= 0)){ if(devTimeOut[id]!=0) return; int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2); if(delay==-1) return; //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f) // , semaphore, 2) ){ // return; //} if (devConflicted[id]==-1 || devConflicted[id]==id){ //no tiene conflicto, borro con confianza el paso anterior //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f); //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 
0.0f); TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); return; } if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) ) //esta en conflicto pero no se ha movido (no deberia pasar nunca) return; int x=0; int y=0; if (!getNextDirection(id , x, y, devLocalX, devLocalY, devLocalStep, devNextX, devNextY, 2)) return; int px=devPreviousX[id]; int py=devPreviousY[id]; getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout); //borro la posicion siguiente ya que el id de mas prioridad lo ocupo. //float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); //dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f); //guardo mi nueva posicion, no sobreescribo la anterior. devNextX[id]=px; devNextY[id]=py; if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){ // si me pude mover, me muevo a mi nueva coordenada, aunque este movimiento puede generar colisiones. float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f); dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 1.f, 1.f, ((float)id)/255.0f); devConflicted[imageW * devNextY[id] + devNextX[id]] = id; devClass[0]=100; //esta variable no la uso mas, asi que tomo la primera posicion para indicar que al menos hubo un movimiento }else{ //el peaton no se pudo mover. no hago nada. } } } /********* PHASE 4: Se solucionan los conflictos (serial) que no se pudo resolver en la fase 3********/ __global__ void VehiclePhase3OLD( TColor *dst, int imageW, int imageH, int maxVehicle, bool semaphore, int *devLocalStep, int *devNextX, int *devNextY, int *devPreviousX, int *devPreviousY, int *devConflicted, int **devTraceX, int **devTraceY, int **devTraceRotX, int **devTraceRotY, int *devClass, int *devTimeOut, float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout ){ if(devClass[0]!=100) return; //no hubo conflictos en la fase 2, asi que esta fase sobra. 
    for (int id=0; id < maxVehicle; id++){
        if(devTimeOut[id]!=0)
            continue;
        if (devLocalStep[id]<=0)
            continue;

        int delay=isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f), semaphore, 2);
        if(delay==-1)
            return;
        else
            devTimeOut[id]+=delay;

        //if (isSemaphoreRed(tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f)
        //        , semaphore, 2) ){
        //    continue;
        //}

        if (devConflicted[id]==-1 || devConflicted[id]==id){ // not started yet, or has no conflict
            //float4 nextFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
            //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);
            TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f);
            drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
            continue;
        }

        if ( (devNextX[id]==devPreviousX[id]) && (devNextY[id]==devPreviousY[id]) ) // it is in conflict but has not moved (this should never happen)
            continue;

        int x=devNextX[id]-devPreviousX[id];
        int y=devNextY[id]-devPreviousY[id];
        int px=devPreviousX[id];
        int py=devPreviousY[id];
        getNextStepForVehicle(dst, id, imageW, imageH, x, y, px, py, devTraceX[id], devTraceY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);

        // clear the next position, since the higher-priority id took it.
        //float4 nextFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
        //dst[imageW * devNextY[id] + devNextX[id]] = make_color(nextFresult.x, nextFresult.y, nextFresult.z, 0.0f);

        // store my new position; do not overwrite the previous one.
        devNextX[id]=px;
        devNextY[id]=py;

        if ( (px!=devPreviousX[id]) || (py!=devPreviousY[id]) ){ // if I was able to move, I move to my new coordinate, although this movement may generate collisions.
            //float4 newFresult = tex2D(texImage, (float)devNextX[id] + 0.5f, (float)devNextY[id] + 0.5f);
            //dst[imageW * devNextY[id] + devNextX[id]] = make_color(1.f, 0.f, 1.f, ((float)id)/255.0f);
            //float4 preFresult = tex2D(texImage, (float)devPreviousX[id] + 0.5f, (float)devPreviousY[id] + 0.5f);
            //dst[imageW * devPreviousY[id] + devPreviousX[id]] = make_color(preFresult.x, preFresult.y, preFresult.z, 0.0f);
            TColor color=make_color(1.f, 1.f, 1.f, ((float)id)/255.0f);
            drawAllTrace(dst, color, imageW, imageH, devNextX[id], devNextY[id], devNextX[id]-devPreviousX[id], devNextY[id]-devPreviousY[id], devTraceX[id], devTraceY[id], devTraceRotX[id], devTraceRotY[id], dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        }else{
            // the pedestrian could not move; do nothing.
        }
    }
}

extern "C" void run_Vehicle(
    TColor *d_dst,
    int *devClass,
    int imageW,
    int imageH,
    int maxVehicle,
    bool parallelDetection,
    bool semaphore,
    int **devLocalX,
    int **devLocalY,
    int *devLocalStep,
    int *devMaxLocalStep,
    int *devCurrentX,    // for phase 1
    int *devCurrentY,
    int *devPreviousX,
    int *devPreviousY,
    int **devTraceX,
    int **devTraceY,
    int **devTraceRotX,
    int **devTraceRotY,
    int *devConflicted,  // for phases 2 and 4
    int **devRelated,    // for phase 3
    int *devTimeOut,
    float *devSpeed,
    float *dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout
){
    /******FOR PRINTING TO THE CONSOLE******/
    //cudaPrintfInit();    // uncomment this line to enable printing to the console
    //CUPRINTF("the message goes here, it works just like a printf");    // copy this command into the kernels where you want to print

    //cudaMemset(devConflicted,-1,imageW*imageH*sizeof(int));    // CHECK THIS !!!! :OOOOOOOOOOOOOO
    cudaMemset(devClass,-1,imageW*imageH*sizeof(int));    // CHECK THIS !!!! :OOOOOOOOOOOOOO

    dim3 dimGrid(maxVehicle, 1);    // defines how many times it runs in total (the product of both values).
    dim3 dimBlock(1, 1, 1);    // to know how many run, just multiply all the values; do not use Z.

    if(parallelDetection){
        // Phase 0: speed control; decide whether this vehicle attempts to move in this simulation step.
        VehiclePhase0<<<dimGrid, dimBlock>>>(maxVehicle, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Phase 1: perform the movements even if there are collisions.
        VehiclePhase1<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devMaxLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Phase 2: detect and correct conflicting pedestrian movements with new movements; the new movements may still conflict.
        VehiclePhase2<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Phase 3: detect and correct conflicting pedestrian movements, but generate no new movements; fall back to a stable state instead.
        VehiclePhase3<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted,devTimeOut,dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
    }else{
        // Phase 0: speed control; decide whether this vehicle attempts to move in this simulation step.
        VehiclePhase0<<<dimGrid, dimBlock>>>(maxVehicle, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Phase 1: perform the movements even if there are collisions.
        VehiclePhase1<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devMaxLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devConflicted, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Intermediate phase: detect collisions (they are only detected, not fixed).
        cudaMemset(devClass,-1,imageW*imageH*sizeof(int));
        VehicleCollision<<<1,1>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devClass, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devTimeOut, devSpeed, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Phase 2: detect and correct conflicting pedestrian movements with new movements; the new movements may still conflict.
        VehiclePhase2OLD<<<dimGrid, dimBlock>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devLocalX, devLocalY, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devClass, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Intermediate phase: detect the new collisions left over from the previous phase.
        cudaMemset(devClass,-1,imageW*imageH*sizeof(int));
        VehicleCollision<<<1,1>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devClass, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devTimeOut, devSpeed, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
        // Phase 3: fix the remaining collisions serially.
        VehiclePhase3OLD<<<1,1>>>(d_dst, imageW, imageH, maxVehicle, semaphore, devLocalStep, devCurrentX, devCurrentY, devPreviousX, devPreviousY, devConflicted, devTraceX, devTraceY, devTraceRotX, devTraceRotY, devClass, devTimeOut, dev_sizeX_sizeZ_maxSpeed_maxAcce_maxTimeout);
    }

    /***********FOR PRINTING TO THE CONSOLE********/
    //cudaPrintfDisplay(stdout, true);    // uncomment this line to print to the console; do not modify the arguments.
    //cudaPrintfEnd();    // uncomment this line to end console printing.
}
975efd88a258dd5229d98f175f1ced1eb520db21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./common/book.h" #define N 16 __global__ void add(int *a, int *b, int *c) { int tid=blockIdx.x; printf(" thread id is : %d \n", tid); // just for fun if (tid <N) { c[tid] = a[tid] + b[tid]; } } int main( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // Assume that arrays are given HANDLE_ERROR( hipMalloc( ( void**)&dev_a, N*sizeof(int))); HANDLE_ERROR( hipMalloc( ( void**)&dev_b, N*sizeof(int))); HANDLE_ERROR( hipMalloc( ( void**)&dev_c, N*sizeof(int))); for (int i=0; i<N; i++) { a[i] = -i; b[i] = i*i; } // Copy from host to device (GPU) HANDLE_ERROR( hipMemcpy( dev_a, a, N*sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR( hipMemcpy( dev_b, b, N*sizeof(int), hipMemcpyHostToDevice)); // --------------------------- GPU code starts----------------------------- hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a, dev_b, dev_c); // --------------------------- GPU code ends----------------------------- // Copy from device to host HANDLE_ERROR( hipMemcpy( c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost)); for (int i=0; i<N; i++) { printf("%d + %d = %d\n", a[i], b[i], c[i]); } hipFree( dev_a); hipFree( dev_b); hipFree( dev_c); return 0; }
975efd88a258dd5229d98f175f1ced1eb520db21.cu
#include "./common/book.h" #define N 16 __global__ void add(int *a, int *b, int *c) { int tid=blockIdx.x; printf(" thread id is : %d \n", tid); // just for fun if (tid <N) { c[tid] = a[tid] + b[tid]; } } int main( void ) { int a[N], b[N], c[N]; int *dev_a, *dev_b, *dev_c; // Assume that arrays are given HANDLE_ERROR( cudaMalloc( ( void**)&dev_a, N*sizeof(int))); HANDLE_ERROR( cudaMalloc( ( void**)&dev_b, N*sizeof(int))); HANDLE_ERROR( cudaMalloc( ( void**)&dev_c, N*sizeof(int))); for (int i=0; i<N; i++) { a[i] = -i; b[i] = i*i; } // Copy from host to device (GPU) HANDLE_ERROR( cudaMemcpy( dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR( cudaMemcpy( dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice)); // --------------------------- GPU code starts----------------------------- add<<<N,1>>>(dev_a, dev_b, dev_c); // --------------------------- GPU code ends----------------------------- // Copy from device to host HANDLE_ERROR( cudaMemcpy( c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost)); for (int i=0; i<N; i++) { printf("%d + %d = %d\n", a[i], b[i], c[i]); } cudaFree( dev_a); cudaFree( dev_b); cudaFree( dev_c); return 0; }
5c06dab9a760fe39c225a4b54c2f00580338821e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>

#include "caffe/layers/concat_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void Concat(const int nthreads, const Dtype* in_data,
    const bool forward, const int num_concats, const int concat_size,
    const int top_concat_axis, const int bottom_concat_axis,
    const int offset_concat_axis, Dtype* out_data) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int total_concat_size = concat_size * bottom_concat_axis;
    const int concat_num = index / total_concat_size;
    const int concat_index = index % total_concat_size;
    const int top_index = concat_index +
        (concat_num * top_concat_axis + offset_concat_axis) * concat_size;
    if (forward) {
      out_data[top_index] = in_data[index];
    } else {
      out_data[index] = in_data[top_index];
    }
  }
}

template <typename Dtype>
void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  if (bottom.size() == 1) { return; }
  Dtype* top_data = top[0]->mutable_gpu_data();
  int offset_concat_axis = 0;
  const int top_concat_axis = top[0]->shape(concat_axis_);
  const bool kForward = true;
  for (int i = 0; i < bottom.size(); ++i) {
    const Dtype* bottom_data = bottom[i]->gpu_data();
    const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
    const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
    const int nthreads = bottom_concat_size * num_concats_;
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(HIP_KERNEL_NAME(Concat<Dtype>),
        dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
        Caffe::cuda_stream(),
        nthreads, bottom_data, kForward, num_concats_, concat_input_size_,
        top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data);
    offset_concat_axis += bottom_concat_axis;
  }
  CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
}

template <typename Dtype>
void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (bottom.size() == 1) { return; }
  const Dtype* top_diff = top[0]->gpu_diff();
  int offset_concat_axis = 0;
  const int top_concat_axis = top[0]->shape(concat_axis_);
  const bool kForward = false;
  for (int i = 0; i < bottom.size(); ++i) {
    const int bottom_concat_axis = bottom[i]->shape(concat_axis_);
    if (propagate_down[i]) {
      Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
      const int bottom_concat_size = bottom_concat_axis * concat_input_size_;
      const int nthreads = bottom_concat_size * num_concats_;
      // NOLINT_NEXT_LINE(whitespace/operators)
      hipLaunchKernelGGL(HIP_KERNEL_NAME(Concat<Dtype>),
          dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0,
          Caffe::cuda_stream(),
          nthreads, top_diff, kForward, num_concats_, concat_input_size_,
          top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff);
    }
    offset_concat_axis += bottom_concat_axis;
  }
  CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
}

INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer);

}  // namespace caffe
5c06dab9a760fe39c225a4b54c2f00580338821e.cu
#include <vector> #include "caffe/layers/concat_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void Concat(const int nthreads, const Dtype* in_data, const bool forward, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, Dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; if (forward) { out_data[top_index] = in_data[index]; } else { out_data[index] = in_data[top_index]; } } } template <typename Dtype> void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { if (bottom.size() == 1) { return; } Dtype* top_data = top[0]->mutable_gpu_data(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = true; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS, 0, Caffe::cuda_stream()>>>( nthreads, bottom_data, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, top_data); offset_concat_axis += bottom_concat_axis; } CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream())); } template <typename Dtype> void ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (bottom.size() == 1) { return; } const Dtype* top_diff = top[0]->gpu_diff(); int offset_concat_axis = 0; const int top_concat_axis = top[0]->shape(concat_axis_); const bool kForward = false; for (int i = 0; i < bottom.size(); ++i) { const int bottom_concat_axis = bottom[i]->shape(concat_axis_); if (propagate_down[i]) { Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); const int bottom_concat_size = bottom_concat_axis * concat_input_size_; const int nthreads = bottom_concat_size * num_concats_; Concat<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS, 0, Caffe::cuda_stream()>>>( nthreads, top_diff, kForward, num_concats_, concat_input_size_, top_concat_axis, bottom_concat_axis, offset_concat_axis, bottom_diff); } offset_concat_axis += bottom_concat_axis; } CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream())); } INSTANTIATE_LAYER_GPU_FUNCS(ConcatLayer); } // namespace caffe
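The Concat pair above adds one wrinkle over the simple launch: the kernel is a template and the launch passes an explicit stream, so the hipified call carries grid, block, shared memory, and stream before the kernel parameters, and cudaStreamSynchronize is renamed to hipStreamSynchronize. A self-contained sketch of that shape (the Scale kernel, sizes, and stream handling here are illustrative, not Caffe code):

#include <hip/hip_runtime.h>

template <typename T>
__global__ void Scale(const int n, const T alpha, T* data) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

int main() {
  const int n = 1 << 20;
  float* d = nullptr;
  hipMalloc((void**)&d, n * sizeof(float));
  hipMemset(d, 0, n * sizeof(float));

  hipStream_t stream;
  hipStreamCreate(&stream);

  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;

  // CUDA equivalent: Scale<float><<<blocks, threads, 0, stream>>>(n, 2.0f, d);
  hipLaunchKernelGGL(HIP_KERNEL_NAME(Scale<float>), dim3(blocks), dim3(threads),
                     0 /* dynamic shared mem */, stream, n, 2.0f, d);

  hipStreamSynchronize(stream);   // mirrors the cudaStreamSynchronize -> hipStreamSynchronize rename above
  hipStreamDestroy(stream);
  hipFree(d);
  return 0;
}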
ddeadf008c5b20e6a82c2f245f0737c3adb6908a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2018 XGBoost contributors */ #include <gtest/gtest.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include "../../../src/common/device_helpers.cuh" #include <xgboost/span.h> #include "test_span.h" namespace xgboost { namespace common { struct TestStatus { private: int *status_; public: TestStatus () { dh::safe_cuda(hipMalloc(&status_, sizeof(int))); int h_status = 1; dh::safe_cuda(hipMemcpy(status_, &h_status, sizeof(int), hipMemcpyHostToDevice)); } ~TestStatus() { dh::safe_cuda(hipFree(status_)); } int Get() { int h_status; dh::safe_cuda(hipMemcpy(&h_status, status_, sizeof(int), hipMemcpyDeviceToHost)); return h_status; } int* Data() { return status_; } }; __global__ void TestFromOtherKernel(Span<float> span) { // don't get optimized out size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= span.size()) { return; } } // Test converting different T __global__ void TestFromOtherKernelConst(Span<float const, 16> span) { // don't get optimized out size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= span.size()) { return; } } /*! * \brief Here we just test whether the code compiles. */ TEST(GPUSpan, FromOther) { thrust::host_vector<float> h_vec (16); std::iota(h_vec.begin(), h_vec.end(), 0); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); // dynamic extent { Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestFromOtherKernel), dim3(1), dim3(16), 0, 0, span); } { Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestFromOtherKernelConst), dim3(1), dim3(16), 0, 0, span); } // static extent { Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16); hipLaunchKernelGGL(( TestFromOtherKernel), dim3(1), dim3(16), 0, 0, span); } { Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16); hipLaunchKernelGGL(( TestFromOtherKernelConst), dim3(1), dim3(16), 0, 0, span); } } TEST(GPUSpan, Assignment) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestAssignment{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, TestStatus) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestTestStatus{status.Data()}); ASSERT_EQ(status.Get(), -1); } template <typename T> struct TestEqual { private: T *lhs_, *rhs_; int *status_; public: TestEqual(T* _lhs, T* _rhs, int * _status) : lhs_(_lhs), rhs_(_rhs), status_(_status) {} XGBOOST_DEVICE void operator()(size_t _idx) { bool res = lhs_[_idx] == rhs_[_idx]; SPAN_ASSERT_TRUE(res, status_); } }; TEST(GPUSpan, WithTrust) { dh::safe_cuda(hipSetDevice(0)); // Not adviced to initialize span with host_vector, since h_vec.data() is // a host function. 
thrust::host_vector<float> h_vec (16); std::iota(h_vec.begin(), h_vec.end(), 0); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); // Can't initialize span with device_vector, since d_vec.data() is not raw // pointer { Span<float> s (d_vec.data().get(), d_vec.size()); ASSERT_EQ(d_vec.size(), s.size()); ASSERT_EQ(d_vec.data().get(), s.data()); } { TestStatus status; thrust::device_vector<float> d_vec1 (d_vec.size()); thrust::copy(thrust::device, d_vec.begin(), d_vec.end(), d_vec1.begin()); Span<float> s (d_vec1.data().get(), d_vec.size()); dh::LaunchN(16, TestEqual<float>{ thrust::raw_pointer_cast(d_vec1.data()), s.data(), status.Data()}); ASSERT_EQ(status.Get(), 1); // FIXME(trivialfis): memory error! // bool res = thrust::equal(thrust::device, // d_vec.begin(), d_vec.end(), // s.begin()); } } TEST(GPUSpan, BeginEnd) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestBeginEnd{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, RBeginREnd) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestRBeginREnd{status.Data()}); ASSERT_EQ(status.Get(), 1); } __global__ void TestModifyKernel(Span<float> span) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= span.size()) { return; } span[idx] = span.size() - idx; } TEST(GPUSpan, Modify) { thrust::host_vector<float> h_vec (16); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestModifyKernel), dim3(1), dim3(16), 0, 0, span); for (size_t i = 0; i < d_vec.size(); ++i) { ASSERT_EQ(d_vec[i], d_vec.size() - i); } } TEST(GPUSpan, Observers) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestObservers{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, Compare) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterCompare{status.Data()}); ASSERT_EQ(status.Get(), 1); } struct TestElementAccess { private: Span<float> span_; public: XGBOOST_DEVICE explicit TestElementAccess (Span<float> _span) : span_(_span) {} XGBOOST_DEVICE float operator()(size_t _idx) { float tmp = span_[_idx]; return tmp; } }; TEST(GPUSpanDeathTest, ElementAccess) { dh::safe_cuda(hipSetDevice(0)); auto test_element_access = []() { thrust::host_vector<float> h_vec (16); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); dh::LaunchN(17, TestElementAccess{span}); }; testing::internal::CaptureStdout(); EXPECT_DEATH(test_element_access(), ""); std::string output = testing::internal::GetCapturedStdout(); } __global__ void TestFirstDynamicKernel(Span<float> _span) { _span.first<static_cast<Span<float>::index_type>(-1)>(); } __global__ void TestFirstStaticKernel(Span<float> _span) { _span.first(static_cast<Span<float>::index_type>(-1)); } __global__ void TestLastDynamicKernel(Span<float> _span) { _span.last<static_cast<Span<float>::index_type>(-1)>(); } __global__ void TestLastStaticKernel(Span<float> _span) { _span.last(static_cast<Span<float>::index_type>(-1)); } TEST(GPUSpan, FirstLast) { // We construct vectors multiple times since thrust can not recover from // death test. 
auto lambda_first_dy = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestFirstDynamicKernel), dim3(1), dim3(1), 0, 0, span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_first_dy(), ""); std::string output = testing::internal::GetCapturedStdout(); auto lambda_first_static = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestFirstStaticKernel), dim3(1), dim3(1), 0, 0, span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_first_static(), ""); output = testing::internal::GetCapturedStdout(); auto lambda_last_dy = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestLastDynamicKernel), dim3(1), dim3(1), 0, 0, span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_last_dy(), ""); output = testing::internal::GetCapturedStdout(); auto lambda_last_static = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestLastStaticKernel), dim3(1), dim3(1), 0, 0, span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_last_static(), ""); output = testing::internal::GetCapturedStdout(); } __global__ void TestFrontKernel(Span<float> _span) { _span.front(); } __global__ void TestBackKernel(Span<float> _span) { _span.back(); } TEST(GPUSpan, FrontBack) { dh::safe_cuda(hipSetDevice(0)); Span<float> s; auto lambda_test_front = [=]() { // make sure the termination happens inside this test. 
try { hipLaunchKernelGGL(( TestFrontKernel), dim3(1), dim3(1), 0, 0, s); dh::safe_cuda(hipDeviceSynchronize()); dh::safe_cuda(hipGetLastError()); } catch (dmlc::Error const& e) { std::terminate(); } }; EXPECT_DEATH(lambda_test_front(), ""); auto lambda_test_back = [=]() { try { hipLaunchKernelGGL(( TestBackKernel), dim3(1), dim3(1), 0, 0, s); dh::safe_cuda(hipDeviceSynchronize()); dh::safe_cuda(hipGetLastError()); } catch (dmlc::Error const& e) { std::terminate(); } }; EXPECT_DEATH(lambda_test_back(), ""); } __global__ void TestSubspanDynamicKernel(Span<float> _span) { _span.subspan(16, 0); } __global__ void TestSubspanStaticKernel(Span<float> _span) { _span.subspan<16>(); } TEST(GPUSpan, Subspan) { auto lambda_subspan_dynamic = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestSubspanDynamicKernel), dim3(1), dim3(1), 0, 0, span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_subspan_dynamic(), ""); std::string output = testing::internal::GetCapturedStdout(); auto lambda_subspan_static = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); hipLaunchKernelGGL(( TestSubspanStaticKernel), dim3(1), dim3(1), 0, 0, span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_subspan_static(), ""); output = testing::internal::GetCapturedStdout(); } TEST(GPUSpanIter, Construct) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterConstruct{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpanIter, Ref) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterRef{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpanIter, Calculate) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterCalculate{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpanIter, Compare) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterCompare{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, AsBytes) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestAsBytes{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, AsWritableBytes) { dh::safe_cuda(hipSetDevice(0)); TestStatus status; dh::LaunchN(16, TestAsWritableBytes{status.Data()}); ASSERT_EQ(status.Get(), 1); } } // namespace common } // namespace xgboost
ddeadf008c5b20e6a82c2f245f0737c3adb6908a.cu
/*! * Copyright 2018 XGBoost contributors */ #include <gtest/gtest.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include "../../../src/common/device_helpers.cuh" #include <xgboost/span.h> #include "test_span.h" namespace xgboost { namespace common { struct TestStatus { private: int *status_; public: TestStatus () { dh::safe_cuda(cudaMalloc(&status_, sizeof(int))); int h_status = 1; dh::safe_cuda(cudaMemcpy(status_, &h_status, sizeof(int), cudaMemcpyHostToDevice)); } ~TestStatus() { dh::safe_cuda(cudaFree(status_)); } int Get() { int h_status; dh::safe_cuda(cudaMemcpy(&h_status, status_, sizeof(int), cudaMemcpyDeviceToHost)); return h_status; } int* Data() { return status_; } }; __global__ void TestFromOtherKernel(Span<float> span) { // don't get optimized out size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= span.size()) { return; } } // Test converting different T __global__ void TestFromOtherKernelConst(Span<float const, 16> span) { // don't get optimized out size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= span.size()) { return; } } /*! * \brief Here we just test whether the code compiles. */ TEST(GPUSpan, FromOther) { thrust::host_vector<float> h_vec (16); std::iota(h_vec.begin(), h_vec.end(), 0); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); // dynamic extent { Span<float> span (d_vec.data().get(), d_vec.size()); TestFromOtherKernel<<<1, 16>>>(span); } { Span<float> span (d_vec.data().get(), d_vec.size()); TestFromOtherKernelConst<<<1, 16>>>(span); } // static extent { Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16); TestFromOtherKernel<<<1, 16>>>(span); } { Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16); TestFromOtherKernelConst<<<1, 16>>>(span); } } TEST(GPUSpan, Assignment) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestAssignment{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, TestStatus) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestTestStatus{status.Data()}); ASSERT_EQ(status.Get(), -1); } template <typename T> struct TestEqual { private: T *lhs_, *rhs_; int *status_; public: TestEqual(T* _lhs, T* _rhs, int * _status) : lhs_(_lhs), rhs_(_rhs), status_(_status) {} XGBOOST_DEVICE void operator()(size_t _idx) { bool res = lhs_[_idx] == rhs_[_idx]; SPAN_ASSERT_TRUE(res, status_); } }; TEST(GPUSpan, WithTrust) { dh::safe_cuda(cudaSetDevice(0)); // Not adviced to initialize span with host_vector, since h_vec.data() is // a host function. thrust::host_vector<float> h_vec (16); std::iota(h_vec.begin(), h_vec.end(), 0); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); // Can't initialize span with device_vector, since d_vec.data() is not raw // pointer { Span<float> s (d_vec.data().get(), d_vec.size()); ASSERT_EQ(d_vec.size(), s.size()); ASSERT_EQ(d_vec.data().get(), s.data()); } { TestStatus status; thrust::device_vector<float> d_vec1 (d_vec.size()); thrust::copy(thrust::device, d_vec.begin(), d_vec.end(), d_vec1.begin()); Span<float> s (d_vec1.data().get(), d_vec.size()); dh::LaunchN(16, TestEqual<float>{ thrust::raw_pointer_cast(d_vec1.data()), s.data(), status.Data()}); ASSERT_EQ(status.Get(), 1); // FIXME(trivialfis): memory error! 
// bool res = thrust::equal(thrust::device, // d_vec.begin(), d_vec.end(), // s.begin()); } } TEST(GPUSpan, BeginEnd) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestBeginEnd{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, RBeginREnd) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestRBeginREnd{status.Data()}); ASSERT_EQ(status.Get(), 1); } __global__ void TestModifyKernel(Span<float> span) { size_t idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= span.size()) { return; } span[idx] = span.size() - idx; } TEST(GPUSpan, Modify) { thrust::host_vector<float> h_vec (16); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); TestModifyKernel<<<1, 16>>>(span); for (size_t i = 0; i < d_vec.size(); ++i) { ASSERT_EQ(d_vec[i], d_vec.size() - i); } } TEST(GPUSpan, Observers) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestObservers{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, Compare) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterCompare{status.Data()}); ASSERT_EQ(status.Get(), 1); } struct TestElementAccess { private: Span<float> span_; public: XGBOOST_DEVICE explicit TestElementAccess (Span<float> _span) : span_(_span) {} XGBOOST_DEVICE float operator()(size_t _idx) { float tmp = span_[_idx]; return tmp; } }; TEST(GPUSpanDeathTest, ElementAccess) { dh::safe_cuda(cudaSetDevice(0)); auto test_element_access = []() { thrust::host_vector<float> h_vec (16); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); dh::LaunchN(17, TestElementAccess{span}); }; testing::internal::CaptureStdout(); EXPECT_DEATH(test_element_access(), ""); std::string output = testing::internal::GetCapturedStdout(); } __global__ void TestFirstDynamicKernel(Span<float> _span) { _span.first<static_cast<Span<float>::index_type>(-1)>(); } __global__ void TestFirstStaticKernel(Span<float> _span) { _span.first(static_cast<Span<float>::index_type>(-1)); } __global__ void TestLastDynamicKernel(Span<float> _span) { _span.last<static_cast<Span<float>::index_type>(-1)>(); } __global__ void TestLastStaticKernel(Span<float> _span) { _span.last(static_cast<Span<float>::index_type>(-1)); } TEST(GPUSpan, FirstLast) { // We construct vectors multiple times since thrust can not recover from // death test. 
auto lambda_first_dy = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); TestFirstDynamicKernel<<<1, 1>>>(span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_first_dy(), ""); std::string output = testing::internal::GetCapturedStdout(); auto lambda_first_static = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); TestFirstStaticKernel<<<1, 1>>>(span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_first_static(), ""); output = testing::internal::GetCapturedStdout(); auto lambda_last_dy = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); TestLastDynamicKernel<<<1, 1>>>(span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_last_dy(), ""); output = testing::internal::GetCapturedStdout(); auto lambda_last_static = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); TestLastStaticKernel<<<1, 1>>>(span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_last_static(), ""); output = testing::internal::GetCapturedStdout(); } __global__ void TestFrontKernel(Span<float> _span) { _span.front(); } __global__ void TestBackKernel(Span<float> _span) { _span.back(); } TEST(GPUSpan, FrontBack) { dh::safe_cuda(cudaSetDevice(0)); Span<float> s; auto lambda_test_front = [=]() { // make sure the termination happens inside this test. 
try { TestFrontKernel<<<1, 1>>>(s); dh::safe_cuda(cudaDeviceSynchronize()); dh::safe_cuda(cudaGetLastError()); } catch (dmlc::Error const& e) { std::terminate(); } }; EXPECT_DEATH(lambda_test_front(), ""); auto lambda_test_back = [=]() { try { TestBackKernel<<<1, 1>>>(s); dh::safe_cuda(cudaDeviceSynchronize()); dh::safe_cuda(cudaGetLastError()); } catch (dmlc::Error const& e) { std::terminate(); } }; EXPECT_DEATH(lambda_test_back(), ""); } __global__ void TestSubspanDynamicKernel(Span<float> _span) { _span.subspan(16, 0); } __global__ void TestSubspanStaticKernel(Span<float> _span) { _span.subspan<16>(); } TEST(GPUSpan, Subspan) { auto lambda_subspan_dynamic = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); TestSubspanDynamicKernel<<<1, 1>>>(span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_subspan_dynamic(), ""); std::string output = testing::internal::GetCapturedStdout(); auto lambda_subspan_static = []() { thrust::host_vector<float> h_vec (4); InitializeRange(h_vec.begin(), h_vec.end()); thrust::device_vector<float> d_vec (h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); Span<float> span (d_vec.data().get(), d_vec.size()); TestSubspanStaticKernel<<<1, 1>>>(span); }; testing::internal::CaptureStdout(); EXPECT_DEATH(lambda_subspan_static(), ""); output = testing::internal::GetCapturedStdout(); } TEST(GPUSpanIter, Construct) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterConstruct{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpanIter, Ref) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterRef{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpanIter, Calculate) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterCalculate{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpanIter, Compare) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestIterCompare{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, AsBytes) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestAsBytes{status.Data()}); ASSERT_EQ(status.Get(), 1); } TEST(GPUSpan, AsWritableBytes) { dh::safe_cuda(cudaSetDevice(0)); TestStatus status; dh::LaunchN(16, TestAsWritableBytes{status.Data()}); ASSERT_EQ(status.Get(), 1); } } // namespace common } // namespace xgboost
2fb2b1616a5ca42b195e21b4e466329d93cf1fd9.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Alexander Ocsa <[email protected]> * Copyright 2018 Felipe Aramburu <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include <iostream> #include <gdf/gdf.h> #include <gdf/cffi/functions.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <hip/hip_runtime.h> #include <tuple> #include "helper/utils.cuh" /* ============================================================================ Description : Compute gpu_comparison and apply_stencil of gdf_columns using Thrust on GPU ============================================================================ */ TEST(FilterOperationsTest, usage_example) { using LeftValueType = int16_t; using RightValueType = int16_t; int column_size = 10; int init_value = 10; int max_size = 4; gdf_comparison_operator gdf_operator = GDF_EQUALS; gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); std::cout << "Left" << std::endl; print_column<LeftValueType>(&lhs); std::cout << "Right" << std::endl; print_column<RightValueType>(&rhs); std::cout << "Output" << std::endl; print_column<int8_t>(&output); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); /// lhs.dtype === rhs.dtype gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } template <typename LeftValueType, typename RightValueType> void test_filterops_using_templates(gdf_comparison_operator gdf_operator = GDF_EQUALS) { //0, ..., 100, //100, 10000, 10000, 100000 for (int column_size = 0; column_size < 10; column_size += 1) { const int max_size = 8; for (int init_value = 0; init_value <= 1; init_value++) { gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 // lhs.null_count = 2; gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 // rhs.null_count = 1; gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); if (lhs.dtype == rhs.dtype ) { gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); } delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } } } TEST(FilterOperationsTest, WithInt8AndOthers) { test_filterops_using_templates<int8_t, 
int8_t>(); test_filterops_using_templates<int8_t, int16_t>(); test_filterops_using_templates<int8_t, int32_t>(); test_filterops_using_templates<int8_t, int64_t>(); test_filterops_using_templates<int8_t, float>(); test_filterops_using_templates<int8_t, double>(); } TEST(FilterOperationsTest, WithInt16AndOthers) { test_filterops_using_templates<int16_t, int8_t>(); test_filterops_using_templates<int16_t, int16_t>(); test_filterops_using_templates<int16_t, int32_t>(); test_filterops_using_templates<int16_t, int64_t>(); test_filterops_using_templates<int16_t, float>(); test_filterops_using_templates<int16_t, double>(); } TEST(FilterOperationsTest, WithInt32AndOthers) { test_filterops_using_templates<int32_t, int8_t>(); test_filterops_using_templates<int32_t, int16_t>(); test_filterops_using_templates<int32_t, int32_t>(); test_filterops_using_templates<int32_t, int64_t>(); test_filterops_using_templates<int32_t, float>(); test_filterops_using_templates<int32_t, double>(); } TEST(FilterOperationsTest, WithInt64AndOthers) { test_filterops_using_templates<int64_t, int8_t>(); test_filterops_using_templates<int64_t, int16_t>(); test_filterops_using_templates<int64_t, int32_t>(); test_filterops_using_templates<int64_t, int64_t>(); test_filterops_using_templates<int64_t, float>(); test_filterops_using_templates<int64_t, double>(); } TEST(FilterOperationsTest, WithFloat32AndOthers) { test_filterops_using_templates<float, int8_t>(); test_filterops_using_templates<float, int16_t>(); test_filterops_using_templates<float, int32_t>(); test_filterops_using_templates<float, int64_t>(); test_filterops_using_templates<float, float>(); test_filterops_using_templates<float, double>(); } TEST(FilterOperationsTest, WithFloat64AndOthers) { test_filterops_using_templates<double, int8_t>(); test_filterops_using_templates<double, int16_t>(); test_filterops_using_templates<double, int32_t>(); test_filterops_using_templates<double, int64_t>(); test_filterops_using_templates<double, float>(); test_filterops_using_templates<double, double>(); }
2fb2b1616a5ca42b195e21b4e466329d93cf1fd9.cu
/* * Copyright 2018 BlazingDB, Inc. * Copyright 2018 Alexander Ocsa <[email protected]> * Copyright 2018 Felipe Aramburu <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "gtest/gtest.h" #include <iostream> #include <gdf/gdf.h> #include <gdf/cffi/functions.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <cuda_runtime.h> #include <tuple> #include "helper/utils.cuh" /* ============================================================================ Description : Compute gpu_comparison and apply_stencil of gdf_columns using Thrust on GPU ============================================================================ */ TEST(FilterOperationsTest, usage_example) { using LeftValueType = int16_t; using RightValueType = int16_t; int column_size = 10; int init_value = 10; int max_size = 4; gdf_comparison_operator gdf_operator = GDF_EQUALS; gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); std::cout << "Left" << std::endl; print_column<LeftValueType>(&lhs); std::cout << "Right" << std::endl; print_column<RightValueType>(&rhs); std::cout << "Output" << std::endl; print_column<int8_t>(&output); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); /// lhs.dtype === rhs.dtype gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } template <typename LeftValueType, typename RightValueType> void test_filterops_using_templates(gdf_comparison_operator gdf_operator = GDF_EQUALS) { //0, ..., 100, //100, 10000, 10000, 100000 for (int column_size = 0; column_size < 10; column_size += 1) { const int max_size = 8; for (int init_value = 0; init_value <= 1; init_value++) { gdf_column lhs = gen_gdb_column<LeftValueType>(column_size, init_value); // 4, 2, 0 // lhs.null_count = 2; gdf_column rhs = gen_gdb_column<RightValueType>(column_size, 0.01 + max_size - init_value); // 0, 2, 4 // rhs.null_count = 1; gdf_column output = gen_gdb_column<int8_t>(column_size, 0); gdf_error error = gpu_comparison(&lhs, &rhs, &output, gdf_operator); EXPECT_TRUE(error == GDF_SUCCESS); check_column_for_comparison_operation<LeftValueType, RightValueType>(&lhs, &rhs, &output, gdf_operator); if (lhs.dtype == rhs.dtype ) { gpu_apply_stencil(&lhs, &output, &rhs); check_column_for_stencil_operation<LeftValueType, RightValueType>(&lhs, &output, &rhs); } delete_gdf_column(&lhs); delete_gdf_column(&rhs); delete_gdf_column(&output); } } } TEST(FilterOperationsTest, WithInt8AndOthers) { test_filterops_using_templates<int8_t, int8_t>(); test_filterops_using_templates<int8_t, int16_t>(); 
test_filterops_using_templates<int8_t, int32_t>(); test_filterops_using_templates<int8_t, int64_t>(); test_filterops_using_templates<int8_t, float>(); test_filterops_using_templates<int8_t, double>(); } TEST(FilterOperationsTest, WithInt16AndOthers) { test_filterops_using_templates<int16_t, int8_t>(); test_filterops_using_templates<int16_t, int16_t>(); test_filterops_using_templates<int16_t, int32_t>(); test_filterops_using_templates<int16_t, int64_t>(); test_filterops_using_templates<int16_t, float>(); test_filterops_using_templates<int16_t, double>(); } TEST(FilterOperationsTest, WithInt32AndOthers) { test_filterops_using_templates<int32_t, int8_t>(); test_filterops_using_templates<int32_t, int16_t>(); test_filterops_using_templates<int32_t, int32_t>(); test_filterops_using_templates<int32_t, int64_t>(); test_filterops_using_templates<int32_t, float>(); test_filterops_using_templates<int32_t, double>(); } TEST(FilterOperationsTest, WithInt64AndOthers) { test_filterops_using_templates<int64_t, int8_t>(); test_filterops_using_templates<int64_t, int16_t>(); test_filterops_using_templates<int64_t, int32_t>(); test_filterops_using_templates<int64_t, int64_t>(); test_filterops_using_templates<int64_t, float>(); test_filterops_using_templates<int64_t, double>(); } TEST(FilterOperationsTest, WithFloat32AndOthers) { test_filterops_using_templates<float, int8_t>(); test_filterops_using_templates<float, int16_t>(); test_filterops_using_templates<float, int32_t>(); test_filterops_using_templates<float, int64_t>(); test_filterops_using_templates<float, float>(); test_filterops_using_templates<float, double>(); } TEST(FilterOperationsTest, WithFloat64AndOthers) { test_filterops_using_templates<double, int8_t>(); test_filterops_using_templates<double, int16_t>(); test_filterops_using_templates<double, int32_t>(); test_filterops_using_templates<double, int64_t>(); test_filterops_using_templates<double, float>(); test_filterops_using_templates<double, double>(); }
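In the filter-operation tests the only GPU-specific line is the runtime header, so the hipify diff reduces to swapping <cuda_runtime.h> for "hip/hip_runtime.h". When one source tree has to build both ways, a guarded include is a common way to express that; a minimal sketch (the guard shown is an assumption about the build setup, not something the gdf sources do):

// Pick the runtime header based on the compiler actually in use.
#if defined(__HIPCC__)
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif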
3b1a6bf66c8795f9ad9f695b0450f8af550cced3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.numIterations1 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* Matrix transpose with Cuda * Host code. * This example transposes arbitrary-size matrices. It compares a naive * transpose kernel that suffers from non-coalesced writes, to an optimized * transpose with fully coalesced memory access and no bank conflicts. On * a G80 GPU, the optimized transpose can be more than 10x faster for large * matrices. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project //#include <cutil.h> #include "/opt/nvidia/cuda/common/inc/cutil.h" // includes, kernels #include <transpose_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold( float* reference, float* idata, const unsigned int size_x, const unsigned int size_y ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int numIterations = 5; // size of the matrix #ifdef __DEVICE_EMULATION__ const unsigned int size_x = 32; const unsigned int size_y = 128; #else // const unsigned int size_x = 64; // const unsigned int size_y = 64; // const unsigned int size_x = 256; // const unsigned int size_y = 4096; // const unsigned int size_x = 256; // const unsigned int size_y = 256; const unsigned int size_x = 4096; const unsigned int size_y = 4096; // const unsigned int size_x = 1024; // const unsigned int size_y = 1024; #endif // size of memory required to store the matrix const unsigned int mem_size = sizeof(float) * size_x * size_y; unsigned int timer; cutCreateTimer(&timer); CUT_DEVICE_INIT(argc, argv); // allocate host memory float* h_idata = (float*) malloc(mem_size); // initalize the memory srand(15235911); for( unsigned int i = 0; i < (size_x * size_y); ++i) { h_idata[i] = (float) i; // rand(); } // allocate device memory float* d_idata; float* d_odata; CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size)); // copy host memory to device CUDA_SAFE_CALL( hipMemcpy( d_idata, h_idata, mem_size, hipMemcpyHostToDevice) ); // setup execution parameters dim3 grid(size_x / BLOCK_DIM, size_y / BLOCK_DIM, 1); dim3 threads(BLOCK_DIM, BLOCK_DIM, 1); // warmup so we don't time CUDA startup hipLaunchKernelGGL(( transpose_naive), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y); hipLaunchKernelGGL(( transpose), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y); printf("Transposing a %d by %d matrix of floats...\n", size_x, size_y); // execute the kernel cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( transpose_naive), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y); } hipDeviceSynchronize(); cutStopTimer(timer); float naiveTime = cutGetTimerValue(timer); // execute the kernel cutResetTimer(timer); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { hipLaunchKernelGGL(( transpose), dim3(grid), dim3(threads) , 0, 0, d_odata, d_idata, size_x, size_y); } hipDeviceSynchronize(); cutStopTimer(timer); float optimizedTime = cutGetTimerValue(timer); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host float* h_odata = (float*) malloc(mem_size); CUDA_SAFE_CALL( hipMemcpy( h_odata, d_odata, mem_size, hipMemcpyDeviceToHost) ); // compute reference solution float* reference = (float*) malloc( mem_size); cutResetTimer(timer); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { computeGold( reference, h_idata, size_x, size_y); } hipDeviceSynchronize(); cutStopTimer(timer); float cpuTime = cutGetTimerValue(timer); printf("CPU transpose average time: %0.3f ms\n\n", cpuTime / numIterations); printf("Naive transpose average time: %0.3f ms\n", naiveTime / numIterations); printf("Optimized transpose average time: %0.3f ms\n", optimizedTime / numIterations); // check result CUTBoolean res = cutComparef( reference, h_odata, size_x * size_y); printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // cleanup memory free(h_idata); free(h_odata); free( reference); CUDA_SAFE_CALL(hipFree(d_idata)); CUDA_SAFE_CALL(hipFree(d_odata)); CUT_SAFE_CALL( cutDeleteTimer(timer)); }
3b1a6bf66c8795f9ad9f695b0450f8af550cced3.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.numIterations1 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* Matrix transpose with Cuda * Host code. * This example transposes arbitrary-size matrices. It compares a naive * transpose kernel that suffers from non-coalesced writes, to an optimized * transpose with fully coalesced memory access and no bank conflicts. On * a G80 GPU, the optimized transpose can be more than 10x faster for large * matrices. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project //#include <cutil.h> #include "/opt/nvidia/cuda/common/inc/cutil.h" // includes, kernels #include <transpose_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold( float* reference, float* idata, const unsigned int size_x, const unsigned int size_y ); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int numIterations = 5; // size of the matrix #ifdef __DEVICE_EMULATION__ const unsigned int size_x = 32; const unsigned int size_y = 128; #else // const unsigned int size_x = 64; // const unsigned int size_y = 64; // const unsigned int size_x = 256; // const unsigned int size_y = 4096; // const unsigned int size_x = 256; // const unsigned int size_y = 256; const unsigned int size_x = 4096; const unsigned int size_y = 4096; // const unsigned int size_x = 1024; // const unsigned int size_y = 1024; #endif // size of memory required to store the matrix const unsigned int mem_size = sizeof(float) * size_x * size_y; unsigned int timer; cutCreateTimer(&timer); CUT_DEVICE_INIT(argc, argv); // allocate host memory float* h_idata = (float*) malloc(mem_size); // initalize the memory srand(15235911); for( unsigned int i = 0; i < (size_x * size_y); ++i) { h_idata[i] = (float) i; // rand(); } // allocate device memory float* d_idata; float* d_odata; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size)); // copy host memory to device CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) ); // setup execution parameters dim3 grid(size_x / BLOCK_DIM, size_y / BLOCK_DIM, 1); dim3 threads(BLOCK_DIM, BLOCK_DIM, 1); // warmup so we don't time CUDA startup transpose_naive<<< grid, threads >>>(d_odata, d_idata, size_x, size_y); transpose<<< grid, threads >>>(d_odata, d_idata, size_x, size_y); printf("Transposing a %d by %d matrix of floats...\n", size_x, size_y); // execute the kernel cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { transpose_naive<<< grid, threads >>>(d_odata, d_idata, size_x, size_y); } cudaThreadSynchronize(); cutStopTimer(timer); float naiveTime = cutGetTimerValue(timer); // execute the kernel cutResetTimer(timer); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { transpose<<< grid, threads >>>(d_odata, d_idata, size_x, size_y); } cudaThreadSynchronize(); cutStopTimer(timer); float optimizedTime = cutGetTimerValue(timer); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host float* h_odata = (float*) malloc(mem_size); CUDA_SAFE_CALL( cudaMemcpy( h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost) ); // compute reference solution float* reference = (float*) malloc( mem_size); cutResetTimer(timer); cutStartTimer(timer); for (int i = 0; i < numIterations; ++i) { computeGold( reference, h_idata, size_x, size_y); } cudaThreadSynchronize(); cutStopTimer(timer); float cpuTime = cutGetTimerValue(timer); printf("CPU transpose average time: %0.3f ms\n\n", cpuTime / numIterations); printf("Naive transpose average time: %0.3f ms\n", naiveTime / numIterations); printf("Optimized transpose average time: %0.3f ms\n", optimizedTime / numIterations); // check result CUTBoolean res = cutComparef( reference, h_odata, size_x * size_y); printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // cleanup memory free(h_idata); free(h_odata); free( reference); CUDA_SAFE_CALL(cudaFree(d_idata)); CUDA_SAFE_CALL(cudaFree(d_odata)); CUT_SAFE_CALL( cutDeleteTimer(timer)); }
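The transpose benchmark above times kernel loops with the cutil host timer, bracketing each loop with a device-wide synchronize (cudaThreadSynchronize in the .cu file, which hipify maps to hipDeviceSynchronize because the cudaThread* calls are deprecated). The same averaged measurement can also be taken with runtime events instead of a host timer; a self-contained sketch of that approach (the busy kernel stands in for the transpose kernels, which are defined in a separate header above):

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void busy(float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] = data[i] * 2.0f + 1.0f;   // stand-in for the transpose kernel
}

int main() {
  const int n = 1 << 22, iters = 5;
  float* d = nullptr;
  hipMalloc((void**)&d, n * sizeof(float));

  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);

  hipEventRecord(start, 0);
  for (int i = 0; i < iters; ++i) {
    hipLaunchKernelGGL(busy, dim3((n + 255) / 256), dim3(256), 0, 0, d, n);
  }
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);   // replaces the explicit device-wide synchronize before stopping the host timer

  float ms = 0.0f;
  hipEventElapsedTime(&ms, start, stop);
  printf("average kernel time: %0.3f ms\n", ms / iters);

  hipEventDestroy(start);
  hipEventDestroy(stop);
  hipFree(d);
  return 0;
}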
b957b51ab980b73262aba0cd031d560e465aa957.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void field_summary( const int x_inner, const int y_inner, const int halo_depth, const double* volume, const double* density, const double* energy0, const double* u, double* vol_out, double* mass_out, double* ie_out, double* temp_out) { const int gid = threadIdx.x+blockDim.x*blockIdx.x; const int lid = threadIdx.x; __shared__ double vol_shared[BLOCK_SIZE]; __shared__ double mass_shared[BLOCK_SIZE]; __shared__ double ie_shared[BLOCK_SIZE]; __shared__ double temp_shared[BLOCK_SIZE]; vol_shared[lid] = 0.0; mass_shared[lid] = 0.0; ie_shared[lid] = 0.0; temp_shared[lid] = 0.0; if(gid < x_inner*y_inner) { const int x = x_inner + 2*halo_depth; const int col = gid % x_inner; const int row = gid / x_inner; const int off0 = halo_depth*(x + 1); const int index = off0 + col + row*x; double cell_vol = volume[index]; double cell_mass = cell_vol*density[index]; vol_shared[lid] = cell_vol; mass_shared[lid] = cell_mass; ie_shared[lid] = cell_mass*energy0[index]; temp_shared[lid] = cell_mass*u[index]; } __syncthreads(); #pragma unroll for(int ii = BLOCK_SIZE/2; ii > 0; ii /= 2) { if(lid < ii) { vol_shared[lid] += vol_shared[lid+ii]; mass_shared[lid] += mass_shared[lid+ii]; ie_shared[lid] += ie_shared[lid+ii]; temp_shared[lid] += temp_shared[lid+ii]; } __syncthreads(); } vol_out[blockIdx.x] = vol_shared[0]; mass_out[blockIdx.x] = mass_shared[0]; ie_out[blockIdx.x] = ie_shared[0]; temp_out[blockIdx.x] = temp_shared[0]; }
b957b51ab980b73262aba0cd031d560e465aa957.cu
#include "includes.h"

__global__ void field_summary(
        const int x_inner,
        const int y_inner,
        const int halo_depth,
        const double* volume,
        const double* density,
        const double* energy0,
        const double* u,
        double* vol_out,
        double* mass_out,
        double* ie_out,
        double* temp_out)
{
    const int gid = threadIdx.x+blockDim.x*blockIdx.x;
    const int lid = threadIdx.x;

    __shared__ double vol_shared[BLOCK_SIZE];
    __shared__ double mass_shared[BLOCK_SIZE];
    __shared__ double ie_shared[BLOCK_SIZE];
    __shared__ double temp_shared[BLOCK_SIZE];

    vol_shared[lid] = 0.0;
    mass_shared[lid] = 0.0;
    ie_shared[lid] = 0.0;
    temp_shared[lid] = 0.0;

    if(gid < x_inner*y_inner)
    {
        const int x = x_inner + 2*halo_depth;
        const int col = gid % x_inner;
        const int row = gid / x_inner;
        const int off0 = halo_depth*(x + 1);
        const int index = off0 + col + row*x;

        double cell_vol = volume[index];
        double cell_mass = cell_vol*density[index];
        vol_shared[lid] = cell_vol;
        mass_shared[lid] = cell_mass;
        ie_shared[lid] = cell_mass*energy0[index];
        temp_shared[lid] = cell_mass*u[index];
    }

    __syncthreads();

#pragma unroll
    for(int ii = BLOCK_SIZE/2; ii > 0; ii /= 2)
    {
        if(lid < ii)
        {
            vol_shared[lid] += vol_shared[lid+ii];
            mass_shared[lid] += mass_shared[lid+ii];
            ie_shared[lid] += ie_shared[lid+ii];
            temp_shared[lid] += temp_shared[lid+ii];
        }

        __syncthreads();
    }

    vol_out[blockIdx.x] = vol_shared[0];
    mass_out[blockIdx.x] = mass_shared[0];
    ie_out[blockIdx.x] = ie_shared[0];
    temp_out[blockIdx.x] = temp_shared[0];
}
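// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original file): the
// field_summary kernel above leaves only one partial sum per block in
// vol_out/mass_out/ie_out/temp_out; the final scalar reduction happens on the
// host elsewhere in the application. A minimal host-side wrap-up could look
// like the function below -- the name finish_field_summary, its arguments,
// and the plain cudaMemcpy-then-loop approach are assumptions, not taken
// from this file.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>
#include <vector>

void finish_field_summary(int num_blocks,
                          const double* d_vol, const double* d_mass,
                          const double* d_ie, const double* d_temp,
                          double* vol, double* mass, double* ie, double* temp)
{
    // Copy the per-block partial sums back and accumulate them serially.
    std::vector<double> h_vol(num_blocks), h_mass(num_blocks),
                        h_ie(num_blocks), h_temp(num_blocks);
    cudaMemcpy(h_vol.data(),  d_vol,  num_blocks * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_mass.data(), d_mass, num_blocks * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_ie.data(),   d_ie,   num_blocks * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_temp.data(), d_temp, num_blocks * sizeof(double), cudaMemcpyDeviceToHost);

    *vol = *mass = *ie = *temp = 0.0;
    for (int i = 0; i < num_blocks; ++i) {
        *vol  += h_vol[i];
        *mass += h_mass[i];
        *ie   += h_ie[i];
        *temp += h_temp[i];
    }
}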
e8e07051ce8a896af065eb3cb032064a83a2f049.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ///nvcc -o fil main.cu -O3 -m=64 -arch=compute_61 -code=sm_61 -Xptxas -allow-expensive-optimizations=true -Xptxas -v #include <iostream> #include <chrono> #include <fstream> #include <algorithm> #include <inttypes.h> #include <bitset> #include <iostream> #include <vector> #include <map> #include <iomanip> #include <fstream> #include <chrono> #include <mutex> #include <time.h> #include "lcg.h" #ifdef BOINC #include "boinc_api.h" #if defined _WIN32 || defined _WIN64 #include "boinc_win.h" #endif #endif uint64_t millis() {return (std::chrono::duration_cast< std::chrono::milliseconds >(std::chrono::system_clock::now().time_since_epoch())).count();} #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(hipError_t code, const char *file, int line) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", hipGetErrorString(code), code, file, line); exit(code); } } // ===== LCG IMPLEMENTATION ===== // namespace java_lcg { //region Java LCG #define Random uint64_t #define RANDOM_MULTIPLIER 0x5DEECE66DULL #define RANDOM_ADDEND 0xBULL #define RANDOM_MASK ((1ULL << 48u) - 1) #define get_random(seed) ((Random)((seed ^ RANDOM_MULTIPLIER) & RANDOM_MASK)) __host__ __device__ __forceinline__ static int32_t random_next(Random *random, int bits) { *random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK; return (int32_t) (*random >> (48u - bits)); } __device__ __forceinline__ static int32_t random_next_int(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; if ((bound & m) == 0) { r = (int32_t) ((bound * (uint64_t) r) >> 31u); } else { for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); } return r; } __device__ __host__ __forceinline__ static int32_t random_next_int_nonpow(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); return r; } __host__ __device__ __forceinline__ static double next_double(Random *random) { return (double) ((((uint64_t) ((uint32_t) random_next(random, 26)) << 27u)) + random_next(random, 27)) / (double)(1ULL << 53); } __host__ __device__ __forceinline__ static uint64_t random_next_long (Random *random) { return (((uint64_t)random_next(random, 32)) << 32u) + (int32_t)random_next(random, 32); } __host__ __device__ __forceinline__ static void advance2(Random *random) { *random = (*random * 0xBB20B4600A69LLU + 0x40942DE6BALLU) & RANDOM_MASK; } __host__ __device__ __forceinline__ static void advance3759(Random *random) { *random = (*random * 0x6FE85C031F25LLU + 0x8F50ECFF899LLU) & RANDOM_MASK; } } using namespace java_lcg; namespace device_intrinsics { //region DEVICE INTRINSICS #define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) #define PXL_GLOBAL_PTR "l" #else #define PXL_GLOBAL_PTR "r" #endif DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l1(const void* const ptr) { asm("prefetch.local.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr) { asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l2(const void* const ptr) { asm("prefetch.local.L2 [%0];" : : PXL_GLOBAL_PTR(ptr)); } #if __CUDA__ < 10 
#define __ldg(ptr) (*(ptr)) #endif } using namespace device_intrinsics; #define BLOCK_SIZE (128) //#define BLOCK_SIZE (128) #define WORK_SIZE_BITS 16 #define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE)) //#define SEEDS_PER_CALL 8000000 //Specifying where the (1 = dirt/grass, 0 = sand) is // This will match the seed 76261196830436 (not pack.png ofc) // Double match: 76261206560653 (almost 100% confirmed, sans very last bit of sand in first match) // Triple match: 76273693341674 (100% match) __constant__ int CHUNK_X = 6; __constant__ int CHUNK_X_2 = 6; __constant__ int CHUNK_X_3 = 5; #define CHUNK_Z -1 #define CHUNK_Z_2 -2 #define CHUNK_Z_3 -1 #define INNER_X_START 4 #define INNER_Z_START 0 #define INNER_X_END 13 #define INNER_Z_END 2 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{1,15,15,15,1,15,0,15,15,15}, {15,1,15,15,15,1,15,1,15,15}, {15,15,1,1,15,15,1,1,1,0}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; #define EARLY_RETURN (INNER_Z_END * 16 + INNER_X_END) #define INNER_X_START_2 0 #define INNER_Z_START_2 6 #define INNER_X_END_2 9 #define INNER_Z_END_2 15 __constant__ uint8_t DIRT_HEIGHT_2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1] = {{0,15,15,15,15,15,15,15,15,15}, {15,0,0,15,15,15,15,15,15,15}, {0,15,15,0,15,15,15,15,15,15}, {15,1,15,15,0,15,15,15,15,15}, {15,15,0,15,15,0,15,15,15,15}, {15,15,15,0,15,0,15,15,15,15}, {15,15,15,15,0,15,0,15,15,15}, {0,15,15,15,15,0,0,15,15,15}, {0,0,15,15,15,15,0,0,0,15}, {15,15,0,0,15,15,15,0,15,0}}; __constant__ double LocalNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; #define INNER_X_START_3 4 #define INNER_Z_START_3 0 #define INNER_X_END_3 15 #define INNER_Z_END_3 10 __constant__ uint8_t DIRT_HEIGHT_2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1] = {{1,1,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,15,0}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,15,15,15,15,15,15,15,15,15}, {15,15,0,15,15,15,15,15,15,15,15,15}, {15,15,1,15,15,15,15,15,15,15,15,15}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,15}}; __constant__ double LocalNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; /* //Old test: matches 104703450999364 #define CHUNK_X 2 #define CHUNK_Z 11 #define INNER_X_START 2 #define INNER_Z_START 0 #define INNER_X_END 11 #define INNER_Z_END 0 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{0,15,0,1,0,15,15,15,15,1}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; */ //The generation of the simplex layers and noise namespace noise { //region Simplex layer gen /* End of constant for simplex noise*/ struct Octave { double xo; double yo; double zo; uint8_t permutations[256]; }; __shared__ uint8_t permutations[256][BLOCK_SIZE]; #define getValue(array, index) array[index][threadIdx.x] #define setValue(array, index, value) array[index][threadIdx.x] = value __device__ static inline void setupNoise(const uint8_t nbOctaves, Random *random, Octave resultArray[]) { for (int j = 0; j < nbOctaves; ++j) { __prefetch_local_l2(&resultArray[j]); resultArray[j].xo = next_double(random) * 256.0; resultArray[j].yo = next_double(random) * 
256.0; resultArray[j].zo = next_double(random) * 256.0; #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = random_next_int(random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); //uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, getValue(permutations,randomIndex)); setValue(permutations, randomIndex, v1); //} } #pragma unroll for(int c = 0; c<256;c++) { __prefetch_local_l1(&(resultArray[j].permutations[c+1])); resultArray[j].permutations[c] = getValue(permutations,c); } //resultArray[j].xo = xo; //resultArray[j].yo = yo; //resultArray[j].zo = zo; } } __device__ static inline void SkipNoiseGen(const uint8_t nbOctaves, Random* random) { for (int j = 0; j < nbOctaves; ++j) { lcg::advance<2*3>(*random); for(int index = 0; index<256; index++) { random_next_int(random, 256ull - index); } } } __device__ static inline double lerp(double x, double a, double b) { return a + x * (b - a); } __device__ static inline double grad(uint8_t hash, double x, double y, double z) { switch (hash & 0xFu) { case 0x0: return x + y; case 0x1: return -x + y; case 0x2: return x - y; case 0x3: return -x - y; case 0x4: return x + z; case 0x5: return -x + z; case 0x6: return x - z; case 0x7: return -x - z; case 0x8: return y + z; case 0x9: return -y + z; case 0xA: return y - z; case 0xB: return -y - z; case 0xC: return y + x; case 0xD: return -y + z; case 0xE: return y - x; case 0xF: return -y - z; default: return 0; // never happens } } __device__ static inline void generateNormalPermutations(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= 
clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START && columnIndex%16 <= INNER_X_END && DIRT_HEIGHT_2D[columnIndex/16 - INNER_Z_START][columnIndex%16 - INNER_X_START] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } if (columnIndex == EARLY_RETURN) return; columnIndex++; } } } } __device__ static inline void generateNormalPermutations_2(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); 
zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_2 && columnIndex%16 <= INNER_X_END_2 && DIRT_HEIGHT_2D_2[columnIndex/16 - INNER_Z_START_2][columnIndex%16 - INNER_X_START_2] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNormalPermutations_3(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) 
clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_3 && columnIndex%16 <= INNER_X_END_3 && DIRT_HEIGHT_2D_3[columnIndex/16 - INNER_Z_START_3][columnIndex%16 - INNER_X_START_3] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNoise(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_2(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double 
octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations_2(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_3(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations_3(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } } using namespace noise; __device__ static inline bool match(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[EARLY_RETURN+1]; #pragma unroll for(uint16_t i = 0; i<EARLY_RETURN+1;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise(heightField, (double) (CHUNK_X <<4), (double) (CHUNK_Z<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for(uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { if (DIRT_HEIGHT_2D[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START + x + (INNER_Z_START + z) * 16] + LocalNoise2D[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D[z][x]) return false; } } } return true; } __device__ static inline bool match2(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_2(heightField, (double) (CHUNK_X_2 <<4), (double) (CHUNK_Z_2<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { if (DIRT_HEIGHT_2D_2[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_2 + x + (INNER_Z_START_2 + z) * 16] + LocalNoise2D_2[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_2[z][x]) return false; } } } return true; } __device__ static inline bool match3(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_3(heightField, (double) (CHUNK_X_3 <<4), (double) (CHUNK_Z_3<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { if (DIRT_HEIGHT_2D_3[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_3 + x + (INNER_Z_START_3 + z) * 16] + LocalNoise2D_3[z][x] * 0.2 > 0.0 ? 
0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_3[z][x]) return false; } } } return true; } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck(uint64_t offset, uint64_t* buffer, uint32_t* counter) { uint64_t seed = blockIdx.x * blockDim.x + threadIdx.x + offset; if (match(seed)) { buffer[atomicAdd(counter,1)] = seed; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck2(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; if (!match2(buffer[seedIndex])) { buffer[seedIndex] = 0; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck3(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; uint64_t seed = buffer[seedIndex]; if (seed==0) return; if (!match3(seed)) { buffer[seedIndex] = 0; } } std::ifstream inSeeds; std::ofstream outSeeds; uint64_t* buffer; uint32_t* counter; double getNextDoubleForLocNoise(int x, int z); void setup(int gpu_device) { hipSetDevice(gpu_device); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); double locNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for (uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { locNoise2D[z][x] = getNextDoubleForLocNoise((CHUNK_X<<4) + INNER_X_START + x, (CHUNK_Z<<4) + INNER_Z_START + z); } } GPU_ASSERT(hipMemcpyToSymbol(LocalNoise2D, &locNoise2D, sizeof(locNoise2D))); GPU_ASSERT(hipPeekAtLastError()); double locNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { locNoise2D_2[z][x] = getNextDoubleForLocNoise((CHUNK_X_2<<4) + INNER_X_START_2 + x, (CHUNK_Z_2<<4) + INNER_Z_START_2 + z); } } GPU_ASSERT(hipMemcpyToSymbol(LocalNoise2D_2, &locNoise2D_2, sizeof(locNoise2D_2))); GPU_ASSERT(hipPeekAtLastError()); double locNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { locNoise2D_3[z][x] = getNextDoubleForLocNoise((CHUNK_X_3<<4) + INNER_X_START_3 + x, (CHUNK_Z_3<<4) + INNER_Z_START_3 + z); } } GPU_ASSERT(hipMemcpyToSymbol(LocalNoise2D_3, &locNoise2D_3, sizeof(locNoise2D_3))); GPU_ASSERT(hipPeekAtLastError()); } time_t elapsed_chkpoint = 0; struct checkpoint_vars { unsigned long long offset; time_t elapsed_chkpoint; }; int main(int argc, char *argv[]) { int gpu_device = 0; uint64_t START; uint64_t offsetStart = 0; uint64_t COUNT; int x = 116; int chunkxCPU = 6; int chunkxCPU2 = 6; int chunkxCPU3 = 5; #ifdef BOINC BOINC_OPTIONS options; boinc_options_defaults(options); options.normal_thread_priority = true; boinc_init_options(&options); #endif for (int i = 1; i < argc; i += 2) { const char *param = argv[i]; if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) { gpu_device = atoi(argv[i + 1]); } else if (strcmp(param, "-s") == 0 || strcmp(param, "--start") == 0) { sscanf(argv[i + 1], "%llu", &START); } else if (strcmp(param, "-c") == 0 || strcmp(param, "--count") == 0) { sscanf(argv[i + 1], "%llu", &COUNT); } else if (strcmp(param, "-x") == 0){ sscanf(argv[i + 1], "%i", &x); } else { fprintf(stderr,"Unknown parameter: %s\n", param); } } x = (x>>4) - 7; chunkxCPU += x; chunkxCPU2 += x; chunkxCPU3 += x; 
GPU_ASSERT(hipMemcpyToSymbol(CHUNK_X, &chunkxCPU, sizeof(CHUNK_X))); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipMemcpyToSymbol(CHUNK_X_2, &chunkxCPU2, sizeof(CHUNK_X_2))); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipMemcpyToSymbol(CHUNK_X_3, &chunkxCPU3, sizeof(CHUNK_X_3))); GPU_ASSERT(hipPeekAtLastError()); FILE *checkpoint_data = boinc_fopen("packpoint.txt", "rb"); if(!checkpoint_data){ fprintf(stderr, "No checkpoint to load\n"); } else{ #ifdef BOINC boinc_begin_critical_section(); #endif struct checkpoint_vars data_store; fread(&data_store, sizeof(data_store), 1, checkpoint_data); offsetStart = data_store.offset; elapsed_chkpoint = data_store.elapsed_chkpoint; fprintf(stderr, "Checkpoint loaded, task time %d s, seed pos: %llu\n", elapsed_chkpoint, START); fclose(checkpoint_data); #ifdef BOINC boinc_end_critical_section(); #endif } #ifdef BOINC APP_INIT_DATA aid; boinc_get_init_data(aid); if (aid.gpu_device_num >= 0) { gpu_device = aid.gpu_device_num; fprintf(stderr,"boinc gpu %i gpuindex: %i \n", aid.gpu_device_num, gpu_device); } else { fprintf(stderr,"stndalone gpuindex %i \n", gpu_device); } #endif setup(gpu_device); uint64_t seedCount = COUNT; std::cout << "Processing " << seedCount << " seeds" << std::endl; outSeeds.open("seedsout"); GPU_ASSERT(hipMallocManaged(&buffer, sizeof(*buffer) * SEEDS_PER_CALL)); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipMallocManaged(&counter, sizeof(*counter))); GPU_ASSERT(hipPeekAtLastError()); time_t start_time = time(NULL); int outCount = 0; int checkpointTemp = 0; for(uint64_t offset =offsetStart;offset<seedCount;offset+=SEEDS_PER_CALL) { // Normal filtering time_t elapsed = time(NULL) - start_time; double frac = (double) offset / (double)(seedCount); #ifdef BOINC boinc_fraction_done(frac); #endif *counter = 0; hipLaunchKernelGGL(( tempCheck), dim3(1ULL<<WORK_SIZE_BITS),dim3(BLOCK_SIZE), 0, 0, 0, 0, START + offset, buffer,counter); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); hipLaunchKernelGGL(( tempCheck2), dim3(((*counter)/BLOCK_SIZE)+1),dim3(BLOCK_SIZE), 0, 0, *counter, buffer); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); hipLaunchKernelGGL(( tempCheck3), dim3(((*counter)/BLOCK_SIZE)+1),dim3(BLOCK_SIZE), 0, 0, *counter, buffer); GPU_ASSERT(hipPeekAtLastError()); GPU_ASSERT(hipDeviceSynchronize()); for(int i=0;i<*counter;i++) { if (buffer[i]!=0) { uint64_t seed = buffer[i]; std::cout << "3rd level seed found: " << seed << std::endl; outSeeds << seed << std::endl; outCount++; } } if(checkpointTemp >= 180000000 || boinc_time_to_checkpoint()){ #ifdef BOINC boinc_begin_critical_section(); // Boinc should not interrupt this #endif // Checkpointing section below boinc_delete_file("packpoint.txt"); // Don't touch, same func as normal fdel FILE *checkpoint_data = boinc_fopen("packpoint.txt", "wb"); struct checkpoint_vars data_store; data_store.offset = offset; data_store.elapsed_chkpoint = elapsed_chkpoint + elapsed; fwrite(&data_store, sizeof(data_store), 1, checkpoint_data); fclose(checkpoint_data); checkpointTemp = 0; #ifdef BOINC boinc_end_critical_section(); boinc_checkpoint_completed(); // Checkpointing completed #endif } checkpointTemp += SEEDS_PER_CALL; std::cout << "Seeds left:" << (((int64_t)seedCount-offset)-SEEDS_PER_CALL) << std::endl; } std::cout << "Done processing" << std::endl; #ifdef BOINC boinc_begin_critical_section(); #endif time_t elapsed = time(NULL) - start_time; double done = (double)COUNT / 1000000.0; double speed = done / (double) elapsed; fprintf(stderr, 
"\nSpeed: %.2lfm/s\n", speed ); fprintf(stderr, "Done\n"); fprintf(stderr, "Processed: %llu seeds in %.2lfs seconds\n", COUNT, (double) elapsed_chkpoint + (double) elapsed ); fprintf(stderr, "Have %llu output seeds.\n", outCount); fflush(stderr); outSeeds.close(); boinc_delete_file("packpoint.txt"); #ifdef BOINC boinc_end_critical_section(); #endif boinc_finish(0); } double getNextDoubleForLocNoise(int x, int z) { Random rand = get_random((((int64_t)x) >> 4) * 341873128712LL + (((int64_t)z) >> 4) * 132897987541LL); for (int dx = 0; dx < 16; dx++) { for (int dz = 0; dz < 16; dz++) { if (dx == (x & 15) && dz == (z & 15)) { //advance2(&rand); //advance2(&rand); return next_double(&rand); } advance2(&rand); advance2(&rand); advance2(&rand); for(int k1 = 127; k1 >= 0; k1--) { random_next_int_nonpow(&rand,5); } //for (int i = 0; i < 67; i++) { // advance2(&rand); //} } } exit(-99); }
e8e07051ce8a896af065eb3cb032064a83a2f049.cu
///nvcc -o fil main.cu -O3 -m=64 -arch=compute_61 -code=sm_61 -Xptxas -allow-expensive-optimizations=true -Xptxas -v #include <iostream> #include <chrono> #include <fstream> #include <algorithm> #include <inttypes.h> #include <bitset> #include <iostream> #include <vector> #include <map> #include <iomanip> #include <fstream> #include <chrono> #include <mutex> #include <time.h> #include "lcg.h" #ifdef BOINC #include "boinc_api.h" #if defined _WIN32 || defined _WIN64 #include "boinc_win.h" #endif #endif uint64_t millis() {return (std::chrono::duration_cast< std::chrono::milliseconds >(std::chrono::system_clock::now().time_since_epoch())).count();} #define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__) inline void gpuAssert(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line); exit(code); } } // ===== LCG IMPLEMENTATION ===== // namespace java_lcg { //region Java LCG #define Random uint64_t #define RANDOM_MULTIPLIER 0x5DEECE66DULL #define RANDOM_ADDEND 0xBULL #define RANDOM_MASK ((1ULL << 48u) - 1) #define get_random(seed) ((Random)((seed ^ RANDOM_MULTIPLIER) & RANDOM_MASK)) __host__ __device__ __forceinline__ static int32_t random_next(Random *random, int bits) { *random = (*random * RANDOM_MULTIPLIER + RANDOM_ADDEND) & RANDOM_MASK; return (int32_t) (*random >> (48u - bits)); } __device__ __forceinline__ static int32_t random_next_int(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; if ((bound & m) == 0) { r = (int32_t) ((bound * (uint64_t) r) >> 31u); } else { for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); } return r; } __device__ __host__ __forceinline__ static int32_t random_next_int_nonpow(Random *random, const uint16_t bound) { int32_t r = random_next(random, 31); const uint16_t m = bound - 1u; for (int32_t u = r; u - (r = u % bound) + m < 0; u = random_next(random, 31)); return r; } __host__ __device__ __forceinline__ static double next_double(Random *random) { return (double) ((((uint64_t) ((uint32_t) random_next(random, 26)) << 27u)) + random_next(random, 27)) / (double)(1ULL << 53); } __host__ __device__ __forceinline__ static uint64_t random_next_long (Random *random) { return (((uint64_t)random_next(random, 32)) << 32u) + (int32_t)random_next(random, 32); } __host__ __device__ __forceinline__ static void advance2(Random *random) { *random = (*random * 0xBB20B4600A69LLU + 0x40942DE6BALLU) & RANDOM_MASK; } __host__ __device__ __forceinline__ static void advance3759(Random *random) { *random = (*random * 0x6FE85C031F25LLU + 0x8F50ECFF899LLU) & RANDOM_MASK; } } using namespace java_lcg; namespace device_intrinsics { //region DEVICE INTRINSICS #define DEVICE_STATIC_INTRINSIC_QUALIFIERS static __device__ __forceinline__ #if (defined(_MSC_VER) && defined(_WIN64)) || defined(__LP64__) #define PXL_GLOBAL_PTR "l" #else #define PXL_GLOBAL_PTR "r" #endif DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l1(const void* const ptr) { asm("prefetch.local.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_global_uniform(const void* const ptr) { asm("prefetchu.L1 [%0];" : : PXL_GLOBAL_PTR(ptr)); } DEVICE_STATIC_INTRINSIC_QUALIFIERS void __prefetch_local_l2(const void* const ptr) { asm("prefetch.local.L2 [%0];" : : PXL_GLOBAL_PTR(ptr)); } #if __CUDA__ < 10 #define __ldg(ptr) (*(ptr)) #endif } using namespace device_intrinsics; #define 
BLOCK_SIZE (128) //#define BLOCK_SIZE (128) #define WORK_SIZE_BITS 16 #define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE)) //#define SEEDS_PER_CALL 8000000 //Specifying where the (1 = dirt/grass, 0 = sand) is // This will match the seed 76261196830436 (not pack.png ofc) // Double match: 76261206560653 (almost 100% confirmed, sans very last bit of sand in first match) // Triple match: 76273693341674 (100% match) __constant__ int CHUNK_X = 6; __constant__ int CHUNK_X_2 = 6; __constant__ int CHUNK_X_3 = 5; #define CHUNK_Z -1 #define CHUNK_Z_2 -2 #define CHUNK_Z_3 -1 #define INNER_X_START 4 #define INNER_Z_START 0 #define INNER_X_END 13 #define INNER_Z_END 2 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{1,15,15,15,1,15,0,15,15,15}, {15,1,15,15,15,1,15,1,15,15}, {15,15,1,1,15,15,1,1,1,0}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; #define EARLY_RETURN (INNER_Z_END * 16 + INNER_X_END) #define INNER_X_START_2 0 #define INNER_Z_START_2 6 #define INNER_X_END_2 9 #define INNER_Z_END_2 15 __constant__ uint8_t DIRT_HEIGHT_2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1] = {{0,15,15,15,15,15,15,15,15,15}, {15,0,0,15,15,15,15,15,15,15}, {0,15,15,0,15,15,15,15,15,15}, {15,1,15,15,0,15,15,15,15,15}, {15,15,0,15,15,0,15,15,15,15}, {15,15,15,0,15,0,15,15,15,15}, {15,15,15,15,0,15,0,15,15,15}, {0,15,15,15,15,0,0,15,15,15}, {0,0,15,15,15,15,0,0,0,15}, {15,15,0,0,15,15,15,0,15,0}}; __constant__ double LocalNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; #define INNER_X_START_3 4 #define INNER_Z_START_3 0 #define INNER_X_END_3 15 #define INNER_Z_END_3 10 __constant__ uint8_t DIRT_HEIGHT_2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1] = {{1,1,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,0,15}, {15,15,15,15,15,15,15,15,15,15,15,0}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,0}, {15,15,15,15,15,15,15,15,15,15,15,15}, {15,15,0,15,15,15,15,15,15,15,15,15}, {15,15,1,15,15,15,15,15,15,15,15,15}, {15,15,15,1,15,15,15,15,15,15,15,15}, {15,15,15,0,15,15,15,15,15,15,15,15}}; __constant__ double LocalNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; /* //Old test: matches 104703450999364 #define CHUNK_X 2 #define CHUNK_Z 11 #define INNER_X_START 2 #define INNER_Z_START 0 #define INNER_X_END 11 #define INNER_Z_END 0 __constant__ uint8_t DIRT_HEIGHT_2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1] = {{0,15,0,1,0,15,15,15,15,1}}; __constant__ double LocalNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; */ //The generation of the simplex layers and noise namespace noise { //region Simplex layer gen /* End of constant for simplex noise*/ struct Octave { double xo; double yo; double zo; uint8_t permutations[256]; }; __shared__ uint8_t permutations[256][BLOCK_SIZE]; #define getValue(array, index) array[index][threadIdx.x] #define setValue(array, index, value) array[index][threadIdx.x] = value __device__ static inline void setupNoise(const uint8_t nbOctaves, Random *random, Octave resultArray[]) { for (int j = 0; j < nbOctaves; ++j) { __prefetch_local_l2(&resultArray[j]); resultArray[j].xo = next_double(random) * 256.0; resultArray[j].yo = next_double(random) * 256.0; resultArray[j].zo = next_double(random) * 256.0; #pragma unroll for(int w = 0; 
w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = random_next_int(random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); //uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, getValue(permutations,randomIndex)); setValue(permutations, randomIndex, v1); //} } #pragma unroll for(int c = 0; c<256;c++) { __prefetch_local_l1(&(resultArray[j].permutations[c+1])); resultArray[j].permutations[c] = getValue(permutations,c); } //resultArray[j].xo = xo; //resultArray[j].yo = yo; //resultArray[j].zo = zo; } } __device__ static inline void SkipNoiseGen(const uint8_t nbOctaves, Random* random) { for (int j = 0; j < nbOctaves; ++j) { lcg::advance<2*3>(*random); for(int index = 0; index<256; index++) { random_next_int(random, 256ull - index); } } } __device__ static inline double lerp(double x, double a, double b) { return a + x * (b - a); } __device__ static inline double grad(uint8_t hash, double x, double y, double z) { switch (hash & 0xFu) { case 0x0: return x + y; case 0x1: return -x + y; case 0x2: return x - y; case 0x3: return -x - y; case 0x4: return x + z; case 0x5: return -x + z; case 0x6: return x - z; case 0x7: return -x - z; case 0x8: return y + z; case 0x9: return -y + z; case 0xA: return y - z; case 0xB: return -y - z; case 0xC: return y + x; case 0xD: return -y + z; case 0xE: return y - x; case 0xF: return -y - z; default: return 0; // never happens } } __device__ static inline void generateNormalPermutations(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * 
yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START && columnIndex%16 <= INNER_X_END && DIRT_HEIGHT_2D[columnIndex/16 - INNER_Z_START][columnIndex%16 - INNER_X_START] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } if (columnIndex == EARLY_RETURN) return; columnIndex++; } } } } __device__ static inline void generateNormalPermutations_2(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * 
zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_2 && columnIndex%16 <= INNER_X_END_2 && DIRT_HEIGHT_2D_2[columnIndex/16 - INNER_Z_START_2][columnIndex%16 - INNER_X_START_2] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNormalPermutations_3(double *buffer, double x, double y, double z, int sizeX, int sizeY, int sizeZ, double noiseFactorX, double noiseFactorY, double noiseFactorZ, double octaveSize, Random* random) { double xo = lcg::next_double(*random) * 256.0; double yo = lcg::next_double(*random) * 256.0; double zo = lcg::next_double(*random) * 256.0; //Setup the permutation fresh xD #pragma unroll for(int w = 0; w<256; w++) { setValue(permutations, w, w); } for(int index = 0; index<256; index++) { uint32_t randomIndex = lcg::dynamic_next_int(*random, 256ull - index) + index; //if (randomIndex != index) { // swap uint8_t v1 = getValue(permutations,index); uint8_t v2 = getValue(permutations,randomIndex); setValue(permutations,index, v2); setValue(permutations, randomIndex, v1); //} } double octaveWidth = 1.0 / octaveSize; int32_t i2 = -1; double x1 = 0.0; double x2 = 0.0; double xx1 = 0.0; double xx2 = 0.0; double t; double w; int columnIndex = 0; for (int X = 0; X < sizeX; X++) { double xCoord = (x + (double) X) * noiseFactorX + xo; auto clampedXcoord = (int32_t) xCoord; if (xCoord < (double) clampedXcoord) { clampedXcoord--; } auto xBottoms = (uint8_t) ((uint32_t) clampedXcoord & 0xffu); xCoord -= clampedXcoord; t = xCoord * 6 - 15; w = (xCoord * t + 10); double 
fadeX = xCoord * xCoord * xCoord * w; for (int Z = 0; Z < sizeZ; Z++) { double zCoord = zo; auto clampedZCoord = (int32_t) zCoord; if (zCoord < (double) clampedZCoord) { clampedZCoord--; } auto zBottoms = (uint8_t) ((uint32_t) clampedZCoord & 0xffu); zCoord -= clampedZCoord; t = zCoord * 6 - 15; w = (zCoord * t + 10); double fadeZ = zCoord * zCoord * zCoord * w; for (int Y = 0; Y < sizeY; Y++) { double yCoords = (y + (double) Y) * noiseFactorY + yo; auto clampedYCoords = (int32_t) yCoords; if (yCoords < (double) clampedYCoords) { clampedYCoords--; } auto yBottoms = (uint8_t) ((uint32_t) clampedYCoords & 0xffu); yCoords -= clampedYCoords; t = yCoords * 6 - 15; w = yCoords * t + 10; double fadeY = yCoords * yCoords * yCoords * w; // ZCoord if (Y == 0 || yBottoms != i2) { // this is wrong on so many levels, same ybottoms doesnt mean x and z were the same... i2 = yBottoms; uint16_t k2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms)& 0xffu)) + zBottoms; uint16_t l2 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)(xBottoms& 0xffu)) + yBottoms + 1u )& 0xffu)) + zBottoms; uint16_t k3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms )& 0xffu)) + zBottoms; uint16_t l3 = getValue(permutations,(uint8_t)((uint16_t)(getValue(permutations,(uint8_t)((xBottoms + 1u)& 0xffu)) + yBottoms + 1u) & 0xffu)) + zBottoms; x1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(k2& 0xffu)), xCoord, yCoords, zCoord), grad(getValue(permutations,(uint8_t)(k3& 0xffu)), xCoord - 1.0, yCoords, zCoord)); x2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)(l2& 0xffu)), xCoord, yCoords - 1.0, zCoord), grad(getValue(permutations,(uint8_t)(l3& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord)); xx1 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((k2+1u)& 0xffu)), xCoord, yCoords, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((k3+1u)& 0xffu)), xCoord - 1.0, yCoords, zCoord - 1.0)); xx2 = lerp(fadeX, grad(getValue(permutations,(uint8_t)((l2+1u)& 0xffu)), xCoord, yCoords - 1.0, zCoord - 1.0), grad(getValue(permutations,(uint8_t)((l3+1u)& 0xffu)), xCoord - 1.0, yCoords - 1.0, zCoord - 1.0)); } if (columnIndex%16 >= INNER_X_START_3 && columnIndex%16 <= INNER_X_END_3 && DIRT_HEIGHT_2D_3[columnIndex/16 - INNER_Z_START_3][columnIndex%16 - INNER_X_START_3] != 15){ double y1 = lerp(fadeY, x1, x2); double y2 = lerp(fadeY, xx1, xx2); (buffer)[columnIndex] = (buffer)[columnIndex] + lerp(fadeZ, y1, y2) * octaveWidth; } columnIndex++; } } } } __device__ static inline void generateNoise(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_2(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations_2(buffer, 
chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } __device__ static inline void generateNoise_3(double *buffer, double chunkX, double chunkY, double chunkZ, int sizeX, int sizeY, int sizeZ, double offsetX, double offsetY, double offsetZ, Random random, int nbOctaves) { //memset(buffer, 0, sizeof(double) * sizeX * sizeZ * sizeY); double octavesFactor = 1.0; for (int octave = 0; octave < nbOctaves; octave++) { generateNormalPermutations_3(buffer, chunkX, chunkY, chunkZ, sizeX, sizeY, sizeZ, offsetX * octavesFactor, offsetY * octavesFactor, offsetZ * octavesFactor, octavesFactor, &random); octavesFactor /= 2.0; } } } using namespace noise; __device__ static inline bool match(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[EARLY_RETURN+1]; #pragma unroll for(uint16_t i = 0; i<EARLY_RETURN+1;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise(heightField, (double) (CHUNK_X <<4), (double) (CHUNK_Z<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for(uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { if (DIRT_HEIGHT_2D[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START + x + (INNER_Z_START + z) * 16] + LocalNoise2D[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D[z][x]) return false; } } } return true; } __device__ static inline bool match2(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_2(heightField, (double) (CHUNK_X_2 <<4), (double) (CHUNK_Z_2<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { if (DIRT_HEIGHT_2D_2[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_2 + x + (INNER_Z_START_2 + z) * 16] + LocalNoise2D_2[z][x] * 0.2 > 0.0 ? 0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_2[z][x]) return false; } } } return true; } __device__ static inline bool match3(uint64_t seed) { seed = get_random(seed); //SkipNoiseGen(16+16+8, &seed); lcg::advance<10480>(seed);//VERY VERY DODGY double heightField[256]; #pragma unroll for(uint16_t i = 0; i<256;i++) heightField[i] = 0; const double noiseFactor = 0.03125; generateNoise_3(heightField, (double) (CHUNK_X_3 <<4), (double) (CHUNK_Z_3<<4), 0.0, 16, 16, 1, noiseFactor, noiseFactor, 1.0, seed, 4); for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for(uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { if (DIRT_HEIGHT_2D_3[z][x] != 15) { uint8_t dirty = heightField[INNER_X_START_3 + x + (INNER_Z_START_3 + z) * 16] + LocalNoise2D_3[z][x] * 0.2 > 0.0 ? 
0 : 1; if (dirty!=(int8_t)DIRT_HEIGHT_2D_3[z][x]) return false; } } } return true; } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck(uint64_t offset, uint64_t* buffer, uint32_t* counter) { uint64_t seed = blockIdx.x * blockDim.x + threadIdx.x + offset; if (match(seed)) { buffer[atomicAdd(counter,1)] = seed; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck2(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; if (!match2(buffer[seedIndex])) { buffer[seedIndex] = 0; } } __global__ __launch_bounds__(BLOCK_SIZE,2) static void tempCheck3(uint32_t count, uint64_t* buffer) { uint64_t seedIndex = blockIdx.x * blockDim.x + threadIdx.x; if (seedIndex>=count) return; uint64_t seed = buffer[seedIndex]; if (seed==0) return; if (!match3(seed)) { buffer[seedIndex] = 0; } } std::ifstream inSeeds; std::ofstream outSeeds; uint64_t* buffer; uint32_t* counter; double getNextDoubleForLocNoise(int x, int z); void setup(int gpu_device) { cudaSetDevice(gpu_device); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); double locNoise2D[INNER_Z_END - INNER_Z_START + 1][INNER_X_END - INNER_X_START + 1]; for(uint8_t z = 0; z < INNER_Z_END - INNER_Z_START + 1; z++) { for (uint8_t x = 0; x < INNER_X_END - INNER_X_START + 1; x++) { locNoise2D[z][x] = getNextDoubleForLocNoise((CHUNK_X<<4) + INNER_X_START + x, (CHUNK_Z<<4) + INNER_Z_START + z); } } GPU_ASSERT(cudaMemcpyToSymbol(LocalNoise2D, &locNoise2D, sizeof(locNoise2D))); GPU_ASSERT(cudaPeekAtLastError()); double locNoise2D_2[INNER_Z_END_2 - INNER_Z_START_2 + 1][INNER_X_END_2 - INNER_X_START_2 + 1]; for(uint8_t z = 0; z < INNER_Z_END_2 - INNER_Z_START_2 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_2 - INNER_X_START_2 + 1; x++) { locNoise2D_2[z][x] = getNextDoubleForLocNoise((CHUNK_X_2<<4) + INNER_X_START_2 + x, (CHUNK_Z_2<<4) + INNER_Z_START_2 + z); } } GPU_ASSERT(cudaMemcpyToSymbol(LocalNoise2D_2, &locNoise2D_2, sizeof(locNoise2D_2))); GPU_ASSERT(cudaPeekAtLastError()); double locNoise2D_3[INNER_Z_END_3 - INNER_Z_START_3 + 1][INNER_X_END_3 - INNER_X_START_3 + 1]; for(uint8_t z = 0; z < INNER_Z_END_3 - INNER_Z_START_3 + 1; z++) { for (uint8_t x = 0; x < INNER_X_END_3 - INNER_X_START_3 + 1; x++) { locNoise2D_3[z][x] = getNextDoubleForLocNoise((CHUNK_X_3<<4) + INNER_X_START_3 + x, (CHUNK_Z_3<<4) + INNER_Z_START_3 + z); } } GPU_ASSERT(cudaMemcpyToSymbol(LocalNoise2D_3, &locNoise2D_3, sizeof(locNoise2D_3))); GPU_ASSERT(cudaPeekAtLastError()); } time_t elapsed_chkpoint = 0; struct checkpoint_vars { unsigned long long offset; time_t elapsed_chkpoint; }; int main(int argc, char *argv[]) { int gpu_device = 0; uint64_t START; uint64_t offsetStart = 0; uint64_t COUNT; int x = 116; int chunkxCPU = 6; int chunkxCPU2 = 6; int chunkxCPU3 = 5; #ifdef BOINC BOINC_OPTIONS options; boinc_options_defaults(options); options.normal_thread_priority = true; boinc_init_options(&options); #endif for (int i = 1; i < argc; i += 2) { const char *param = argv[i]; if (strcmp(param, "-d") == 0 || strcmp(param, "--device") == 0) { gpu_device = atoi(argv[i + 1]); } else if (strcmp(param, "-s") == 0 || strcmp(param, "--start") == 0) { sscanf(argv[i + 1], "%llu", &START); } else if (strcmp(param, "-c") == 0 || strcmp(param, "--count") == 0) { sscanf(argv[i + 1], "%llu", &COUNT); } else if (strcmp(param, "-x") == 0){ sscanf(argv[i + 1], "%i", &x); } else { fprintf(stderr,"Unknown parameter: %s\n", param); } } x = (x>>4) - 7; chunkxCPU += x; chunkxCPU2 += x; chunkxCPU3 += x; 
GPU_ASSERT(cudaMemcpyToSymbol(CHUNK_X, &chunkxCPU, sizeof(CHUNK_X))); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaMemcpyToSymbol(CHUNK_X_2, &chunkxCPU2, sizeof(CHUNK_X_2))); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaMemcpyToSymbol(CHUNK_X_3, &chunkxCPU3, sizeof(CHUNK_X_3))); GPU_ASSERT(cudaPeekAtLastError()); FILE *checkpoint_data = boinc_fopen("packpoint.txt", "rb"); if(!checkpoint_data){ fprintf(stderr, "No checkpoint to load\n"); } else{ #ifdef BOINC boinc_begin_critical_section(); #endif struct checkpoint_vars data_store; fread(&data_store, sizeof(data_store), 1, checkpoint_data); offsetStart = data_store.offset; elapsed_chkpoint = data_store.elapsed_chkpoint; fprintf(stderr, "Checkpoint loaded, task time %d s, seed pos: %llu\n", elapsed_chkpoint, START); fclose(checkpoint_data); #ifdef BOINC boinc_end_critical_section(); #endif } #ifdef BOINC APP_INIT_DATA aid; boinc_get_init_data(aid); if (aid.gpu_device_num >= 0) { gpu_device = aid.gpu_device_num; fprintf(stderr,"boinc gpu %i gpuindex: %i \n", aid.gpu_device_num, gpu_device); } else { fprintf(stderr,"stndalone gpuindex %i \n", gpu_device); } #endif setup(gpu_device); uint64_t seedCount = COUNT; std::cout << "Processing " << seedCount << " seeds" << std::endl; outSeeds.open("seedsout"); GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(*buffer) * SEEDS_PER_CALL)); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaMallocManaged(&counter, sizeof(*counter))); GPU_ASSERT(cudaPeekAtLastError()); time_t start_time = time(NULL); int outCount = 0; int checkpointTemp = 0; for(uint64_t offset =offsetStart;offset<seedCount;offset+=SEEDS_PER_CALL) { // Normal filtering time_t elapsed = time(NULL) - start_time; double frac = (double) offset / (double)(seedCount); #ifdef BOINC boinc_fraction_done(frac); #endif *counter = 0; tempCheck<<<1ULL<<WORK_SIZE_BITS,BLOCK_SIZE>>>(START + offset, buffer,counter); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); tempCheck2<<<((*counter)/BLOCK_SIZE)+1,BLOCK_SIZE>>>(*counter, buffer); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); tempCheck3<<<((*counter)/BLOCK_SIZE)+1,BLOCK_SIZE>>>(*counter, buffer); GPU_ASSERT(cudaPeekAtLastError()); GPU_ASSERT(cudaDeviceSynchronize()); for(int i=0;i<*counter;i++) { if (buffer[i]!=0) { uint64_t seed = buffer[i]; std::cout << "3rd level seed found: " << seed << std::endl; outSeeds << seed << std::endl; outCount++; } } if(checkpointTemp >= 180000000 || boinc_time_to_checkpoint()){ #ifdef BOINC boinc_begin_critical_section(); // Boinc should not interrupt this #endif // Checkpointing section below boinc_delete_file("packpoint.txt"); // Don't touch, same func as normal fdel FILE *checkpoint_data = boinc_fopen("packpoint.txt", "wb"); struct checkpoint_vars data_store; data_store.offset = offset; data_store.elapsed_chkpoint = elapsed_chkpoint + elapsed; fwrite(&data_store, sizeof(data_store), 1, checkpoint_data); fclose(checkpoint_data); checkpointTemp = 0; #ifdef BOINC boinc_end_critical_section(); boinc_checkpoint_completed(); // Checkpointing completed #endif } checkpointTemp += SEEDS_PER_CALL; std::cout << "Seeds left:" << (((int64_t)seedCount-offset)-SEEDS_PER_CALL) << std::endl; } std::cout << "Done processing" << std::endl; #ifdef BOINC boinc_begin_critical_section(); #endif time_t elapsed = time(NULL) - start_time; double done = (double)COUNT / 1000000.0; double speed = done / (double) elapsed; fprintf(stderr, "\nSpeed: %.2lfm/s\n", speed ); fprintf(stderr, "Done\n"); fprintf(stderr, "Processed: %llu seeds in %.2lfs 
seconds\n", COUNT, (double) elapsed_chkpoint + (double) elapsed ); fprintf(stderr, "Have %llu output seeds.\n", outCount); fflush(stderr); outSeeds.close(); boinc_delete_file("packpoint.txt"); #ifdef BOINC boinc_end_critical_section(); #endif boinc_finish(0); } double getNextDoubleForLocNoise(int x, int z) { Random rand = get_random((((int64_t)x) >> 4) * 341873128712LL + (((int64_t)z) >> 4) * 132897987541LL); for (int dx = 0; dx < 16; dx++) { for (int dz = 0; dz < 16; dz++) { if (dx == (x & 15) && dz == (z & 15)) { //advance2(&rand); //advance2(&rand); return next_double(&rand); } advance2(&rand); advance2(&rand); advance2(&rand); for(int k1 = 127; k1 >= 0; k1--) { random_next_int_nonpow(&rand,5); } //for (int i = 0; i < 67; i++) { // advance2(&rand); //} } } exit(-99); }
eb23efcc461e4c466e5ffcaaf8662b30605edea7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ctdetLayer.h> __device__ float Logist(float data){ return 1./(1. + exp(-data)); } __global__ void CTdetforward_kernel(const float *hm, const float *reg,const float *wh , float *output,const int w,const int h,const int classes,const int kernel_size,const float visthresh ) { int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (idx >= w*h) return; int padding = (kernel_size-1)/2; int offset = - padding ; int stride = w*h; int grid_x = idx % w ; int grid_y = idx / w ; int cls,l,m; float c_x,c_y; for (cls = 0; cls < classes; ++cls ) { int objIndex = stride * cls + idx; float objProb = Logist(hm[objIndex]); float max=-1; int max_index =0; for(l=0 ;l < kernel_size ; ++l) for(m=0 ; m < kernel_size ; ++m){ int cur_x = offset + l + grid_x; int cur_y = offset + m + grid_y; int cur_index = cur_y * w + cur_x + stride*cls; int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h ); float val = (valid !=0 ) ? Logist(hm[cur_index]): -1; max_index = (val > max) ? cur_index : max_index; max = (val > max ) ? val: max ; } if((max == objProb) && (objProb > visthresh)){ int resCount = (int)atomicAdd(output,1); //printf("%d",resCount); char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection); Detection* det = (Detection*)(data); c_x = grid_x + reg[idx] ; c_y = grid_y + reg[idx+stride]; det->bbox.x1 = (c_x - wh[idx]/2)*4; det->bbox.y1 = (c_y - wh[idx+stride]/2)*4; det->bbox.x2 = (c_x + wh[idx]/2)*4; det->bbox.y2 = (c_y + wh[idx+stride]/2)*4; det->classId = cls; det->prob = objProb; } } } __global__ void CTfaceforward_kernel(const float *hm, const float *wh,const float *reg,const float* landmarks, float *output,const int w,const int h,const int classes,const int kernel_size,const float visthresh ) { int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (idx >= w*h) return; int padding = (kernel_size-1)/2; int offset = - padding; int stride = w*h; int grid_x = idx % w ; int grid_y = idx / w ; int cls,l,m,mark_id; float c_x,c_y,scale_w,scale_h; for (cls = 0; cls < classes; ++cls ) { int objIndex = stride * cls + idx; float objProb = hm[objIndex]; float max=-1; int max_index =0; for(l=0 ;l < kernel_size ; ++l) for(m=0 ; m < kernel_size ; ++m){ int cur_x = offset + l + grid_x; int cur_y = offset + m + grid_y; int cur_index = cur_y * w + cur_x + stride*cls; int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h ); float val = (valid !=0 ) ? hm[cur_index]: -1; max_index = (val > max) ? cur_index : max_index; max = (val > max ) ? 
val: max ; } //printf("%f\n",objProb); if((max_index == objIndex) && (objProb > visthresh)){ int resCount = (int)atomicAdd(output,1); //printf("%d",resCount); char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection); Detection* det = (Detection*)(data); c_x = (grid_x + reg[idx+stride] + 0.5)*4 ; c_y = (grid_y + reg[idx] + 0.5) * 4; scale_w = expf(wh[idx+stride]) * 4 ; scale_h = expf(wh[idx]) * 4; det->bbox.x1 = c_x - scale_w/2; det->bbox.y1 = c_y - scale_h/2 ; det->bbox.x2 = c_x + scale_w/2; det->bbox.y2 = c_y + scale_h/2; det->prob = objProb; det->classId = cls; for(mark_id=0 ; mark_id < 5 ; ++ mark_id){ det->marks[mark_id].x = det->bbox.x1 + landmarks[idx + (2*mark_id+1)*stride]*scale_w; det->marks[mark_id].y = det->bbox.y1 + landmarks[idx + (2*mark_id)*stride]*scale_h; } } } } void CTdetforward_gpu(const float *hm, const float *reg,const float *wh ,float *output, const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){ uint num = w * h; hipLaunchKernelGGL(( CTdetforward_kernel), dim3(cudaGridSize(num)),dim3(BLOCK), 0, 0, hm,reg,wh,output,w,h,classes,kernerl_size,visthresh); } void CTfaceforward_gpu(const float *hm, const float *wh,const float *reg,const float* landmarks,float *output, const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){ uint num = w * h; hipLaunchKernelGGL(( CTfaceforward_kernel), dim3(cudaGridSize(num)),dim3(BLOCK), 0, 0, hm,wh,reg,landmarks,output,w,h,classes,kernerl_size,visthresh); }
eb23efcc461e4c466e5ffcaaf8662b30605edea7.cu
#include <ctdetLayer.h> __device__ float Logist(float data){ return 1./(1. + exp(-data)); } __global__ void CTdetforward_kernel(const float *hm, const float *reg,const float *wh , float *output,const int w,const int h,const int classes,const int kernel_size,const float visthresh ) { int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (idx >= w*h) return; int padding = (kernel_size-1)/2; int offset = - padding ; int stride = w*h; int grid_x = idx % w ; int grid_y = idx / w ; int cls,l,m; float c_x,c_y; for (cls = 0; cls < classes; ++cls ) { int objIndex = stride * cls + idx; float objProb = Logist(hm[objIndex]); float max=-1; int max_index =0; for(l=0 ;l < kernel_size ; ++l) for(m=0 ; m < kernel_size ; ++m){ int cur_x = offset + l + grid_x; int cur_y = offset + m + grid_y; int cur_index = cur_y * w + cur_x + stride*cls; int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h ); float val = (valid !=0 ) ? Logist(hm[cur_index]): -1; max_index = (val > max) ? cur_index : max_index; max = (val > max ) ? val: max ; } if((max == objProb) && (objProb > visthresh)){ int resCount = (int)atomicAdd(output,1); //printf("%d",resCount); char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection); Detection* det = (Detection*)(data); c_x = grid_x + reg[idx] ; c_y = grid_y + reg[idx+stride]; det->bbox.x1 = (c_x - wh[idx]/2)*4; det->bbox.y1 = (c_y - wh[idx+stride]/2)*4; det->bbox.x2 = (c_x + wh[idx]/2)*4; det->bbox.y2 = (c_y + wh[idx+stride]/2)*4; det->classId = cls; det->prob = objProb; } } } __global__ void CTfaceforward_kernel(const float *hm, const float *wh,const float *reg,const float* landmarks, float *output,const int w,const int h,const int classes,const int kernel_size,const float visthresh ) { int idx = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (idx >= w*h) return; int padding = (kernel_size-1)/2; int offset = - padding; int stride = w*h; int grid_x = idx % w ; int grid_y = idx / w ; int cls,l,m,mark_id; float c_x,c_y,scale_w,scale_h; for (cls = 0; cls < classes; ++cls ) { int objIndex = stride * cls + idx; float objProb = hm[objIndex]; float max=-1; int max_index =0; for(l=0 ;l < kernel_size ; ++l) for(m=0 ; m < kernel_size ; ++m){ int cur_x = offset + l + grid_x; int cur_y = offset + m + grid_y; int cur_index = cur_y * w + cur_x + stride*cls; int valid = (cur_x>=0 && cur_x < w && cur_y >=0 && cur_y <h ); float val = (valid !=0 ) ? hm[cur_index]: -1; max_index = (val > max) ? cur_index : max_index; max = (val > max ) ? 
val: max ; } //printf("%f\n",objProb); if((max_index == objIndex) && (objProb > visthresh)){ int resCount = (int)atomicAdd(output,1); //printf("%d",resCount); char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection); Detection* det = (Detection*)(data); c_x = (grid_x + reg[idx+stride] + 0.5)*4 ; c_y = (grid_y + reg[idx] + 0.5) * 4; scale_w = expf(wh[idx+stride]) * 4 ; scale_h = expf(wh[idx]) * 4; det->bbox.x1 = c_x - scale_w/2; det->bbox.y1 = c_y - scale_h/2 ; det->bbox.x2 = c_x + scale_w/2; det->bbox.y2 = c_y + scale_h/2; det->prob = objProb; det->classId = cls; for(mark_id=0 ; mark_id < 5 ; ++ mark_id){ det->marks[mark_id].x = det->bbox.x1 + landmarks[idx + (2*mark_id+1)*stride]*scale_w; det->marks[mark_id].y = det->bbox.y1 + landmarks[idx + (2*mark_id)*stride]*scale_h; } } } } void CTdetforward_gpu(const float *hm, const float *reg,const float *wh ,float *output, const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){ uint num = w * h; CTdetforward_kernel<<<cudaGridSize(num),BLOCK>>>(hm,reg,wh,output,w,h,classes,kernerl_size,visthresh); } void CTfaceforward_gpu(const float *hm, const float *wh,const float *reg,const float* landmarks,float *output, const int w,const int h,const int classes,const int kernerl_size, const float visthresh ){ uint num = w * h; CTfaceforward_kernel<<<cudaGridSize(num),BLOCK>>>(hm,wh,reg,landmarks,output,w,h,classes,kernerl_size,visthresh); }
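This .hip/.cu pair shows the central transformation running through all of these files: the CUDA triple-chevron launches in CTdetforward_gpu and CTfaceforward_gpu become explicit hipLaunchKernelGGL calls with the shared-memory size and stream spelled out as 0. A minimal sketch of the same mapping on a generic kernel (scaleKernel and its arguments are placeholders, not taken from the files above):

__global__ void scaleKernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

// CUDA form (as in the .cu file):
//   scaleKernel<<<grid, block>>>(d_data, 2.0f, n);
// Hipified form (as in the .hip file):
//   hipLaunchKernelGGL(scaleKernel, grid, block, 0 /*sharedMemBytes*/, 0 /*stream*/, d_data, 2.0f, n);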
f436fd3f8394ab46d5817e30df39d7a26980857f.hip
// !!! This is a file automatically generated by hipify!!! #include "flow.h" void FLOWDEBUGIMAGE(std::string windowName, float* deviceImage, int height, int stride, bool verbose, bool wait) { cv::Mat calibrated = cv::Mat(height, stride, CV_32F); checkCudaErrors(hipMemcpy((float *)calibrated.ptr(), deviceImage, stride * height * sizeof(float), hipMemcpyDeviceToHost)); cv::imshow(windowName, calibrated); if (verbose) { std::cout << windowName << " " << calibrated.at<float>(height / 2, stride / 2) << std::endl; } if (wait) { cv::waitKey(); } else { cv::waitKey(1); } } Flow::Flow() { this->BlockHeight = 1; this->BlockWidth = 32; this->StrideAlignment = 32; } int Flow::initialize(int width, int height, float lambda, float theta, float tau, int nLevels, float fScale, int nWarpIters, int nSolverIters) { this->width = width; this->height = height; this->stride = this->iAlignUp(width); this->lambda = lambda; this->theta = theta; this->tau = tau; this->fScale = fScale; this->nLevels = nLevels; this->nWarpIters = nWarpIters; this->nSolverIters = nSolverIters; pI0 = std::vector<float*>(nLevels); pI1 = std::vector<float*>(nLevels); pW = std::vector<int>(nLevels); pH = std::vector<int>(nLevels); pS = std::vector<int>(nLevels); pDataSize = std::vector<int>(nLevels); pMask = std::vector<float*>(nLevels); int newHeight = height; int newWidth = width; int newStride = iAlignUp(width); //std::cout << "Pyramid Sizes: " << newWidth << " " << newHeight << " " << newStride << std::endl; for (int level = 0; level < nLevels; level++) { pDataSize[level] = newStride * newHeight * sizeof(float); checkCudaErrors(hipMalloc(&pI0[level], pDataSize[level])); checkCudaErrors(hipMalloc(&pI1[level], pDataSize[level]));; checkCudaErrors(hipMalloc(&pMask[level], pDataSize[level])); //std::cout << newHeight << " " << newWidth << " " << newStride << std::endl; pW[level] = newWidth; pH[level] = newHeight; pS[level] = newStride; newHeight = (int)((float)newHeight / fScale); newWidth = (int)((float)newWidth / fScale); newStride = iAlignUp(newWidth); } //std::cout << stride << " " << height << std::endl; dataSize8u = stride * height * sizeof(uchar); dataSize8uc3 = stride * height * sizeof(uchar3); dataSize32f = stride * height * sizeof(float); dataSize32fc2 = stride * height * sizeof(float2); dataSize32fc3 = stride * height * sizeof(float3); dataSize32fc4 = stride * height * sizeof(float4); // Inputs and Outputs checkCudaErrors(hipMalloc(&d_i0, dataSize32f)); checkCudaErrors(hipMalloc(&d_i1, dataSize32f)); checkCudaErrors(hipMalloc(&d_i1warp, dataSize32f)); checkCudaErrors(hipMalloc(&d_i08u, dataSize8u)); checkCudaErrors(hipMalloc(&d_i18u, dataSize8u)); checkCudaErrors(hipMalloc(&d_i08uc3, dataSize8uc3)); checkCudaErrors(hipMalloc(&d_i18uc3, dataSize8uc3)); checkCudaErrors(hipMalloc(&d_Ix, dataSize32f)); checkCudaErrors(hipMalloc(&d_Iy, dataSize32f)); checkCudaErrors(hipMalloc(&d_Iz, dataSize32f)); // Output Optical Flow checkCudaErrors(hipMalloc(&d_u, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_us, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_umed, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_umeds, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_du, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_dumed, dataSize32fc2)); // Process variables checkCudaErrors(hipMalloc(&d_pu, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_pus, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_pv, dataSize32fc2)); checkCudaErrors(hipMalloc(&d_pvs, dataSize32fc2)); // Debugging checkCudaErrors(hipMalloc(&d_uvrgb, dataSize32fc3)); uvrgb = cv::Mat(height, 
stride, CV_32FC3); u = cv::Mat(height, stride, CV_32FC2); return 0; } int Flow::copyImagesToDevice(cv::Mat i0, cv::Mat i1) { // Padding cv::copyMakeBorder(i0, im0pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0); cv::copyMakeBorder(i1, im1pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0); if (i0.type() == CV_8U) { checkCudaErrors(hipMemcpy(d_i08u, (uchar *)im0pad.ptr(), dataSize8u, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_i18u, (uchar *)im1pad.ptr(), dataSize8u, hipMemcpyHostToDevice)); // Convert to 32F Cv8uToGray(d_i08u, pI0[0], width, height, stride); Cv8uToGray(d_i18u, pI1[0], width, height, stride); } else if (i0.type() == CV_32F) { checkCudaErrors(hipMemcpy(pI0[0], (float *)im0pad.ptr(), dataSize32f, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(pI1[0], (float *)im1pad.ptr(), dataSize32f, hipMemcpyHostToDevice)); } else if (i0.type() == CV_8UC3) { checkCudaErrors(hipMemcpy(d_i08uc3, (uchar3 *)im0pad.ptr(), dataSize8uc3, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_i18uc3, (uchar3 *)im1pad.ptr(), dataSize8uc3, hipMemcpyHostToDevice)); // Convert to 32F Cv8uc3ToGray(d_i08uc3, pI0[0], width, height, stride); Cv8uc3ToGray(d_i18uc3, pI1[0], width, height, stride); } return 0; } int Flow::copyMaskToDevice(cv::Mat mask) { cv::copyMakeBorder(mask, maskPad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0); checkCudaErrors(hipMemcpy(pMask[0], (float *)maskPad.ptr(), dataSize32f, hipMemcpyHostToDevice)); for (int level = 1; level < nLevels; level++) { //std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl; DownscaleNearestNeighbor(pMask[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pMask[level]); //DEBUGIMAGE("maskasdfadf", pFisheyeMask[level], pH[level], pS[level], true, true); } return 0; } int Flow::solveOpticalFlow() { checkCudaErrors(hipMemset(d_u, 0, dataSize32fc2)); checkCudaErrors(hipMemset(d_umed, 0, dataSize32fc2)); checkCudaErrors(hipMemset(d_pu, 0, dataSize32fc2)); checkCudaErrors(hipMemset(d_pv, 0, dataSize32fc2)); checkCudaErrors(hipMemset(d_pus, 0, dataSize32fc2)); checkCudaErrors(hipMemset(d_pvs, 0, dataSize32fc2)); // Construct pyramid for (int level = 1; level < nLevels; level++) { Downscale(pI0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI0[level]); Downscale(pI1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI1[level]); } // Solve TVL1 optical flow for (int level = nLevels - 1; level >= 0; level--) { for (int warpIter = 0; warpIter < nWarpIters; warpIter++) { checkCudaErrors(hipMemset(d_pu, 0, dataSize32fc2)); checkCudaErrors(hipMemset(d_pv, 0, dataSize32fc2)); //checkCudaErrors(hipMemset(d_du, 0, dataSize32fc2)); WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_i1warp); /*if (level == 0) { FLOWDEBUGIMAGE("warped", d_i1warp, pH[level], pS[level], false, false); }*/ ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); // Inner iteration for (int iter = 0; iter < nSolverIters; iter++) { // Solve Problem1A ThresholdingL1Masked(pMask[level], d_u, d_umed, d_Ix, d_Iy, d_Iz, lambda, theta, pW[level], pH[level], pS[level]); //Swap(d_u, d_us); // Solve Problem1B SolveProblem1bMasked(pMask[level], d_u, d_pu, d_pv, theta, d_umed, pW[level], pH[level], pS[level]); // Solve Problem2 SolveProblem2Masked(pMask[level], d_umed, d_pu, d_pv, theta, tau, d_pus, d_pvs, pW[level], pH[level], pS[level]); Swap(d_pu, d_pus); Swap(d_pv, d_pvs); } 
MedianFilter(d_u, pW[level], pH[level], pS[level], d_us, 5); Swap(d_u, d_us); } // Upscale if (level > 0) { float scale = fScale; Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us); Upscale(d_umed, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_umeds); Swap(d_u, d_us); Swap(d_umed, d_umeds); } } return 0; } int Flow::copyFlowToHost(cv::Mat &wCropped) { // Remove Padding checkCudaErrors(hipMemcpy((float2 *)u.ptr(), d_umed, dataSize32fc2, hipMemcpyDeviceToHost)); cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height wCropped = u(roi); return 0; } int Flow::copyFlowColorToHost(cv::Mat &wCropped, float flowscale) { FlowToHSV(d_umed, width, height, stride, d_uvrgb, flowscale); checkCudaErrors(hipMemcpy((float3 *)uvrgb.ptr(), d_uvrgb, dataSize32fc3, hipMemcpyDeviceToHost)); cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height wCropped = uvrgb(roi); return 0; } // Utilities int Flow::iAlignUp(int n) { int m = this->StrideAlignment; int mod = n % m; if (mod) return n + m - mod; else return n; } int Flow::iDivUp(int n, int m) { return (n + m - 1) / m; } template<typename T> void Flow::Swap(T &a, T &ax) { T t = a; a = ax; ax = t; } template<typename T> void Flow::Copy(T &dst, T &src) { dst = src; }
f436fd3f8394ab46d5817e30df39d7a26980857f.cu
#include "flow.h" void FLOWDEBUGIMAGE(std::string windowName, float* deviceImage, int height, int stride, bool verbose, bool wait) { cv::Mat calibrated = cv::Mat(height, stride, CV_32F); checkCudaErrors(cudaMemcpy((float *)calibrated.ptr(), deviceImage, stride * height * sizeof(float), cudaMemcpyDeviceToHost)); cv::imshow(windowName, calibrated); if (verbose) { std::cout << windowName << " " << calibrated.at<float>(height / 2, stride / 2) << std::endl; } if (wait) { cv::waitKey(); } else { cv::waitKey(1); } } Flow::Flow() { this->BlockHeight = 1; this->BlockWidth = 32; this->StrideAlignment = 32; } int Flow::initialize(int width, int height, float lambda, float theta, float tau, int nLevels, float fScale, int nWarpIters, int nSolverIters) { this->width = width; this->height = height; this->stride = this->iAlignUp(width); this->lambda = lambda; this->theta = theta; this->tau = tau; this->fScale = fScale; this->nLevels = nLevels; this->nWarpIters = nWarpIters; this->nSolverIters = nSolverIters; pI0 = std::vector<float*>(nLevels); pI1 = std::vector<float*>(nLevels); pW = std::vector<int>(nLevels); pH = std::vector<int>(nLevels); pS = std::vector<int>(nLevels); pDataSize = std::vector<int>(nLevels); pMask = std::vector<float*>(nLevels); int newHeight = height; int newWidth = width; int newStride = iAlignUp(width); //std::cout << "Pyramid Sizes: " << newWidth << " " << newHeight << " " << newStride << std::endl; for (int level = 0; level < nLevels; level++) { pDataSize[level] = newStride * newHeight * sizeof(float); checkCudaErrors(cudaMalloc(&pI0[level], pDataSize[level])); checkCudaErrors(cudaMalloc(&pI1[level], pDataSize[level]));; checkCudaErrors(cudaMalloc(&pMask[level], pDataSize[level])); //std::cout << newHeight << " " << newWidth << " " << newStride << std::endl; pW[level] = newWidth; pH[level] = newHeight; pS[level] = newStride; newHeight = (int)((float)newHeight / fScale); newWidth = (int)((float)newWidth / fScale); newStride = iAlignUp(newWidth); } //std::cout << stride << " " << height << std::endl; dataSize8u = stride * height * sizeof(uchar); dataSize8uc3 = stride * height * sizeof(uchar3); dataSize32f = stride * height * sizeof(float); dataSize32fc2 = stride * height * sizeof(float2); dataSize32fc3 = stride * height * sizeof(float3); dataSize32fc4 = stride * height * sizeof(float4); // Inputs and Outputs checkCudaErrors(cudaMalloc(&d_i0, dataSize32f)); checkCudaErrors(cudaMalloc(&d_i1, dataSize32f)); checkCudaErrors(cudaMalloc(&d_i1warp, dataSize32f)); checkCudaErrors(cudaMalloc(&d_i08u, dataSize8u)); checkCudaErrors(cudaMalloc(&d_i18u, dataSize8u)); checkCudaErrors(cudaMalloc(&d_i08uc3, dataSize8uc3)); checkCudaErrors(cudaMalloc(&d_i18uc3, dataSize8uc3)); checkCudaErrors(cudaMalloc(&d_Ix, dataSize32f)); checkCudaErrors(cudaMalloc(&d_Iy, dataSize32f)); checkCudaErrors(cudaMalloc(&d_Iz, dataSize32f)); // Output Optical Flow checkCudaErrors(cudaMalloc(&d_u, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_us, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_umed, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_umeds, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_du, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_dumed, dataSize32fc2)); // Process variables checkCudaErrors(cudaMalloc(&d_pu, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_pus, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_pv, dataSize32fc2)); checkCudaErrors(cudaMalloc(&d_pvs, dataSize32fc2)); // Debugging checkCudaErrors(cudaMalloc(&d_uvrgb, dataSize32fc3)); uvrgb = cv::Mat(height, stride, CV_32FC3); u = 
cv::Mat(height, stride, CV_32FC2); return 0; } int Flow::copyImagesToDevice(cv::Mat i0, cv::Mat i1) { // Padding cv::copyMakeBorder(i0, im0pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0); cv::copyMakeBorder(i1, im1pad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0); if (i0.type() == CV_8U) { checkCudaErrors(cudaMemcpy(d_i08u, (uchar *)im0pad.ptr(), dataSize8u, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_i18u, (uchar *)im1pad.ptr(), dataSize8u, cudaMemcpyHostToDevice)); // Convert to 32F Cv8uToGray(d_i08u, pI0[0], width, height, stride); Cv8uToGray(d_i18u, pI1[0], width, height, stride); } else if (i0.type() == CV_32F) { checkCudaErrors(cudaMemcpy(pI0[0], (float *)im0pad.ptr(), dataSize32f, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(pI1[0], (float *)im1pad.ptr(), dataSize32f, cudaMemcpyHostToDevice)); } else if (i0.type() == CV_8UC3) { checkCudaErrors(cudaMemcpy(d_i08uc3, (uchar3 *)im0pad.ptr(), dataSize8uc3, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_i18uc3, (uchar3 *)im1pad.ptr(), dataSize8uc3, cudaMemcpyHostToDevice)); // Convert to 32F Cv8uc3ToGray(d_i08uc3, pI0[0], width, height, stride); Cv8uc3ToGray(d_i18uc3, pI1[0], width, height, stride); } return 0; } int Flow::copyMaskToDevice(cv::Mat mask) { cv::copyMakeBorder(mask, maskPad, 0, 0, 0, stride - width, cv::BORDER_CONSTANT, 0); checkCudaErrors(cudaMemcpy(pMask[0], (float *)maskPad.ptr(), dataSize32f, cudaMemcpyHostToDevice)); for (int level = 1; level < nLevels; level++) { //std::cout << pW[level] << " " << pH[level] << " " << pS[level] << std::endl; DownscaleNearestNeighbor(pMask[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pMask[level]); //DEBUGIMAGE("maskasdfadf", pFisheyeMask[level], pH[level], pS[level], true, true); } return 0; } int Flow::solveOpticalFlow() { checkCudaErrors(cudaMemset(d_u, 0, dataSize32fc2)); checkCudaErrors(cudaMemset(d_umed, 0, dataSize32fc2)); checkCudaErrors(cudaMemset(d_pu, 0, dataSize32fc2)); checkCudaErrors(cudaMemset(d_pv, 0, dataSize32fc2)); checkCudaErrors(cudaMemset(d_pus, 0, dataSize32fc2)); checkCudaErrors(cudaMemset(d_pvs, 0, dataSize32fc2)); // Construct pyramid for (int level = 1; level < nLevels; level++) { Downscale(pI0[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI0[level]); Downscale(pI1[level - 1], pW[level - 1], pH[level - 1], pS[level - 1], pW[level], pH[level], pS[level], pI1[level]); } // Solve TVL1 optical flow for (int level = nLevels - 1; level >= 0; level--) { for (int warpIter = 0; warpIter < nWarpIters; warpIter++) { checkCudaErrors(cudaMemset(d_pu, 0, dataSize32fc2)); checkCudaErrors(cudaMemset(d_pv, 0, dataSize32fc2)); //checkCudaErrors(cudaMemset(d_du, 0, dataSize32fc2)); WarpImage(pI1[level], pW[level], pH[level], pS[level], d_u, d_i1warp); /*if (level == 0) { FLOWDEBUGIMAGE("warped", d_i1warp, pH[level], pS[level], false, false); }*/ ComputeDerivatives(pI0[level], d_i1warp, pW[level], pH[level], pS[level], d_Ix, d_Iy, d_Iz); // Inner iteration for (int iter = 0; iter < nSolverIters; iter++) { // Solve Problem1A ThresholdingL1Masked(pMask[level], d_u, d_umed, d_Ix, d_Iy, d_Iz, lambda, theta, pW[level], pH[level], pS[level]); //Swap(d_u, d_us); // Solve Problem1B SolveProblem1bMasked(pMask[level], d_u, d_pu, d_pv, theta, d_umed, pW[level], pH[level], pS[level]); // Solve Problem2 SolveProblem2Masked(pMask[level], d_umed, d_pu, d_pv, theta, tau, d_pus, d_pvs, pW[level], pH[level], pS[level]); Swap(d_pu, d_pus); Swap(d_pv, d_pvs); } 
MedianFilter(d_u, pW[level], pH[level], pS[level], d_us, 5); Swap(d_u, d_us); } // Upscale if (level > 0) { float scale = fScale; Upscale(d_u, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_us); Upscale(d_umed, pW[level], pH[level], pS[level], pW[level - 1], pH[level - 1], pS[level - 1], scale, d_umeds); Swap(d_u, d_us); Swap(d_umed, d_umeds); } } return 0; } int Flow::copyFlowToHost(cv::Mat &wCropped) { // Remove Padding checkCudaErrors(cudaMemcpy((float2 *)u.ptr(), d_umed, dataSize32fc2, cudaMemcpyDeviceToHost)); cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height wCropped = u(roi); return 0; } int Flow::copyFlowColorToHost(cv::Mat &wCropped, float flowscale) { FlowToHSV(d_umed, width, height, stride, d_uvrgb, flowscale); checkCudaErrors(cudaMemcpy((float3 *)uvrgb.ptr(), d_uvrgb, dataSize32fc3, cudaMemcpyDeviceToHost)); cv::Rect roi(0, 0, width, height); // define roi here as x0, y0, width, height wCropped = uvrgb(roi); return 0; } // Utilities int Flow::iAlignUp(int n) { int m = this->StrideAlignment; int mod = n % m; if (mod) return n + m - mod; else return n; } int Flow::iDivUp(int n, int m) { return (n + m - 1) / m; } template<typename T> void Flow::Swap(T &a, T &ax) { T t = a; a = ax; ax = t; } template<typename T> void Flow::Copy(T &dst, T &src) { dst = src; }
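Flow::iAlignUp pads every image row up to a multiple of StrideAlignment (32), so all per-channel allocations use the padded stride rather than the raw width. A small standalone sketch of that rounding, assuming an example width of 641 (the helper name alignUp is illustrative):

// Round a row width up to the next multiple of the alignment, e.g. 641 -> 672 for alignment 32.
static int alignUp(int n, int alignment) {
    int mod = n % alignment;
    return mod ? n + alignment - mod : n;
}
// Usage sketch: int stride = alignUp(width, 32);  // then allocate stride * height elements per channel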
303b52d8280cca86980ace773e029f15934f8ef3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void compute_d_vector_kernel(int N_i, int* d_ind, double* d_ptr, int* p_ptr, double* N_ptr, int N_ld) { int I = threadIdx.x + blockIdx.x * blockDim.x; if (I < N_i) { int index = p_ptr[d_ind[I]]; d_ptr[d_ind[I]] = 1. / N_ptr[index + index * N_ld]; } }
303b52d8280cca86980ace773e029f15934f8ef3.cu
#include "includes.h" __global__ void compute_d_vector_kernel(int N_i, int* d_ind, double* d_ptr, int* p_ptr, double* N_ptr, int N_ld) { int I = threadIdx.x + blockIdx.x * blockDim.x; if (I < N_i) { int index = p_ptr[d_ind[I]]; d_ptr[d_ind[I]] = 1. / N_ptr[index + index * N_ld]; } }
9aee647c3117a393639a81d6a8befe4c249d6d8d.hip
// !!! This is a file automatically generated by hipify!!! #ifdef __HIPCC__ #include "Macro.h" #else #include "GAMER.h" #endif #include "CUPOT.h" #ifdef GRAVITY //----------------------------------------------------------------------------------------- // Function : CUPOT_ExternalAcc / CPU_ExternalAcc // Description : 1. Calculate the external acceleration from the input coordinates and time // 2. This function will be invoked in both the CPU and GPU codes // 3. "__forceinline__" is required since this device function will be invoked by more than one kernel // (e.g., CUPOT_HydroGravitySolver, CUFLU_ComputeFlux <-- which will be called by different fluid solvers) // // Parameter : Acc : Array to store the output external acceleration // x/y/z : Spatial coordinates // Time : Current physical time // UserArray : User-provided auxiliary array (set by "Init_ExternalPot") // // Return : Acc //----------------------------------------------------------------------------------------- #ifdef __HIPCC__ __forceinline__ __device__ void CUPOT_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] ) #else void CPU_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] ) #endif { const double Cen[3] = { UserArray[0], UserArray[1], UserArray[2] }; const real GM_4 = (real)UserArray[3]; // 0.25*G*m const real dx = (real)(x - Cen[0]); const real dy = (real)(y - Cen[1]); const real dz = (real)(z - Cen[2]); const real r = SQRT( dx*dx + dy*dy + dz*dz ); const real _r3 = (real)1.0/(r*r*r); Acc[0] = -GM_4*_r3*dx; Acc[1] = -GM_4*_r3*dy; Acc[2] = -GM_4*_r3*dz; } // FUNCTION : CUPOT_ExternalAcc / CPU_ExternalAcc #endif // #ifdef GRAVITY
9aee647c3117a393639a81d6a8befe4c249d6d8d.cu
#ifdef __CUDACC__ #include "Macro.h" #else #include "GAMER.h" #endif #include "CUPOT.h" #ifdef GRAVITY //----------------------------------------------------------------------------------------- // Function : CUPOT_ExternalAcc / CPU_ExternalAcc // Description : 1. Calculate the external acceleration from the input coordinates and time // 2. This function will be invoked in both the CPU and GPU codes // 3. "__forceinline__" is required since this device function will be invoked by more than one kernel // (e.g., CUPOT_HydroGravitySolver, CUFLU_ComputeFlux <-- which will be called by different fluid solvers) // // Parameter : Acc : Array to store the output external acceleration // x/y/z : Spatial coordinates // Time : Current physical time // UserArray : User-provided auxiliary array (set by "Init_ExternalPot") // // Return : Acc //----------------------------------------------------------------------------------------- #ifdef __CUDACC__ __forceinline__ __device__ void CUPOT_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] ) #else void CPU_ExternalAcc( real Acc[], const double x, const double y, const double z, const double Time, const double UserArray[] ) #endif { const double Cen[3] = { UserArray[0], UserArray[1], UserArray[2] }; const real GM_4 = (real)UserArray[3]; // 0.25*G*m const real dx = (real)(x - Cen[0]); const real dy = (real)(y - Cen[1]); const real dz = (real)(z - Cen[2]); const real r = SQRT( dx*dx + dy*dy + dz*dz ); const real _r3 = (real)1.0/(r*r*r); Acc[0] = -GM_4*_r3*dx; Acc[1] = -GM_4*_r3*dy; Acc[2] = -GM_4*_r3*dz; } // FUNCTION : CUPOT_ExternalAcc / CPU_ExternalAcc #endif // #ifdef GRAVITY
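The function reads a point-mass center from UserArray[0..2] and GM_4 = 0.25*G*m from UserArray[3], giving Acc = -GM_4 * (dx, dy, dz) / r^3. A hedged host-side sketch of packing that array, with hypothetical names (packPointMassAux, extAccAux) and caller-supplied G and m:

// Fill the auxiliary array consumed by CUPOT_ExternalAcc / CPU_ExternalAcc:
// [0..2] = mass center, [3] = 0.25*G*m (matching the "0.25*G*m" comment above).
void packPointMassAux(double extAccAux[4], double cx, double cy, double cz,
                      double newtonG, double pointMass) {
    extAccAux[0] = cx;
    extAccAux[1] = cy;
    extAccAux[2] = cz;
    extAccAux[3] = 0.25 * newtonG * pointMass;
}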
b6d4bb825a603c881fd1980a90ac03a5b4646657.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "vectorOp.h" __global__ void vector_operation_kernel_coalesced(int *output, int *data, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int total = blockDim.x * gridDim.x; for (; tid < size; tid += total) { output[tid] = OPERATION(data[tid]); } } __global__ void vector_operation_kernel(int *output, int *data, int size, int work_per_thread) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int si = tid * work_per_thread; int ei = si + work_per_thread; if (ei > size) { ei = size; } for (tid = si; tid < ei; tid++) { output[tid] = OPERATION(data[tid]); } } __global__ void vector_operation_kernel(int *output, int *data, int size, int work_per_thread) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int si = tid * work_per_thread; int ei = si + work_per_thread; if (ei > size) { ei = size; } for (tid = si; tid < ei; tid++) { output[tid] = OPERATION(data[tid]); } } void sequential_vector_operation(int *output, int *data, int size) { for (int i = 0; i < size; i++) { output[i] = OPERATION_I(data[i]); } return; } int main(int argc, char *argv[]) { double elapsed_time; int block_size, grid_size; int data_size; int *data_h, *output_h, *device_output_h; int *data_d, *output_d; int work_per_thread; if (argc != 4) { printf("Correct way to execute this program is:\n"); printf("./vectorOp data_size block_size work_per_thread\n"); printf("For example:\n./vectorOp 10000 512 4\n"); return 1; } data_size = atoi(argv[1]); block_size = atoi(argv[2]); work_per_thread = atoi(argv[3]); // Initialize data on Host initialize_data_random(&data_h, data_size); initialize_data_zero(&output_h, data_size); initialize_data_zero(&device_output_h, data_size); // Initialize data on Device CUDA_CHECK_RETURN(hipMalloc((void **)&data_d, sizeof(int) * data_size)); CUDA_CHECK_RETURN(hipMalloc((void **)&output_d, sizeof(int) * data_size)); // Sequential vector operation set_clock(); sequential_vector_operation(output_h, data_h, data_size); elapsed_time = get_elapsed_time(); printf("-> Naive vector operation time: %.4fms\n", elapsed_time / 1000); // CUDA Parallel vector operation // TODO: compute grid_size grid_size = (data_size - 1) / (block_size * work_per_thread) + 1; dim3 grid_dime(grid_size, 1, 1); dim3 block_dime(block_size, 1, 1); set_clock(); CUDA_CHECK_RETURN(hipMemcpy(data_d, data_h, sizeof(int) * data_size, hipMemcpyHostToDevice)); // vector_operation_kernel<<< grid_dime, block_dime >>>(output_d, data_d, // data_size, work_per_thread); hipLaunchKernelGGL(( vector_operation_kernel_coalesced), dim3(grid_dime), dim3(block_dime), 0, 0, output_d, data_d, data_size); CUDA_CHECK_RETURN( hipDeviceSynchronize()); // Wait for the GPU launched work to complete CUDA_CHECK_RETURN(hipGetLastError()); // Copy back the result CUDA_CHECK_RETURN(hipMemcpy(device_output_h, output_d, sizeof(int) * data_size, hipMemcpyDeviceToHost)); elapsed_time = get_elapsed_time(); printf("-> CUDA vector operation time: %.4fms\n", elapsed_time / 1000); #ifdef TEST validate(output_h, device_output_h, data_size); #endif free(data_h); free(output_h); free(device_output_h); CUDA_CHECK_RETURN(hipFree(output_d)); CUDA_CHECK_RETURN(hipFree(data_d)); return 0; }
b6d4bb825a603c881fd1980a90ac03a5b4646657.cu
#include "vectorOp.h" __global__ void vector_operation_kernel_coalesced(int *output, int *data, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int total = blockDim.x * gridDim.x; for (; tid < size; tid += total) { output[tid] = OPERATION(data[tid]); } } __global__ void vector_operation_kernel(int *output, int *data, int size, int work_per_thread) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int si = tid * work_per_thread; int ei = si + work_per_thread; if (ei > size) { ei = size; } for (tid = si; tid < ei; tid++) { output[tid] = OPERATION(data[tid]); } } __global__ void vector_operation_kernel(int *output, int *data, int size, int work_per_thread) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int si = tid * work_per_thread; int ei = si + work_per_thread; if (ei > size) { ei = size; } for (tid = si; tid < ei; tid++) { output[tid] = OPERATION(data[tid]); } } void sequential_vector_operation(int *output, int *data, int size) { for (int i = 0; i < size; i++) { output[i] = OPERATION_I(data[i]); } return; } int main(int argc, char *argv[]) { double elapsed_time; int block_size, grid_size; int data_size; int *data_h, *output_h, *device_output_h; int *data_d, *output_d; int work_per_thread; if (argc != 4) { printf("Correct way to execute this program is:\n"); printf("./vectorOp data_size block_size work_per_thread\n"); printf("For example:\n./vectorOp 10000 512 4\n"); return 1; } data_size = atoi(argv[1]); block_size = atoi(argv[2]); work_per_thread = atoi(argv[3]); // Initialize data on Host initialize_data_random(&data_h, data_size); initialize_data_zero(&output_h, data_size); initialize_data_zero(&device_output_h, data_size); // Initialize data on Device CUDA_CHECK_RETURN(cudaMalloc((void **)&data_d, sizeof(int) * data_size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&output_d, sizeof(int) * data_size)); // Sequential vector operation set_clock(); sequential_vector_operation(output_h, data_h, data_size); elapsed_time = get_elapsed_time(); printf("-> Naive vector operation time: %.4fms\n", elapsed_time / 1000); // CUDA Parallel vector operation // TODO: compute grid_size grid_size = (data_size - 1) / (block_size * work_per_thread) + 1; dim3 grid_dime(grid_size, 1, 1); dim3 block_dime(block_size, 1, 1); set_clock(); CUDA_CHECK_RETURN(cudaMemcpy(data_d, data_h, sizeof(int) * data_size, cudaMemcpyHostToDevice)); // vector_operation_kernel<<< grid_dime, block_dime >>>(output_d, data_d, // data_size, work_per_thread); vector_operation_kernel_coalesced<<<grid_dime, block_dime>>>(output_d, data_d, data_size); CUDA_CHECK_RETURN( cudaDeviceSynchronize()); // Wait for the GPU launched work to complete CUDA_CHECK_RETURN(cudaGetLastError()); // Copy back the result CUDA_CHECK_RETURN(cudaMemcpy(device_output_h, output_d, sizeof(int) * data_size, cudaMemcpyDeviceToHost)); elapsed_time = get_elapsed_time(); printf("-> CUDA vector operation time: %.4fms\n", elapsed_time / 1000); #ifdef TEST validate(output_h, device_output_h, data_size); #endif free(data_h); free(output_h); free(device_output_h); CUDA_CHECK_RETURN(cudaFree(output_d)); CUDA_CHECK_RETURN(cudaFree(data_d)); return 0; }
c6e3f58ded80439f8f54405b8811ecbbc7c271d6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_unmask_ops.h" namespace caffe2 { namespace { __global__ void ComputeIndicesKernel( const int numMasks, const int maskSize, int* indices, bool* const masks[]) { CUDA_1D_KERNEL_LOOP(i, maskSize) { for (int j = 0; j < numMasks; ++j) { if (masks[j][i]) { indices[i] = j; return; } } CUDA_KERNEL_ASSERT(false); } } __global__ void FillValuesKernel( const int numMasks, const int maskSize, const size_t itemSize, const int* indices, char* const values[], int* valueSizes, char* dest) { CUDA_1D_KERNEL_LOOP(j, numMasks) { int k = 0; for (int i = 0; i < maskSize; ++i) { if (indices[i] == j) { for (int h = 0; h < itemSize; ++h) { dest[i * itemSize + h] = values[j][k * itemSize + h]; } ++k; } } CUDA_KERNEL_ASSERT(valueSizes[j] == k); } } } // namespace template <> class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanUnmaskOp(const OperatorDef& def, Workspace* ws) : Operator<CUDAContext>(def, ws) {} bool RunOnDevice() override { int maskSize = Input(0).numel(); int numMasks = InputSize() / 2; const auto& meta = Input(1).meta(); auto* out = Output(0); out->Resize(maskSize); auto* dest = (char*)out->raw_mutable_data(meta); ReinitializeTensor(&hostMasks_, {numMasks}, at::dtype<bool*>().device(CPU)); auto* hostMasksData = hostMasks_.mutable_data<bool*>(); ReinitializeTensor( &hostValues_, {numMasks}, at::dtype<char*>().device(CPU)); auto* hostValuesData = hostValues_.mutable_data<char*>(); ReinitializeTensor( &hostValueSizes_, {numMasks}, at::dtype<int>().device(CPU)); auto* hostValueSizesData = hostValueSizes_.mutable_data<int>(); for (int i = 0; i < numMasks; ++i) { auto& mask = Input(i * 2); CAFFE_ENFORCE_EQ(mask.dim(), 1); CAFFE_ENFORCE_EQ(mask.numel(), maskSize); hostMasksData[i] = const_cast<bool*>(mask.data<bool>()); const auto& value = Input(i * 2 + 1); CAFFE_ENFORCE_EQ(value.dim(), 1); hostValuesData[i] = (char*)value.raw_data(); hostValueSizesData[i] = value.numel(); } masks_.CopyFrom(hostMasks_); values_.CopyFrom(hostValues_); valueSizes_.CopyFrom(hostValueSizes_); ReinitializeTensor(&indices_, {maskSize}, at::dtype<int>().device(CUDA)); auto* indicesData = indices_.mutable_data<int>(); hipLaunchKernelGGL(( ComputeIndicesKernel), dim3(::min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), numMasks, maskSize, indicesData, masks_.data<bool*>()); auto* valueSizesData = valueSizes_.mutable_data<int>(); hipLaunchKernelGGL(( FillValuesKernel), dim3(::min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), numMasks, maskSize, meta.itemsize(), indicesData, values_.data<char*>(), valueSizesData, dest); return true; } private: Tensor indices_; Tensor masks_{CUDA}; Tensor values_{CUDA}; Tensor valueSizes_{CUDA}; Tensor hostMasks_; Tensor hostValues_; Tensor hostValueSizes_; }; REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>); } // caffe2
c6e3f58ded80439f8f54405b8811ecbbc7c271d6.cu
#include <algorithm> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/boolean_unmask_ops.h" namespace caffe2 { namespace { __global__ void ComputeIndicesKernel( const int numMasks, const int maskSize, int* indices, bool* const masks[]) { CUDA_1D_KERNEL_LOOP(i, maskSize) { for (int j = 0; j < numMasks; ++j) { if (masks[j][i]) { indices[i] = j; return; } } CUDA_KERNEL_ASSERT(false); } } __global__ void FillValuesKernel( const int numMasks, const int maskSize, const size_t itemSize, const int* indices, char* const values[], int* valueSizes, char* dest) { CUDA_1D_KERNEL_LOOP(j, numMasks) { int k = 0; for (int i = 0; i < maskSize; ++i) { if (indices[i] == j) { for (int h = 0; h < itemSize; ++h) { dest[i * itemSize + h] = values[j][k * itemSize + h]; } ++k; } } CUDA_KERNEL_ASSERT(valueSizes[j] == k); } } } // namespace template <> class BooleanUnmaskOp<CUDAContext> final : public Operator<CUDAContext> { public: BooleanUnmaskOp(const OperatorDef& def, Workspace* ws) : Operator<CUDAContext>(def, ws) {} bool RunOnDevice() override { int maskSize = Input(0).numel(); int numMasks = InputSize() / 2; const auto& meta = Input(1).meta(); auto* out = Output(0); out->Resize(maskSize); auto* dest = (char*)out->raw_mutable_data(meta); ReinitializeTensor(&hostMasks_, {numMasks}, at::dtype<bool*>().device(CPU)); auto* hostMasksData = hostMasks_.mutable_data<bool*>(); ReinitializeTensor( &hostValues_, {numMasks}, at::dtype<char*>().device(CPU)); auto* hostValuesData = hostValues_.mutable_data<char*>(); ReinitializeTensor( &hostValueSizes_, {numMasks}, at::dtype<int>().device(CPU)); auto* hostValueSizesData = hostValueSizes_.mutable_data<int>(); for (int i = 0; i < numMasks; ++i) { auto& mask = Input(i * 2); CAFFE_ENFORCE_EQ(mask.dim(), 1); CAFFE_ENFORCE_EQ(mask.numel(), maskSize); hostMasksData[i] = const_cast<bool*>(mask.data<bool>()); const auto& value = Input(i * 2 + 1); CAFFE_ENFORCE_EQ(value.dim(), 1); hostValuesData[i] = (char*)value.raw_data(); hostValueSizesData[i] = value.numel(); } masks_.CopyFrom(hostMasks_); values_.CopyFrom(hostValues_); valueSizes_.CopyFrom(hostValueSizes_); ReinitializeTensor(&indices_, {maskSize}, at::dtype<int>().device(CUDA)); auto* indicesData = indices_.mutable_data<int>(); ComputeIndicesKernel<<< std::min(maskSize, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( numMasks, maskSize, indicesData, masks_.data<bool*>()); auto* valueSizesData = valueSizes_.mutable_data<int>(); FillValuesKernel<<< std::min(numMasks, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( numMasks, maskSize, meta.itemsize(), indicesData, values_.data<char*>(), valueSizesData, dest); return true; } private: Tensor indices_; Tensor masks_{CUDA}; Tensor values_{CUDA}; Tensor valueSizes_{CUDA}; Tensor hostMasks_; Tensor hostValues_; Tensor hostValueSizes_; }; REGISTER_CUDA_OPERATOR(BooleanUnmask, BooleanUnmaskOp<CUDAContext>); } // caffe2
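The two kernels above split BooleanUnmask into two passes: ComputeIndicesKernel records, for each output slot, the first mask that claims it, and FillValuesKernel then copies values from each mask's value list in slot order. A small CPU reference sketch of the same semantics, with the element type simplified to float (booleanUnmaskRef is an illustrative name, not part of the operator):

#include <vector>

// CPU reference: masks[j][i] marks output slot i as coming from values[j];
// within each mask j, values are consumed in order of increasing slot index.
std::vector<float> booleanUnmaskRef(const std::vector<std::vector<bool>>& masks,
                                    const std::vector<std::vector<float>>& values) {
    const size_t n = masks.at(0).size();
    std::vector<float> out(n);
    std::vector<size_t> cursor(masks.size(), 0);   // per-mask read position
    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < masks.size(); ++j) {
            if (masks[j][i]) { out[i] = values[j][cursor[j]++]; break; }
        }
    }
    return out;
}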
4cb1303c55e2a7b36ee6bf2be6c3f46934fe5c3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used eke*/ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(hipFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(hipMalloc(&d_red, channelSize)); checkCudaErrors(hipMalloc(&d_green, channelSize)); checkCudaErrors(hipMalloc(&d_blue, channelSize)); checkCudaErrors(hipMalloc(&d_x__, channelSize)); checkCudaErrors(hipMalloc(&d_y__, channelSize)); checkCudaErrors(hipMalloc(&d_logY__, channelSize)); checkCudaErrors(hipMemcpy(d_red, red, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_green, green, channelSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_blue, blue, channelSize, hipMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); hipLaunchKernelGGL(( rgb_to_xyY), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = numBins; checkCudaErrors(hipMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(hipMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(hipMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel hipLaunchKernelGGL(( normalize_cdf), dim3((numBins + numThreads - 1) / numThreads), dim3(numThreads), 0, 0, d_cdf__, d_cdf_normalized, numBins); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); float h_cdf_normalized[numBins]; hipMemcpy(h_cdf_normalized, d_cdf_normalized, sizeof(float)*numBins, hipMemcpyDeviceToHost); std::cout << "h_cdf_normalized\n"; for (int i = 0; i < numBins; ++i) { std::cout << " " << h_cdf_normalized[i]; } std::cout << std::endl; //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(hipMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(hipMalloc(&d_blue, 
sizeof(float) * numPixels)); float log_Y_range = max_log_Y - min_log_Y; const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each luminance value to its new value //and then transform back to RGB space hipLaunchKernelGGL(( tonemap), dim3(gridSize), dim3(blockSize), 0, 0, d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipMemcpy(h_red, d_red, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_green, d_green, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_blue, d_blue, sizeof(float) * numPixels, hipMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(hipFree(d_cdf_normalized)); } void cleanupGlobalMemory(void) { checkCudaErrors(hipFree(d_x__)); checkCudaErrors(hipFree(d_y__)); checkCudaErrors(hipFree(d_logY__)); checkCudaErrors(hipFree(d_cdf__)); }
4cb1303c55e2a7b36ee6bf2be6c3f46934fe5c3e.cu
#include "utils.h" #include <string> #include "loadSaveImage.h" #include <thrust/extrema.h> //chroma-LogLuminance Space static float *d_x__; static float *d_y__; static float *d_logY__; //memory for the cdf static unsigned int *d_cdf__; static const int numBins = 1024; size_t numRows__; size_t numCols__; /* Copied from Mike's IPython notebook with some minor modifications * Mainly double precision constants to floats and log10 -> log10f * Also removed Luminance (Y) channel since it is never used eke*/ __global__ void rgb_to_xyY( float* d_r, float* d_g, float* d_b, float* d_x, float* d_y, float* d_log_Y, float delta, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float r = d_r[ image_index_1d ]; float g = d_g[ image_index_1d ]; float b = d_b[ image_index_1d ]; float X = ( r * 0.4124f ) + ( g * 0.3576f ) + ( b * 0.1805f ); float Y = ( r * 0.2126f ) + ( g * 0.7152f ) + ( b * 0.0722f ); float Z = ( r * 0.0193f ) + ( g * 0.1192f ) + ( b * 0.9505f ); float L = X + Y + Z; float x = X / L; float y = Y / L; float log_Y = log10f( delta + Y ); d_x[ image_index_1d ] = x; d_y[ image_index_1d ] = y; d_log_Y[ image_index_1d ] = log_Y; } } /* Copied from Mike's IPython notebook * Modified just by having threads read the normalization constant directly from device memory instead of copying it back */ __global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n ) { const float normalization_constant = 1.f / d_input_cdf[n - 1]; int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x; if ( global_index_1d < n ) { unsigned int input_value = d_input_cdf[ global_index_1d ]; float output_value = input_value * normalization_constant; d_output_cdf[ global_index_1d ] = output_value; } } /* Copied from Mike's IPython notebook * Modified double constants -> float * Perform tone mapping based upon new * luminance scaling */ __global__ void tonemap( float* d_x, float* d_y, float* d_log_Y, float* d_cdf_norm, float* d_r_new, float* d_g_new, float* d_b_new, float min_log_Y, float max_log_Y, float log_Y_range, int num_bins, int num_pixels_y, int num_pixels_x ) { int ny = num_pixels_y; int nx = num_pixels_x; int2 image_index_2d = make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y ); int image_index_1d = ( nx * image_index_2d.y ) + image_index_2d.x; if ( image_index_2d.x < nx && image_index_2d.y < ny ) { float x = d_x[ image_index_1d ]; float y = d_y[ image_index_1d ]; float log_Y = d_log_Y[ image_index_1d ]; int bin_index = min( num_bins - 1, int( (num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) ); float Y_new = d_cdf_norm[ bin_index ]; float X_new = x * ( Y_new / y ); float Z_new = ( 1 - x - y ) * ( Y_new / y ); float r_new = ( X_new * 3.2406f ) + ( Y_new * -1.5372f ) + ( Z_new * -0.4986f ); float g_new = ( X_new * -0.9689f ) + ( Y_new * 1.8758f ) + ( Z_new * 0.0415f ); float b_new = ( X_new * 0.0557f ) + ( Y_new * -0.2040f ) + ( Z_new * 1.0570f ); d_r_new[ image_index_1d ] = r_new; d_g_new[ image_index_1d ] = g_new; d_b_new[ image_index_1d ] = b_new; } } //return types are void since any internal error will be handled by quitting //no point in returning error codes... 
void preProcess(float** d_luminance, unsigned int** d_cdf, size_t *numRows, size_t *numCols, unsigned int *numberOfBins, const std::string &filename) { //make sure the context initializes ok checkCudaErrors(cudaFree(0)); float *imgPtr; //we will become responsible for this pointer loadImageHDR(filename, &imgPtr, &numRows__, &numCols__); *numRows = numRows__; *numCols = numCols__; //first thing to do is split incoming BGR float data into separate channels size_t numPixels = numRows__ * numCols__; float *red = new float[numPixels]; float *green = new float[numPixels]; float *blue = new float[numPixels]; //Remeber image is loaded BGR for (size_t i = 0; i < numPixels; ++i) { blue[i] = imgPtr[3 * i + 0]; green[i] = imgPtr[3 * i + 1]; red[i] = imgPtr[3 * i + 2]; } delete[] imgPtr; //being good citizens are releasing resources //allocated in loadImageHDR float *d_red, *d_green, *d_blue; //RGB space size_t channelSize = sizeof(float) * numPixels; checkCudaErrors(cudaMalloc(&d_red, channelSize)); checkCudaErrors(cudaMalloc(&d_green, channelSize)); checkCudaErrors(cudaMalloc(&d_blue, channelSize)); checkCudaErrors(cudaMalloc(&d_x__, channelSize)); checkCudaErrors(cudaMalloc(&d_y__, channelSize)); checkCudaErrors(cudaMalloc(&d_logY__, channelSize)); checkCudaErrors(cudaMemcpy(d_red, red, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_green, green, channelSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_blue, blue, channelSize, cudaMemcpyHostToDevice)); //convert from RGB space to chrominance/luminance space xyY const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols__ + blockSize.x - 1) / blockSize.x, (numRows__ + blockSize.y - 1) / blockSize.y, 1); rgb_to_xyY<<<gridSize, blockSize>>>(d_red, d_green, d_blue, d_x__, d_y__, d_logY__, .0001f, numRows__, numCols__); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); *d_luminance = d_logY__; //allocate memory for the cdf of the histogram *numberOfBins = numBins; checkCudaErrors(cudaMalloc(&d_cdf__, sizeof(unsigned int) * numBins)); checkCudaErrors(cudaMemset(d_cdf__, 0, sizeof(unsigned int) * numBins)); *d_cdf = d_cdf__; checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); delete[] red; delete[] green; delete[] blue; } void postProcess(const std::string& output_file, size_t numRows, size_t numCols, float min_log_Y, float max_log_Y) { const int numPixels = numRows__ * numCols__; const int numThreads = 192; float *d_cdf_normalized; checkCudaErrors(cudaMalloc(&d_cdf_normalized, sizeof(float) * numBins)); //first normalize the cdf to a maximum value of 1 //this is how we compress the range of the luminance channel normalize_cdf<<< (numBins + numThreads - 1) / numThreads, numThreads>>>(d_cdf__, d_cdf_normalized, numBins); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); float h_cdf_normalized[numBins]; cudaMemcpy(h_cdf_normalized, d_cdf_normalized, sizeof(float)*numBins, cudaMemcpyDeviceToHost); std::cout << "h_cdf_normalized\n"; for (int i = 0; i < numBins; ++i) { std::cout << " " << h_cdf_normalized[i]; } std::cout << std::endl; //allocate memory for the output RGB channels float *h_red, *h_green, *h_blue; float *d_red, *d_green, *d_blue; h_red = new float[numPixels]; h_green = new float[numPixels]; h_blue = new float[numPixels]; checkCudaErrors(cudaMalloc(&d_red, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_green, sizeof(float) * numPixels)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(float) * numPixels)); float log_Y_range = 
max_log_Y - min_log_Y; const dim3 blockSize(32, 16, 1); const dim3 gridSize( (numCols + blockSize.x - 1) / blockSize.x, (numRows + blockSize.y - 1) / blockSize.y ); //next perform the actual tone-mapping //we map each luminance value to its new value //and then transform back to RGB space tonemap<<<gridSize, blockSize>>>(d_x__, d_y__, d_logY__, d_cdf_normalized, d_red, d_green, d_blue, min_log_Y, max_log_Y, log_Y_range, numBins, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaMemcpy(h_red, d_red, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_green, d_green, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_blue, d_blue, sizeof(float) * numPixels, cudaMemcpyDeviceToHost)); //recombine the image channels float *imageHDR = new float[numPixels * 3]; for (int i = 0; i < numPixels; ++i) { imageHDR[3 * i + 0] = h_blue[i]; imageHDR[3 * i + 1] = h_green[i]; imageHDR[3 * i + 2] = h_red[i]; } saveImageHDR(imageHDR, numRows, numCols, output_file); delete[] imageHDR; delete[] h_red; delete[] h_green; delete[] h_blue; //cleanup checkCudaErrors(cudaFree(d_cdf_normalized)); } void cleanupGlobalMemory(void) { checkCudaErrors(cudaFree(d_x__)); checkCudaErrors(cudaFree(d_y__)); checkCudaErrors(cudaFree(d_logY__)); checkCudaErrors(cudaFree(d_cdf__)); }
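The postProcess path in the pair above is histogram equalization in log-luminance: the histogram of log Y is scanned into a CDF, normalize_cdf divides it by its last entry so it ends at 1, and tonemap replaces each pixel's luminance by the normalized CDF value of its bin while keeping the chromaticity (x, y) unchanged. The host-side sketch below walks through that remapping for one pixel; the bin count, histogram values and log-luminance range are illustrative and not taken from the files above.

// Host-side sketch of the CDF-based luminance remapping performed by
// normalize_cdf + tonemap (illustrative values only).
#include <algorithm>
#include <cstdio>

int main() {
    const int numBins = 8;                                   // illustrative bin count
    unsigned int hist[numBins] = {4, 9, 2, 0, 5, 7, 1, 3};   // histogram of log-luminance

    // inclusive scan of the histogram gives the unnormalized CDF
    unsigned int cdf[numBins];
    unsigned int running = 0;
    for (int i = 0; i < numBins; ++i) { running += hist[i]; cdf[i] = running; }

    // normalize_cdf: divide by the last entry so the CDF ends at 1.0
    float cdf_norm[numBins];
    for (int i = 0; i < numBins; ++i) cdf_norm[i] = cdf[i] / float(cdf[numBins - 1]);

    // tonemap: a pixel whose log-luminance falls in bin b gets Y_new = cdf_norm[b];
    // its chromaticity (x, y) is reused to go back to XYZ and then RGB.
    float min_log_Y = -2.0f, max_log_Y = 1.0f;               // illustrative range
    float log_Y_range = max_log_Y - min_log_Y;
    float log_Y = -0.25f;                                    // one example pixel
    int bin = std::min(numBins - 1,
                       int((numBins * (log_Y - min_log_Y)) / log_Y_range));
    printf("pixel in bin %d -> Y_new = %f\n", bin, cdf_norm[bin]);
    return 0;
}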
36d7430145781f2d4d189ad80dbe5bd31bf01b98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/cast.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! AdamUpdate <T = float32, Device = CUDA> */ template <typename T> __global__ void _AdamUpdate( const int count, const T lr, const T beta1, const T beta2, const T eps, T* g, T* m, T* v) { CUDA_1D_KERNEL_LOOP(i, count) { T gi = g[i]; T mi = m[i] = m[i] * beta1 + gi * (1 - beta1); T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); g[i] = lr * mi / (sqrt(vi) + eps); } } template <> void AdamUpdate<float, CUDAContext>( const int count, const float lr, const float beta1, const float beta2, const float eps, float* g, float* m, float* v, CUDAContext* ctx) { _AdamUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, lr, beta1, beta2, eps, g, m, v); } /*! AdamUpdate <T = float16, Device = CUDA> */ __global__ void _AdamUpdateHalf( const int count, const half lr, const half beta1, const half beta2, const half eps, half* g, half* m, half* v) { CUDA_1D_KERNEL_LOOP(i, count) { #if __CUDA_ARCH__ >= 530 half gi = g[i]; half kOne = __float2half(1.f); half mi = m[i] = __hadd( __hmul(m[i], beta1), __hmul(gi, __hsub(kOne, beta1)) ); half vi = v[i] = __hadd( __hmul(v[i], beta2), __hmul(gi, __hmul(gi, __hsub(kOne, beta2))) ); g[i] = __hdiv( __hmul(lr, mi), __hadd(hsqrt(vi), eps) ); #endif } } template <> void AdamUpdate<float16, CUDAContext>( const int count, const float lr, const float beta1, const float beta2, const float eps, float16* g, float16* m, float16* v, CUDAContext* ctx) { _AdamUpdateHalf << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, dragon_cast<half, float>(lr), dragon_cast<half, float>(beta1), dragon_cast<half, float>(beta2), dragon_cast<half, float>(eps), reinterpret_cast<half*>(g), reinterpret_cast<half*>(m), reinterpret_cast<half*>(v)); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
36d7430145781f2d4d189ad80dbe5bd31bf01b98.cu
#ifdef WITH_CUDA #include "core/context_cuda.h" #include "utils/cast.h" #include "utils/op_kernel.h" namespace dragon { namespace kernel { /*! AdamUpdate <T = float32, Device = CUDA> */ template <typename T> __global__ void _AdamUpdate( const int count, const T lr, const T beta1, const T beta2, const T eps, T* g, T* m, T* v) { CUDA_1D_KERNEL_LOOP(i, count) { T gi = g[i]; T mi = m[i] = m[i] * beta1 + gi * (1 - beta1); T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); g[i] = lr * mi / (sqrt(vi) + eps); } } template <> void AdamUpdate<float, CUDAContext>( const int count, const float lr, const float beta1, const float beta2, const float eps, float* g, float* m, float* v, CUDAContext* ctx) { _AdamUpdate<float> << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, lr, beta1, beta2, eps, g, m, v); } /*! AdamUpdate <T = float16, Device = CUDA> */ __global__ void _AdamUpdateHalf( const int count, const half lr, const half beta1, const half beta2, const half eps, half* g, half* m, half* v) { CUDA_1D_KERNEL_LOOP(i, count) { #if __CUDA_ARCH__ >= 530 half gi = g[i]; half kOne = __float2half(1.f); half mi = m[i] = __hadd( __hmul(m[i], beta1), __hmul(gi, __hsub(kOne, beta1)) ); half vi = v[i] = __hadd( __hmul(v[i], beta2), __hmul(gi, __hmul(gi, __hsub(kOne, beta2))) ); g[i] = __hdiv( __hmul(lr, mi), __hadd(hsqrt(vi), eps) ); #endif } } template <> void AdamUpdate<float16, CUDAContext>( const int count, const float lr, const float beta1, const float beta2, const float eps, float16* g, float16* m, float16* v, CUDAContext* ctx) { _AdamUpdateHalf << < CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream() >> > (count, dragon_cast<half, float>(lr), dragon_cast<half, float>(beta1), dragon_cast<half, float>(beta2), dragon_cast<half, float>(eps), reinterpret_cast<half*>(g), reinterpret_cast<half*>(m), reinterpret_cast<half*>(v)); } } // namespace kernel } // namepsace dragon #endif // WITH_CUDA
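_AdamUpdate and _AdamUpdateHalf above apply the same element-wise step, with no bias correction: m <- beta1*m + (1-beta1)*g, v <- beta2*v + (1-beta2)*g*g, and the update is written back into g as lr*m/(sqrt(v)+eps). A scalar host reference of that step, handy for spot-checking the float32 kernel on a single element, might look like the following; the function name and test values are illustrative, not part of the original pair.

// Scalar host reference for one Adam step, mirroring _AdamUpdate element-wise.
#include <cmath>
#include <cstdio>

void adam_step(float lr, float beta1, float beta2, float eps,
               float *g, float *m, float *v) {
    float gi = *g;
    *m = *m * beta1 + gi * (1.f - beta1);          // first-moment estimate
    *v = *v * beta2 + gi * gi * (1.f - beta2);     // second-moment estimate
    *g = lr * (*m) / (sqrtf(*v) + eps);            // the kernel stores the step back into g
}

int main() {
    float g = 0.5f, m = 0.f, v = 0.f;
    adam_step(1e-3f, 0.9f, 0.999f, 1e-8f, &g, &m, &v);
    printf("update = %g, m = %g, v = %g\n", g, m, v);
    return 0;
}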
055189fd1fabdcae45e138dad8bdb800d92f2bfc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> int penarty; //! Function that adds the next customer to visit to the route /*! * Assumes customer_num does not exceed the capacity limit */ __device__ void move(RDATA *rdata, PDATA *pdata, int customer_num) { if (customer_num == 0) { printf("CUSTOMER NUMBER IS 0 in move().\n"); exit(1); } else { int offset = rdata->cur_vehicle * rdata->nc + rdata->idx; rdata->route[offset] = customer_num; rdata->route_cap[rdata->cur_vehicle] += pdata->demands[customer_num]; rdata->idx++; } } //! Function that returns the total distance of the routes /*! * Sums the distance from the depot to the first customer visited by the first vehicle (vehicle_num), from that first customer to the second, from the second to the third, ... * and from the last visited customer back to the depot. This is done for every vehicle, and the grand total distance is returned.\n */ int calc_distance(RDATA *rdata, PDATA *pdata) { int sum_distance; int route_num; int i; int c1, c2; sum_distance = 0; for (route_num=0; route_num < rdata->nr; route_num++) { c2 = DEPOT_NUM; for (i=0; i < rdata->nc; i++) { c1 = c2; c2 = rdata->route[route_num*rdata->nc+i]; if (c2 == DEPOT_NUM) { sum_distance += pdata->cost[INDEX(c1, DEPOT_NUM)]; break; // on to the next route } else { sum_distance += pdata->cost[INDEX(c1, c2)]; } } if (i == rdata->nc) { sum_distance += pdata->cost[INDEX(c2, DEPOT_NUM)]; } } return sum_distance; } __global__ void random_simulation(RDATA *d_rdata, PDATA *d_pdata, int *d_distance) { __shared__ int candidates[d_pdata->nc]; // if this cannot be declared statically, allocate it dynamically __shared__ int num_candidates; int tid = threadIdx.y * blockDim.x + threadIdx.x; int cv = d_rdata->cur_vehicle; // current working vehicle while (cv < d_rdata->nr && !allVisit()) { if (tid == 0) num_candidates = 0; if (tid < d_rdata->nc) { if (!isVisit(tid)) { if (d_rdata->route_cap[cv] + vrp->demand[tid] <= d_pdata->capacity) { candidates[num_candidates] = i; atomicAdd(&num_candidates, 1); } } } __syncthreads(); if (num_candidates == 0) { d_rdata->cur_vehicle++; // change to next vehicle d_rdata->idx = 0; } else { i = rand() % num_candidates; move(d_rdata, d_pdata, candidates[i], d_rdata); } __syncthreads(); } d_distance[tid] = allVisit() ? calc_distance() : penarty; } void capsule_random_simulation(RDATA *rdata, PDATA *pdata) { } int main(int argc, char **argv) { return 0; }
055189fd1fabdcae45e138dad8bdb800d92f2bfc.cu
#include <stdio.h> #include <stdlib.h> int penarty; //! Function that adds the next customer to visit to the route /*! * Assumes customer_num does not exceed the capacity limit */ __device__ void move(RDATA *rdata, PDATA *pdata, int customer_num) { if (customer_num == 0) { printf("CUSTOMER NUMBER IS 0 in move().\n"); exit(1); } else { int offset = rdata->cur_vehicle * rdata->nc + rdata->idx; rdata->route[offset] = customer_num; rdata->route_cap[rdata->cur_vehicle] += pdata->demands[customer_num]; rdata->idx++; } } //! Function that returns the total distance of the routes /*! * Sums the distance from the depot to the first customer visited by the first vehicle (vehicle_num), from that first customer to the second, from the second to the third, ... * and from the last visited customer back to the depot. This is done for every vehicle, and the grand total distance is returned.\n */ int calc_distance(RDATA *rdata, PDATA *pdata) { int sum_distance; int route_num; int i; int c1, c2; sum_distance = 0; for (route_num=0; route_num < rdata->nr; route_num++) { c2 = DEPOT_NUM; for (i=0; i < rdata->nc; i++) { c1 = c2; c2 = rdata->route[route_num*rdata->nc+i]; if (c2 == DEPOT_NUM) { sum_distance += pdata->cost[INDEX(c1, DEPOT_NUM)]; break; // on to the next route } else { sum_distance += pdata->cost[INDEX(c1, c2)]; } } if (i == rdata->nc) { sum_distance += pdata->cost[INDEX(c2, DEPOT_NUM)]; } } return sum_distance; } __global__ void random_simulation(RDATA *d_rdata, PDATA *d_pdata, int *d_distance) { __shared__ int candidates[d_pdata->nc]; // if this cannot be declared statically, allocate it dynamically __shared__ int num_candidates; int tid = threadIdx.y * blockDim.x + threadIdx.x; int cv = d_rdata->cur_vehicle; // current working vehicle while (cv < d_rdata->nr && !allVisit()) { if (tid == 0) num_candidates = 0; if (tid < d_rdata->nc) { if (!isVisit(tid)) { if (d_rdata->route_cap[cv] + vrp->demand[tid] <= d_pdata->capacity) { candidates[num_candidates] = i; atomicAdd(&num_candidates, 1); } } } __syncthreads(); if (num_candidates == 0) { d_rdata->cur_vehicle++; // change to next vehicle d_rdata->idx = 0; } else { i = rand() % num_candidates; move(d_rdata, d_pdata, candidates[i], d_rdata); } __syncthreads(); } d_distance[tid] = allVisit() ? calc_distance() : penarty; } void capsule_random_simulation(RDATA *rdata, PDATA *pdata) { } int main(int argc, char **argv) { return 0; }
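random_simulation above is still an unfinished sketch (capsule_random_simulation and main are empty, and isVisit, allVisit, vrp and rand are not defined for the device), but the construction it outlines is recognizable: gather the unvisited customers whose demand still fits the current vehicle's remaining capacity, pick one at random, and switch to the next vehicle when nothing fits. A serial host version of that idea, with illustrative demands and capacity, might look like this.

// Serial host sketch of the random route construction outlined by the kernel:
// repeatedly pick a random unvisited customer that still fits the current
// vehicle's capacity; when none fits, move on to the next vehicle.
#include <cstdio>
#include <cstdlib>
#include <vector>

int main() {
    const int num_customers = 6, num_vehicles = 3, capacity = 10;
    const int demand[num_customers] = {4, 3, 7, 2, 5, 6};    // illustrative demands
    std::vector<bool> visited(num_customers, false);
    std::vector<std::vector<int>> routes(num_vehicles);

    int vehicle = 0, load = 0;
    while (vehicle < num_vehicles) {
        std::vector<int> candidates;
        for (int c = 0; c < num_customers; ++c)
            if (!visited[c] && load + demand[c] <= capacity) candidates.push_back(c);
        if (candidates.empty()) { ++vehicle; load = 0; continue; }   // next vehicle
        int pick = candidates[rand() % candidates.size()];
        routes[vehicle].push_back(pick);
        visited[pick] = true;
        load += demand[pick];
    }
    for (int v = 0; v < num_vehicles; ++v) {
        printf("vehicle %d:", v);
        for (int c : routes[v]) printf(" %d", c);
        printf("\n");
    }
    return 0;              // any customers left unvisited would incur the penalty case
}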
3a0d6b17089f19d786f29f9c57cb99b2be4494d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" //#include "col2im.h" #include "utils.h" #include "hip/hip_runtime.h" } __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { hipLaunchKernelGGL(( binarize_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, x, n, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for(i = 0; i < n; ++i){ mean += fabsf(input[i*size + s]); } mean = mean / n; for(i = 0; i < n; ++i){ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_input_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, input, n, size, binary); check_error(hipPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for(i = 0; i < size; ++i){ mean += fabsf(weights[f*size + i]); } mean = mean / size; for(i = 0; i < size; ++i){ binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { hipLaunchKernelGGL(( binarize_weights_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, weights, n, size, binary); check_error(hipPeekAtLastError()); } void forward_convolutional_layer_gpu(convolutional_layer l, network net) { fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); // if(l.binary){ // binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); // swap_binary(&l); // } // // if(l.xnor){ // binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); // swap_binary(&l); // binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu); // net.input_gpu = l.binary_input_gpu; // } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int i, j; int m = l.n/l.groups; int k = l.size*l.size*l.c/l.groups; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.weights_gpu + j*l.nweights/l.groups; float *b = net.workspace; float *c = l.output_gpu + (i*l.groups + j)*n*m; float *im = net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w; if (l.size == 1){ b = im; } else { im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); } gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } #endif if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); // if(l.binary || l.xnor) swap_binary(&l); } //__global__ void 
smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta) //{ // int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; // if(id >= n) return; // // int j = id % w; // id /= w; // int i = id % h; // id /= h; // int k = id % c; // id /= c; // int b = id; // // int w_offset = -(size/2.f); // int h_offset = -(size/2.f); // // int out_index = j + w*(i + h*(k + c*b)); // int l, m; // for(l = 0; l < size; ++l){ // for(m = 0; m < size; ++m){ // int cur_h = h_offset + i + l; // int cur_w = w_offset + j + m; // int index = cur_w + w*(cur_h + h*(k + b*c)); // int valid = (cur_h >= 0 && cur_h < h && // cur_w >= 0 && cur_w < w); // delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0; // } // } //} //extern "C" void smooth_layer(layer l, int size, float rate) //{ // int h = l.out_h; // int w = l.out_w; // int c = l.out_c; // // size_t n = h*w*c*l.batch; // // smooth_kernel<<<cuda_gridsize(n), BLOCK>>>(l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu); // check_error(hipPeekAtLastError()); //} //void backward_convolutional_layer_gpu(convolutional_layer l, network net) //{ // if(l.smooth){ // smooth_layer(l, 5, l.smooth); // } // //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); // gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); // // // if(l.batch_normalize){ // backward_batchnorm_layer_gpu(l, net); // } else { // backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); // } // float *original_input = net.input_gpu; // // if(l.xnor) net.input_gpu = l.binary_input_gpu; //#ifdef CUDNN // float one = 1; // cudnnConvolutionBackwardFilter(cudnn_handle(), // &one, // l.srcTensorDesc, // net.input_gpu, // l.ddstTensorDesc, // l.delta_gpu, // l.convDesc, // l.bf_algo, // net.workspace, // l.workspace_size, // &one, // l.dweightDesc, // l.weight_updates_gpu); // // if(net.delta_gpu){ // if(l.binary || l.xnor) swap_binary(&l); // cudnnConvolutionBackwardData(cudnn_handle(), // &one, // l.weightDesc, // l.weights_gpu, // l.ddstTensorDesc, // l.delta_gpu, // l.convDesc, // l.bd_algo, // net.workspace, // l.workspace_size, // &one, // l.dsrcTensorDesc, // net.delta_gpu); // if(l.binary || l.xnor) swap_binary(&l); // if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu); // } // //#else // int m = l.n/l.groups; // int n = l.size*l.size*l.c/l.groups; // int k = l.out_w*l.out_h; // // int i, j; // for(i = 0; i < l.batch; ++i){ // for(j = 0; j < l.groups; ++j){ // float *a = l.delta_gpu + (i*l.groups + j)*m*k; // float *b = net.workspace; // float *c = l.weight_updates_gpu + j*l.nweights/l.groups; // // float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; // float *imd = net.delta_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; // // im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); // gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); // // if (net.delta_gpu) { // if (l.binary || l.xnor) swap_binary(&l); // a = l.weights_gpu + j*l.nweights/l.groups; // b = l.delta_gpu + (i*l.groups + j)*m*k; // c = net.workspace; // if (l.size == 1) { // c = imd; // } // // gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k); // // if (l.size != 1) { // col2im_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, imd); // } // if(l.binary || l.xnor) { // swap_binary(&l); // } // } // if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w); // } // } //#endif //} void pull_convolutional_layer(layer l) { 
cuda_pull_array(l.weights_gpu, l.weights, l.nweights); cuda_pull_array(l.biases_gpu, l.biases, l.n); // cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights); // cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_convolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); cuda_push_array(l.biases_gpu, l.biases, l.n); // cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights); // cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } //void update_convolutional_layer_gpu(layer l, update_args a) //{ // float learning_rate = a.learning_rate*l.learning_rate_scale; // float momentum = a.momentum; // float decay = a.decay; // int batch = a.batch; // // if(a.adam){ // adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); // adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); // if(l.scales_gpu){ // adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); // } // }else{ // axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); // axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); // scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); // // axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); // scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); // // if(l.scales_gpu){ // axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); // scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); // } // } // if(l.clip){ // constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1); // } //}
3a0d6b17089f19d786f29f9c57cb99b2be4494d3.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "convolutional_layer.h" #include "batchnorm_layer.h" #include "gemm.h" #include "blas.h" #include "im2col.h" //#include "col2im.h" #include "utils.h" #include "cuda.h" } __global__ void binarize_kernel(float *x, int n, float *binary) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= n) return; binary[i] = (x[i] >= 0) ? 1 : -1; } void binarize_gpu(float *x, int n, float *binary) { binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_input_kernel(float *input, int n, int size, float *binary) { int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (s >= size) return; int i = 0; float mean = 0; for(i = 0; i < n; ++i){ mean += fabsf(input[i*size + s]); } mean = mean / n; for(i = 0; i < n; ++i){ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean; } } void binarize_input_gpu(float *input, int n, int size, float *binary) { binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary); check_error(cudaPeekAtLastError()); } __global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary) { int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (f >= n) return; int i = 0; float mean = 0; for(i = 0; i < size; ++i){ mean += fabsf(weights[f*size + i]); } mean = mean / size; for(i = 0; i < size; ++i){ binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean; //binary[f*size + i] = weights[f*size + i]; } } void binarize_weights_gpu(float *weights, int n, int size, float *binary) { binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary); check_error(cudaPeekAtLastError()); } void forward_convolutional_layer_gpu(convolutional_layer l, network net) { fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1); // if(l.binary){ // binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); // swap_binary(&l); // } // // if(l.xnor){ // binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu); // swap_binary(&l); // binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu); // net.input_gpu = l.binary_input_gpu; // } #ifdef CUDNN float one = 1; cudnnConvolutionForward(cudnn_handle(), &one, l.srcTensorDesc, net.input_gpu, l.weightDesc, l.weights_gpu, l.convDesc, l.fw_algo, net.workspace, l.workspace_size, &one, l.dstTensorDesc, l.output_gpu); #else int i, j; int m = l.n/l.groups; int k = l.size*l.size*l.c/l.groups; int n = l.out_w*l.out_h; for(i = 0; i < l.batch; ++i){ for(j = 0; j < l.groups; ++j){ float *a = l.weights_gpu + j*l.nweights/l.groups; float *b = net.workspace; float *c = l.output_gpu + (i*l.groups + j)*n*m; float *im = net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w; if (l.size == 1){ b = im; } else { im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); } gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n); } } #endif if (l.batch_normalize) { forward_batchnorm_layer_gpu(l, net); } else { add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h); } activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation); //if(l.dot > 0) dot_error_gpu(l); // if(l.binary || l.xnor) swap_binary(&l); } //__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta) //{ // int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; // if(id >= n) return; // // int j = 
id % w; // id /= w; // int i = id % h; // id /= h; // int k = id % c; // id /= c; // int b = id; // // int w_offset = -(size/2.f); // int h_offset = -(size/2.f); // // int out_index = j + w*(i + h*(k + c*b)); // int l, m; // for(l = 0; l < size; ++l){ // for(m = 0; m < size; ++m){ // int cur_h = h_offset + i + l; // int cur_w = w_offset + j + m; // int index = cur_w + w*(cur_h + h*(k + b*c)); // int valid = (cur_h >= 0 && cur_h < h && // cur_w >= 0 && cur_w < w); // delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0; // } // } //} //extern "C" void smooth_layer(layer l, int size, float rate) //{ // int h = l.out_h; // int w = l.out_w; // int c = l.out_c; // // size_t n = h*w*c*l.batch; // // smooth_kernel<<<cuda_gridsize(n), BLOCK>>>(l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu); // check_error(cudaPeekAtLastError()); //} //void backward_convolutional_layer_gpu(convolutional_layer l, network net) //{ // if(l.smooth){ // smooth_layer(l, 5, l.smooth); // } // //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1); // gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu); // // // if(l.batch_normalize){ // backward_batchnorm_layer_gpu(l, net); // } else { // backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h); // } // float *original_input = net.input_gpu; // // if(l.xnor) net.input_gpu = l.binary_input_gpu; //#ifdef CUDNN // float one = 1; // cudnnConvolutionBackwardFilter(cudnn_handle(), // &one, // l.srcTensorDesc, // net.input_gpu, // l.ddstTensorDesc, // l.delta_gpu, // l.convDesc, // l.bf_algo, // net.workspace, // l.workspace_size, // &one, // l.dweightDesc, // l.weight_updates_gpu); // // if(net.delta_gpu){ // if(l.binary || l.xnor) swap_binary(&l); // cudnnConvolutionBackwardData(cudnn_handle(), // &one, // l.weightDesc, // l.weights_gpu, // l.ddstTensorDesc, // l.delta_gpu, // l.convDesc, // l.bd_algo, // net.workspace, // l.workspace_size, // &one, // l.dsrcTensorDesc, // net.delta_gpu); // if(l.binary || l.xnor) swap_binary(&l); // if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu); // } // //#else // int m = l.n/l.groups; // int n = l.size*l.size*l.c/l.groups; // int k = l.out_w*l.out_h; // // int i, j; // for(i = 0; i < l.batch; ++i){ // for(j = 0; j < l.groups; ++j){ // float *a = l.delta_gpu + (i*l.groups + j)*m*k; // float *b = net.workspace; // float *c = l.weight_updates_gpu + j*l.nweights/l.groups; // // float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; // float *imd = net.delta_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w; // // im2col_gpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b); // gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n); // // if (net.delta_gpu) { // if (l.binary || l.xnor) swap_binary(&l); // a = l.weights_gpu + j*l.nweights/l.groups; // b = l.delta_gpu + (i*l.groups + j)*m*k; // c = net.workspace; // if (l.size == 1) { // c = imd; // } // // gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k); // // if (l.size != 1) { // col2im_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, imd); // } // if(l.binary || l.xnor) { // swap_binary(&l); // } // } // if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w); // } // } //#endif //} void pull_convolutional_layer(layer l) { cuda_pull_array(l.weights_gpu, l.weights, l.nweights); cuda_pull_array(l.biases_gpu, l.biases, l.n); // cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights); // 
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_pull_array(l.scales_gpu, l.scales, l.n); cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } void push_convolutional_layer(layer l) { cuda_push_array(l.weights_gpu, l.weights, l.nweights); cuda_push_array(l.biases_gpu, l.biases, l.n); // cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights); // cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); if (l.batch_normalize){ cuda_push_array(l.scales_gpu, l.scales, l.n); cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n); cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n); } } //void update_convolutional_layer_gpu(layer l, update_args a) //{ // float learning_rate = a.learning_rate*l.learning_rate_scale; // float momentum = a.momentum; // float decay = a.decay; // int batch = a.batch; // // if(a.adam){ // adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t); // adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); // if(l.scales_gpu){ // adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t); // } // }else{ // axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1); // axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1); // scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1); // // axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1); // scal_gpu(l.n, momentum, l.bias_updates_gpu, 1); // // if(l.scales_gpu){ // axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1); // scal_gpu(l.n, momentum, l.scale_updates_gpu, 1); // } // } // if(l.clip){ // constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1); // } //}
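In the non-cuDNN branch of forward_convolutional_layer_gpu above, each (batch, group) pair turns into one GEMM: A is the weight slice with m = n/groups rows and k = size*size*c/groups columns, B is the im2col buffer with k rows and n = out_w*out_h columns, and C is the corresponding output slice. The host sketch below shows the same im2col-then-GEMM idea for a single 3x3 box filter on a 4x4 single-channel image; sizes and names are illustrative, and this is not darknet's API.

// Host im2col + naive GEMM for one 3x3, stride-1, pad-1 filter on a 4x4 image.
#include <cstdio>

int main() {
    const int H = 4, W = 4, K = 3, pad = 1, stride = 1;
    const int outH = (H + 2 * pad - K) / stride + 1;          // = 4
    const int outW = (W + 2 * pad - K) / stride + 1;          // = 4
    float im[H * W], col[K * K * outH * outW], kernel[K * K], out[outH * outW];
    for (int i = 0; i < H * W; ++i) im[i] = float(i);
    for (int i = 0; i < K * K; ++i) kernel[i] = 1.f / 9.f;    // box filter

    // im2col: each output position gets one column holding its KxK receptive field
    for (int ky = 0; ky < K; ++ky)
        for (int kx = 0; kx < K; ++kx)
            for (int oy = 0; oy < outH; ++oy)
                for (int ox = 0; ox < outW; ++ox) {
                    int iy = oy * stride + ky - pad, ix = ox * stride + kx - pad;
                    float v = (iy >= 0 && iy < H && ix >= 0 && ix < W) ? im[iy * W + ix] : 0.f;
                    col[(ky * K + kx) * outH * outW + oy * outW + ox] = v;
                }

    // GEMM with m = 1 filter, k = K*K, n = outH*outW: out = kernel * col
    for (int n = 0; n < outH * outW; ++n) {
        float acc = 0.f;
        for (int k = 0; k < K * K; ++k) acc += kernel[k] * col[k * outH * outW + n];
        out[n] = acc;
    }
    printf("out[0] = %f (zero-padded 3x3 box filter at the top-left corner)\n", out[0]);
    return 0;
}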
066d774a0fd10d698b9ca15c6f8bf45e2f07b86d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Ref:https://github.com/PacktPublishing/Hands-On-GPU-Accelerated-Computer-Vision-with-OpenCV-and-CUDA/blob/master/Chapter2/03_thread_execution_example.cu #include <iostream> #include <stdio.h> __global__ void myfirstkernel(void) { printf("Hello! I'm thread in block: %d\n", blockIdx.x); } int main() { hipLaunchKernelGGL(( myfirstkernel), dim3(16), dim3(1), 0, 0, ); hipDeviceSynchronize(); printf("All threads are finished.\n"); return 0; }
066d774a0fd10d698b9ca15c6f8bf45e2f07b86d.cu
// Ref:https://github.com/PacktPublishing/Hands-On-GPU-Accelerated-Computer-Vision-with-OpenCV-and-CUDA/blob/master/Chapter2/03_thread_execution_example.cu #include <iostream> #include <stdio.h> __global__ void myfirstkernel(void) { printf("Hello! I'm thread in block: %d\n", blockIdx.x); } int main() { myfirstkernel<<<16, 1>>>(); cudaDeviceSynchronize(); printf("All threads are finished.\n"); return 0; }
3e8efb12279bf4d0976d0d1089b1069f59203bd6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stddef.h> #include<search.h> #include<hip/device_functions.h> #define MAX_FILE_SIZE 200 #define MAX_HASH_ENTRIES 200 #define M 10 __global__ void getWordCounts(char *fileArray,int *countArray,int *fileSize,char *wordhashtable, int *nextPtr, int *lock){ unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; int ind,word_started =0 ,count =0; int found; int hashvalue; char *ptr,*wptr,*temp; ptr = &fileArray[i*MAX_FILE_SIZE]; int tempi=0,tempi2; for(ind =0;ind<fileSize[i];ind++){ if(ptr[ind]>64&&ptr[ind]<91) ptr[ind]+=32; if(ptr[ind]!=' '&&ptr[ind]!='.'&&ptr[ind]!='!') if(word_started!=1) { word_started = 1; hashvalue = ptr[ind];//>64&&ptr[ind]<91) ? ptr[ind]+32:ptr[ind];//temp addition else do only assignemnt wptr = &ptr[ind]; } else{//Middle of the word hashvalue+= ptr[ind];//>64&&ptr[ind]<91) ? ptr[ind]+32:ptr[ind]; } if(word_started) if(ptr[ind]==' '||ptr[ind]=='.'||ptr[ind]=='!'){ word_started = 0; hashvalue = hashvalue % M;// 10 here is hashtable size M /*Check Location*/ //lock -hashvalue while(!atomicCAS(&lock[hashvalue],0,1)); if(wordhashtable[hashvalue*20]=='\0'){//Not found in Hash temp = &wordhashtable[hashvalue*20]; tempi =0; while(&wptr[tempi]!=&ptr[ind])//Entering in hash table {temp[tempi]= wptr[tempi]; tempi++;} //unlock -hash value atomicCAS(&lock[hashvalue],1,0); //fn-atomicAdd(&countArray[hashvalue],1);//count countArray[hashvalue] = hashvalue; } else{//Collision detection tempi =hashvalue;found = -1; /*Check word*/ while(nextPtr[tempi]!=-1||found==-1){ tempi2 = 0; found =1; temp = &wordhashtable[tempi*20]; while(&wptr[tempi2]!=&ptr[ind]){ if(temp[tempi2]!=wptr[tempi2]) {found =0;break;} tempi2++; } if(temp[tempi2]!='\0') found =0; //unlock - tempi atomicCAS(&lock[tempi],1,0); if(found) break; if(nextPtr[tempi]!=-1){ tempi = nextPtr[tempi]; //lock - tempi while(!atomicCAS(&lock[tempi],0,1)); } } if(found){ atomicAdd(&countArray[tempi],1); countArray[tempi]=hashvalue;}//DEBUG else{//Collision but record not found tempi2 =0; //lock - M+tempi2 while(!atomicCAS(&lock[M+tempi2],0,1)); while(wordhashtable[(M+tempi2)*20]!='\0' && tempi2<MAX_HASH_ENTRIES) tempi2++;//10 = M; tempi2 holds location in hast tab;e if(tempi2 < MAX_HASH_ENTRIES){ nextPtr[tempi] = tempi2+M;tempi=0;//tempi holds the location where last hash was found temp = &wordhashtable[(M+tempi2)*20]; while(&wptr[tempi]!=&ptr[ind]) //Entering in hash table {temp[tempi]= wptr[tempi]; tempi++;} //unlock - M+tempi2 atomicCAS(&lock[M+tempi2],1,0); countArray[tempi2+M] = hashvalue; //fn-atomicAdd(&countArray[tempi2+M],1); }//count*/ //tryunlock = M+tempi2 atomicCAS(&lock[M+tempi2],1,0); } } //atomicAdd(&countArray[hashvalue],1); //atomicExch(&countArray[hashvalue],hashvalue); count++; } } //countArray[i] = hashvalue; } int main(int argc,char **argv){ char *filename=NULL;//Limiting no if files char *fileArray; char *dfileArray; int *countArray; int *dcountArray; int *fileSize; int *dfileSize; char *hashtable; char *dhashtable; int *nextPtr; int *dnextPtr; int *dlock; int noOfFiles=0; FILE *fp; char *temp;int itemp=0; filename =(char*) malloc (10*sizeof(char)); fileArray=(char*) malloc(10*MAX_FILE_SIZE*sizeof(char)); countArray =(int*) malloc (MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words fileSize =(int*) malloc (10*sizeof(int)); hashtable=(char*) malloc(20*MAX_HASH_ENTRIES*sizeof(char)); nextPtr = (int*) malloc (MAX_HASH_ENTRIES*sizeof(int)); 
hipMalloc((void**)&dfileArray,10*MAX_FILE_SIZE*sizeof(char)); hipMalloc((void**)&dcountArray,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words hipMalloc((void**)&dfileSize,10*sizeof(int)); hipMalloc((void**)&dhashtable,20*MAX_HASH_ENTRIES*sizeof(char));//20-max word size 500-max words hipMalloc((void**)&dnextPtr,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words hipMalloc((void**)&dlock,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words hipMemset(dcountArray,0,MAX_HASH_ENTRIES*sizeof(int)); hipMemset(dhashtable,'\0',20*MAX_HASH_ENTRIES*sizeof(char)); hipMemset(dnextPtr,-1,MAX_HASH_ENTRIES*sizeof(int)); hipMemset(dlock,0,MAX_HASH_ENTRIES*sizeof(int)); while(scanf("%s",filename)!=EOF){ printf("\nAttempting to open %s",filename); fp = fopen(filename,"r"); if(fp == NULL) { perror("failed to open sample.txt"); exit(0) ;//EXIT_FAILURE; } fread(&fileArray[noOfFiles*200],MAX_FILE_SIZE*sizeof(char),1,fp); fileSize[noOfFiles]=ftell(fp); fclose(fp);fp = NULL; noOfFiles++; } temp = fileArray; while(itemp<noOfFiles){ printf("%s\n",temp);itemp++; temp+=200; } hipMemcpy(dfileArray,fileArray,10*MAX_FILE_SIZE*sizeof(char),hipMemcpyHostToDevice); hipMemcpy(dfileSize,fileSize,10*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( getWordCounts), dim3(1),dim3(noOfFiles), 0, 0, dfileArray,dcountArray,dfileSize,dhashtable,dnextPtr, dlock); hipDeviceSynchronize(); hipMemcpy(countArray,dcountArray,200*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(hashtable,dhashtable,20*200*sizeof(char),hipMemcpyDeviceToHost); itemp=0; printf("\nNo Of Words : \n"); while(itemp<200){ // printf("\t%d",countArray[itemp]);itemp++; if(hashtable[itemp*20]!='\0') printf("%s:[%d]\n",&hashtable[itemp*20],countArray[itemp]); itemp++; } hipFree(dfileArray); hipFree(dcountArray); hipFree(dhashtable); free(fileArray); free(countArray); free(hashtable); }
3e8efb12279bf4d0976d0d1089b1069f59203bd6.cu
#include<stdio.h> #include<stddef.h> #include<search.h> #include<device_functions.h> #define MAX_FILE_SIZE 200 #define MAX_HASH_ENTRIES 200 #define M 10 __global__ void getWordCounts(char *fileArray,int *countArray,int *fileSize,char *wordhashtable, int *nextPtr, int *lock){ unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; int ind,word_started =0 ,count =0; int found; int hashvalue; char *ptr,*wptr,*temp; ptr = &fileArray[i*MAX_FILE_SIZE]; int tempi=0,tempi2; for(ind =0;ind<fileSize[i];ind++){ if(ptr[ind]>64&&ptr[ind]<91) ptr[ind]+=32; if(ptr[ind]!=' '&&ptr[ind]!='.'&&ptr[ind]!='!') if(word_started!=1) { word_started = 1; hashvalue = ptr[ind];//>64&&ptr[ind]<91) ? ptr[ind]+32:ptr[ind];//temp addition else do only assignemnt wptr = &ptr[ind]; } else{//Middle of the word hashvalue+= ptr[ind];//>64&&ptr[ind]<91) ? ptr[ind]+32:ptr[ind]; } if(word_started) if(ptr[ind]==' '||ptr[ind]=='.'||ptr[ind]=='!'){ word_started = 0; hashvalue = hashvalue % M;// 10 here is hashtable size M /*Check Location*/ //lock -hashvalue while(!atomicCAS(&lock[hashvalue],0,1)); if(wordhashtable[hashvalue*20]=='\0'){//Not found in Hash temp = &wordhashtable[hashvalue*20]; tempi =0; while(&wptr[tempi]!=&ptr[ind])//Entering in hash table {temp[tempi]= wptr[tempi]; tempi++;} //unlock -hash value atomicCAS(&lock[hashvalue],1,0); //fn-atomicAdd(&countArray[hashvalue],1);//count countArray[hashvalue] = hashvalue; } else{//Collision detection tempi =hashvalue;found = -1; /*Check word*/ while(nextPtr[tempi]!=-1||found==-1){ tempi2 = 0; found =1; temp = &wordhashtable[tempi*20]; while(&wptr[tempi2]!=&ptr[ind]){ if(temp[tempi2]!=wptr[tempi2]) {found =0;break;} tempi2++; } if(temp[tempi2]!='\0') found =0; //unlock - tempi atomicCAS(&lock[tempi],1,0); if(found) break; if(nextPtr[tempi]!=-1){ tempi = nextPtr[tempi]; //lock - tempi while(!atomicCAS(&lock[tempi],0,1)); } } if(found){ atomicAdd(&countArray[tempi],1); countArray[tempi]=hashvalue;}//DEBUG else{//Collision but record not found tempi2 =0; //lock - M+tempi2 while(!atomicCAS(&lock[M+tempi2],0,1)); while(wordhashtable[(M+tempi2)*20]!='\0' && tempi2<MAX_HASH_ENTRIES) tempi2++;//10 = M; tempi2 holds location in hast tab;e if(tempi2 < MAX_HASH_ENTRIES){ nextPtr[tempi] = tempi2+M;tempi=0;//tempi holds the location where last hash was found temp = &wordhashtable[(M+tempi2)*20]; while(&wptr[tempi]!=&ptr[ind]) //Entering in hash table {temp[tempi]= wptr[tempi]; tempi++;} //unlock - M+tempi2 atomicCAS(&lock[M+tempi2],1,0); countArray[tempi2+M] = hashvalue; //fn-atomicAdd(&countArray[tempi2+M],1); }//count*/ //tryunlock = M+tempi2 atomicCAS(&lock[M+tempi2],1,0); } } //atomicAdd(&countArray[hashvalue],1); //atomicExch(&countArray[hashvalue],hashvalue); count++; } } //countArray[i] = hashvalue; } int main(int argc,char **argv){ char *filename=NULL;//Limiting no if files char *fileArray; char *dfileArray; int *countArray; int *dcountArray; int *fileSize; int *dfileSize; char *hashtable; char *dhashtable; int *nextPtr; int *dnextPtr; int *dlock; int noOfFiles=0; FILE *fp; char *temp;int itemp=0; filename =(char*) malloc (10*sizeof(char)); fileArray=(char*) malloc(10*MAX_FILE_SIZE*sizeof(char)); countArray =(int*) malloc (MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words fileSize =(int*) malloc (10*sizeof(int)); hashtable=(char*) malloc(20*MAX_HASH_ENTRIES*sizeof(char)); nextPtr = (int*) malloc (MAX_HASH_ENTRIES*sizeof(int)); cudaMalloc((void**)&dfileArray,10*MAX_FILE_SIZE*sizeof(char)); cudaMalloc((void**)&dcountArray,MAX_HASH_ENTRIES*sizeof(int));//corresponding 
counts of words cudaMalloc((void**)&dfileSize,10*sizeof(int)); cudaMalloc((void**)&dhashtable,20*MAX_HASH_ENTRIES*sizeof(char));//20-max word size 500-max words cudaMalloc((void**)&dnextPtr,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words cudaMalloc((void**)&dlock,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words cudaMemset(dcountArray,0,MAX_HASH_ENTRIES*sizeof(int)); cudaMemset(dhashtable,'\0',20*MAX_HASH_ENTRIES*sizeof(char)); cudaMemset(dnextPtr,-1,MAX_HASH_ENTRIES*sizeof(int)); cudaMemset(dlock,0,MAX_HASH_ENTRIES*sizeof(int)); while(scanf("%s",filename)!=EOF){ printf("\nAttempting to open %s",filename); fp = fopen(filename,"r"); if(fp == NULL) { perror("failed to open sample.txt"); exit(0) ;//EXIT_FAILURE; } fread(&fileArray[noOfFiles*200],MAX_FILE_SIZE*sizeof(char),1,fp); fileSize[noOfFiles]=ftell(fp); fclose(fp);fp = NULL; noOfFiles++; } temp = fileArray; while(itemp<noOfFiles){ printf("%s\n",temp);itemp++; temp+=200; } cudaMemcpy(dfileArray,fileArray,10*MAX_FILE_SIZE*sizeof(char),cudaMemcpyHostToDevice); cudaMemcpy(dfileSize,fileSize,10*sizeof(int),cudaMemcpyHostToDevice); getWordCounts<<<1,noOfFiles>>>(dfileArray,dcountArray,dfileSize,dhashtable,dnextPtr, dlock); cudaThreadSynchronize(); cudaMemcpy(countArray,dcountArray,200*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(hashtable,dhashtable,20*200*sizeof(char),cudaMemcpyDeviceToHost); itemp=0; printf("\nNo Of Words : \n"); while(itemp<200){ // printf("\t%d",countArray[itemp]);itemp++; if(hashtable[itemp*20]!='\0') printf("%s:[%d]\n",&hashtable[itemp*20],countArray[itemp]); itemp++; } cudaFree(dfileArray); cudaFree(dcountArray); cudaFree(dhashtable); free(fileArray); free(countArray); free(hashtable); }
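Both versions of getWordCounts above serialize access to each hash slot with a spin lock built from atomicCAS. Note that the acquire written as while(!atomicCAS(&lock[h],0,1)); stops looping as soon as atomicCAS returns a non-zero old value, i.e. when the slot is already held, which looks inverted relative to the usual pattern of looping while the CAS fails. A self-contained CUDA sketch of the conventional pattern is below; the kernel, variable names and launch shape are illustrative and not taken from the files above.

// Minimal sketch of the conventional atomicCAS spin lock on the GPU:
// acquire by looping until the CAS succeeds, release with atomicExch.
// Assumes independent thread scheduling (compute capability >= 7.0); on older
// GPUs, spinning inside a warp like this can livelock.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void locked_increment(int *lock, int *counter) {
    bool done = false;
    while (!done) {
        if (atomicCAS(lock, 0, 1) == 0) {              // acquired: old value was 0
            __threadfence();                           // order the acquire before the read
            int v = *(volatile int *)counter;          // volatile bypasses the incoherent L1
            *(volatile int *)counter = v + 1;          // critical section
            __threadfence();                           // publish the store before releasing
            atomicExch(lock, 0);                       // release
            done = true;
        }
    }
}

int main() {
    int *d_lock, *d_counter;
    cudaMalloc(&d_lock, sizeof(int));
    cudaMalloc(&d_counter, sizeof(int));
    cudaMemset(d_lock, 0, sizeof(int));
    cudaMemset(d_counter, 0, sizeof(int));
    locked_increment<<<4, 32>>>(d_lock, d_counter);
    cudaDeviceSynchronize();
    int h = 0;
    cudaMemcpy(&h, d_counter, sizeof(int), cudaMemcpyDeviceToHost);
    printf("counter = %d (expected 128)\n", h);
    cudaFree(d_lock);
    cudaFree(d_counter);
    return 0;
}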
e7c89f5ccff974645277957e63926c7768941414.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "util.cuh" #include <stdio.h> #include <stdlib.h> #include <thrust/device_vector.h> #include <numeric> #include <ATen/ATen.h> #include <torch/extension.h> #include "roctracer/roctx.h" #include "TmpMalloc.cuh" #define SECTION_SIZE 64 #define SECTION_SIZE_LARGE 512 #define BLOCK_WIDTH 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; float *copy_to_device(vector <vector<float>> X, int number_of_points, int number_of_dims) { float *d_X; hipMalloc(&d_X, sizeof(float) * number_of_points * number_of_dims); for (int i = 0; i < number_of_points; i++) { float *h_x_i = X[i].data(); hipMemcpy(&d_X[i * number_of_dims], h_x_i, sizeof(float) * number_of_dims, hipMemcpyHostToDevice); } return d_X; } float *copy_to_device(at::Tensor X, int number_of_points, int number_of_dims) { float *d_X; hipMalloc(&d_X, sizeof(float) * number_of_points * number_of_dims); hipMemcpy(d_X, X.data_ptr<float>(), sizeof(float) * number_of_points * number_of_dims, hipMemcpyHostToDevice); return d_X; } __global__ void print_array_gpu(int *x, int n) { for (int i = 0; i < n; i++) { if (x[i] < 10 && x[i] > -1) printf(" "); if (x[i] < 100 && x[i] > -10) printf(" "); printf("%d ", x[i]); } printf("\n"); } __global__ void print_array_gpu(float *x, int n) { for (int i = 0; i < n; i++) { printf("%f ", x[i]); } printf("\n"); } __global__ void print_array_gpu(bool *x, int n) { for (int i = 0; i < n; i++) { printf("%d ", (int) x[i]); } printf("\n"); } __global__ void scan_kernel_eff(int *x, int *y, int n) { /** * from the cuda book */ __shared__ int XY[SECTION_SIZE]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { XY[threadIdx.x] = x[i]; } for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index < blockDim.x) { XY[index] += XY[index - stride]; } } for (int stride = SECTION_SIZE; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1) * stride * 2 - 1; if (index + stride < SECTION_SIZE) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < n) { y[i] = XY[threadIdx.x]; } } __global__ void scan_kernel_eff_large1(int *x, int *y, int *S, int n) { /** * from the cuda book */ __shared__ int XY[SECTION_SIZE]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { XY[threadIdx.x] = x[i]; } for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index < blockDim.x) { XY[index] += XY[index - stride]; } } for (int stride = SECTION_SIZE; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1) * stride * 2 - 1; if (index + stride < SECTION_SIZE) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < n) { y[i] = XY[threadIdx.x]; } if (threadIdx.x == 0) { S[blockIdx.x] = XY[SECTION_SIZE - 1]; } } __global__ void scan_kernel_eff_large3(int *y, int *S, int n) { /** * from the cuda book */ int i = blockIdx.x * blockDim.x + threadIdx.x; if (blockIdx.x > 0 && i < n) { y[i] += S[blockIdx.x - 1]; } } void inclusive_scan(int *source, int *result, int n) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S; hipMalloc((void **) &S, 
numBlocks * sizeof(int)); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > (source, result, S, n); inclusive_scan(S, S, numBlocks); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); hipFree(S); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); } } void inclusive_scan_points(int *source, int *result, int n, TmpMalloc *tmps) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S = tmps->malloc_points(); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > (source, result, S, n); inclusive_scan(S, S, numBlocks); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); tmps->free_points(S); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); } } void inclusive_scan_nodes(int *source, int *result, int n, TmpMalloc *tmps) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S = tmps->malloc_nodes(); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > (source, result, S, n); inclusive_scan(S, S, numBlocks); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); tmps->free_nodes(S); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); gpuErrchk(hipPeekAtLastError()); } } void inclusive_scan_any(int *source, int *result, int n, TmpMalloc *tmps) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S = tmps->malloc_any(numBlocks); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > (source, result, S, n); inclusive_scan_any(S, S, numBlocks, tmps); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); tmps->free_any(S, numBlocks); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); gpuErrchk(hipPeekAtLastError()); } } void inclusive_scan_cpu(int *d_x, int *d_y, int n) { int *h_x = new int[n]; int *h_y = new int[n]; hipMemcpy(h_y, d_y, n * sizeof(int), hipMemcpyDeviceToHost); int tmp = 0; for (int i = 0; i < n; i++) { tmp += h_y[i]; h_x[i] = tmp; } hipMemcpy(d_x, h_x, n * sizeof(int), hipMemcpyHostToDevice); hipDeviceSynchronize(); delete h_x; delete h_y; } void inclusive_scan_async(int *x, int *y, int n, hipStream_t stream) { int numBlocks = n / BLOCK_WIDTH; if (n % BLOCK_WIDTH) numBlocks++; if (n > SECTION_SIZE) { int *S; hipMalloc((void **) &S, (n / SECTION_SIZE) * sizeof(int)); scan_kernel_eff_large1 << < numBlocks, BLOCK_WIDTH, 0, stream >> > (x, y, S, n); inclusive_scan_async(S, S, n / SECTION_SIZE, stream); scan_kernel_eff_large3 << < numBlocks, BLOCK_WIDTH, 0, stream >> > (y, S, n); hipFree(S); } else { scan_kernel_eff << < numBlocks, BLOCK_WIDTH, 0, stream >> > (x, y, n); } } void populate(int *parents, int *cells, int *counts, int *dim_start, int *dims, int c, int d, int n) { int lvl_size = c - c * 1 / 3; int prev_lvl_size = 0; int prev_count = 0; for (int i = 0; i < d; i++) { dims[i] = d - i; dim_start[i] = prev_count; int p = -1; for (int j = 0; j < lvl_size; j++) { p += j % 3 == 2 ? 0 : 1; if (i == 0) { parents[j + prev_count] = -1; } else { parents[j + prev_count] = prev_count - prev_lvl_size + p; } } prev_count += lvl_size; prev_lvl_size = lvl_size; lvl_size *= 1.5; } for (int i = 0; i < d; i++) { int r_count = 0; int c_no = 0; for (int j = 0; j < ((i < d - 1 ? dim_start[i + 1] : n) - dim_start[i]); j++) { int m = (i == 0 ? 
c * 1 / 3 : c - 2); if (i != 0 && j % 3 != 2) { r_count = 0; c_no = 0; } while (r_count < m && rand() % c < m) { r_count++; c_no++; } cells[dim_start[i] + j] = c_no + 1; c_no++; } } for (int j = 0; j < dim_start[d - 1]; j++) { counts[j] = 0; } for (int j = dim_start[d - 1]; j < n; j++) { int count = rand() % 10 * rand() % 10 + 1; counts[j] = count; int p = parents[j]; while (p != -1) { counts[p] += count; p = parents[p]; } } } void print_scy_tree(int *parents, int *cells, int *counts, int *dim_start, int *dims, int d, int n) { printf("r: %d/%d\n", cells[0], counts[0]); if (d == 0) return; int *leaf_count = new int[n]; for (int i = 0; i < n; i++) leaf_count[i] = 0; for (int i = dim_start[d - 1]; i < n; i++) { leaf_count[i] = 0; int p = i; while (p > 0) { leaf_count[p]++; p = parents[p]; } } for (int i = 0; i < d; i++) { printf("%d: ", dims[i]); for (int j = dim_start[i]; j < ((i < (d - 1)) ? dim_start[i + 1] : n); j++) { if (cells[j] < 100) printf(" "); if (cells[j] < 10) printf(" "); printf("%d/%d ", cells[j], counts[j]); if (counts[j] < 100 && counts[j] > -10) printf(" "); if (counts[j] < 10 && counts[j] > -1) printf(" "); for (int k = 0; k < leaf_count[j] - 1; k++) { printf(" ", cells[j], counts[j]); } } printf("\n"); } } int get_size(int c, int d) { int lvl_size = c - c * 1 / 3; int prev_count = 0; for (int i = 0; i < d; i++) { prev_count += lvl_size; lvl_size *= 1.5; } return prev_count; } void print_array_range(int *x, int start, int end) { for (int i = start; i < end; i++) { printf("%d ", (int) x[i]); } printf("\n\n"); } void print_array(int *x, int n) { int left = 400; int right = 400; if (n <= left + right) { for (int i = 0; i < n; i++) { if (x[i] < 10 && x[i] > -1) printf(" "); if (x[i] < 100 && x[i] > -10) printf(" "); printf("%d ", (int) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%d ", (int) x[i]); } printf(" ... "); for (int i = n - right; i < n; i++) { printf("%d ", (int) x[i]); } } printf("\n\n"); } void print_array(vector<int> x, int n) { int left = 400; int right = 400; if (n <= left + right) { for (int i = 0; i < n; i++) { printf("%d ", (int) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%d ", (int) x[i]); } printf(" ... "); for (int i = n - right; i < n; i++) { printf("%d ", (int) x[i]); } } printf("\n\n"); } void print_array(float *x, int n) { int left = 30; int right = 10; if (n <= left + right) { for (int i = 0; i < n; i++) { printf("%f ", (float) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%f ", (float) x[i]); } printf(" ... "); for (int i = n - right; i < n; i++) { printf("%f ", (float) x[i]); } } printf("\n\n"); } void print_array(thrust::device_vector<int> x, int n) { int left = 30; int right = 10; if (n <= left + right) { for (int i = 0; i < n; i++) { printf("%d ", (int) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%d ", x[i]); } printf(" ... 
"); for (int i = n - right; i < n; i++) { printf("%d ", x[i]); } } printf("\n\n"); } int get_incorrect(int *array_1, int *array_2, int n) { int count = 0; for (int i = 0; i < n; i++) { if (array_1[i] != array_2[i]) { count++; } } return count; } float v_mean(std::vector<float> v) { //https://stackoverflow.com/questions/28574346/find-average-of-input-to-vector-c return accumulate(v.begin(), v.end(), 0.0) / v.size(); } vector<float> m_get_col(vector <vector<float>> m, int i) { vector<float> col; for (int j = 0; j < m.size(); j++) { col.push_back(m[j][i]); } return col; } float v_min(std::vector<float> v) { float min = std::numeric_limits<float>::infinity(); for (int i = 0; i < v.size(); i++) { if (v[i] < min) { min = v[i]; } } return min; } float v_max(std::vector<float> v) { float max = -100000.; for (int i = 0; i < v.size(); i++) { if (v[i] > max) { max = v[i]; } } return max; } int v_max(std::vector<int> v) { int max = -100000; for (int i = 0; i < v.size(); i++) { if (v[i] > max) { max = v[i]; } } return max; } void m_normalize(std::vector <std::vector<float>> &m) { float *min = new float[m[0].size()]; float *max = new float[m[0].size()]; for (int j = 0; j < m[0].size(); j++) { min[j] = 100000.; max[j] = -100000.; } for (int i = 0; i < m.size(); i++) { for (int j = 0; j < m[0].size(); j++) { min[j] = min[j] < m[i][j] ? min[j] : m[i][j]; max[j] = max[j] > m[i][j] ? max[j] : m[i][j]; } printf("finding min/max: %d%%\r", int(((i + 1) * 100) / m.size())); } printf("finding min/max: 100%%\n"); for (int i = 0; i < m.size(); i++) { for (int j = 0; j < m[0].size(); j++) { m[i][j] = max[j] != min[j] ? (m[i][j] - min[j]) / (max[j] - min[j]) : 0; } printf("normalizing: %d%%\r", int(((i + 1) * 100) / m.size())); } printf("normalizing: 100%%\n"); } template<class T> vector <T> clone(vector <T> v_old) { vector <T> v_clone(v_old); return v_clone; } void zero(int *array, int n) { for (int i = 0; i < n; i++) array[i] = 0; } bool subspace_of(vector<int> subspace, vector<int> subspace_mark) { int i = 0; int j = 0; while (j < subspace_mark.size() && i < subspace.size()) { if (subspace[i] == subspace_mark[j]) { i++; j++; } else { j++; } } return i == subspace.size(); } bool vec_cmp::operator()(const vector<int> &a, const vector<int> &b) const { int i = a.size() - 1; int j = b.size() - 1; while (a[i] == b[j]) { i--; j--; if (i < 0 || j < 0) { return i < j; } } return a[i] < b[j]; } void join(map <vector<int>, vector<int>, vec_cmp> &result, vector<int> &clustering, vector<int> subspace, int min_size, float r) { int clustering_max = v_max(clustering); if (clustering_max < 0) { return; } int n = clustering.size(); map<int, int> sizes; for (int i = 0; i < n; i++) { int cluster = clustering[i]; if (cluster >= 0) { if (sizes.count(cluster)) { sizes[cluster]++; } else { sizes.insert(pair<int, int>(cluster, 1)); } } } for (int i = 0; i < n; i++) { int cluster = clustering[i]; if (cluster >= 0 && sizes[cluster] < min_size) { clustering[i] = -1; } } for (pair <vector<int>, vector<int>> subspace_clustering : result) { vector<int> subspace_H = subspace_clustering.first; vector<int> clustering_H = subspace_clustering.second; if (subspace_of(subspace, subspace_H)) { map<int, int> sizes_H; set<int> to_be_removed; for (int cluster_id: clustering_H) { if (cluster_id >= 0) { if (sizes_H.count(cluster_id)) { sizes_H[cluster_id]++; } else { sizes_H.insert(pair<int, int>(cluster_id, 1)); } } } for (int i = 0; i < n; i++) { int cluster = clustering[i]; int cluster_H = clustering_H[i]; if (cluster >= 0 && cluster_H >= 0 && 
sizes[cluster] * r < sizes_H[cluster_H]) { to_be_removed.insert(cluster); } } for (int i = 0; i < n; i++) { int cluster = clustering[i]; if (cluster >= 0 && to_be_removed.find(cluster) != to_be_removed.end()) { clustering[i] = -1; } } } } clustering_max = v_max(clustering); if (clustering_max >= 0) { if (result.count(subspace)) { vector<int> clustering_old = result[subspace]; int m = v_max(clustering_old); for (int i = 0; i < n; i++) { if (clustering[i] == -2) { clustering_old[i] = clustering[i]; } else if (clustering[i] >= 0) { clustering_old[i] = m + 1 + clustering[i]; } } result[subspace] = clustering_old; } else { result.insert(pair < vector < int > , vector < int >> (subspace, clustering)); } } } __global__ void join_count_kernel(int *d_sizes, int *d_clustering, int n) { for (int i = threadIdx.x; i < n; i += blockDim.x) { int cluster = d_clustering[i]; if (cluster >= 0) { atomicAdd(&d_sizes[cluster], 1); } } } __global__ void join_erease_kernel(int *d_sizes, int *d_clustering, int n, int min_size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int cluster = d_clustering[i]; if (cluster >= 0 && d_sizes[cluster] < min_size) { d_clustering[i] = -1; } } } __global__ void join_marke_remove_kernel(int *d_to_be_removed, int *d_sizes, int *d_clustering, int *d_sizes_H, int *d_clustering_H, int n, float r) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int cluster = d_clustering[i]; int cluster_H = d_clustering_H[i]; if (cluster >= 0 && cluster_H >= 0 && d_sizes[cluster] * r < d_sizes_H[cluster_H]) { d_to_be_removed[cluster] = 1; } } } __global__ void join_remove_kernel(int *d_to_be_removed, int *d_sizes, int *d_clustering, int n) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int cluster = d_clustering[i]; if (cluster >= 0 && d_to_be_removed[cluster]) { d_clustering[i] = -1; } } } __global__ void copy_if_positive(int *d_clustering_old, int *d_clustering, int n) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if (d_clustering[i] >= 0) { d_clustering_old[i] = d_clustering[i]; } } } __global__ void v_max_gpu_kernel(int *d_max, int *d_array, int n) { for (int i = threadIdx.x; i < n; i += blockDim.x) { atomicMax(&d_max[0], d_array[i]); } } int v_max_gpu(int *d_array, int n) { int BLOCK_SIZE = 512; int number_of_threads = min(n, BLOCK_SIZE); int *d_max; hipMalloc(&d_max, sizeof(int)); hipMemset(d_max, -1, sizeof(int)); v_max_gpu_kernel << < 1, number_of_threads >> > (d_max, d_array, n); int h_max; hipMemcpy(&h_max, d_max, sizeof(int), hipMemcpyDeviceToHost); hipFree(d_max); return h_max; } int v_max_gpu(int *d_array, int n, TmpMalloc *tmps) { int BLOCK_SIZE = 512; int number_of_threads = min(n, BLOCK_SIZE); int *d_max = tmps->malloc_one(); hipMemset(d_max, -1, sizeof(int)); v_max_gpu_kernel << < 1, number_of_threads >> > (d_max, d_array, n); int h_max; hipMemcpy(&h_max, d_max, sizeof(int), hipMemcpyDeviceToHost); tmps->free_one(d_max); return h_max; } void join_gpu(map<vector<int>, int *, vec_cmp> &result, int *d_clustering, vector<int> subspace, int min_size, float r, int n, TmpMalloc *tmps) { tmps->reset_counters(); int BLOCK_SIZE = 512; int number_of_blocks = n / BLOCK_SIZE; if (n % BLOCK_SIZE) number_of_blocks++; int number_of_threads = min(n, BLOCK_SIZE); int *d_sizes = tmps->get_int_array(tmps->int_array_counter++, n); hipMemset(d_sizes, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > (d_sizes, 
d_clustering, n); join_erease_kernel << < number_of_blocks, number_of_threads >> > (d_sizes, d_clustering, n, min_size); int *d_clustering_H; int *d_sizes_H = tmps->get_int_array(tmps->int_array_counter++, n); int *d_to_be_removed = tmps->get_int_array(tmps->int_array_counter++, n); for (pair<vector<int>, int *> subspace_clustering : result) { vector<int> subspace_H = subspace_clustering.first; d_clustering_H = subspace_clustering.second; roctxRangePushA("subspace_of"); bool sub_of = subspace_of(subspace, subspace_H); roctxRangePop(); if (sub_of) { hipMemset(d_sizes_H, 0, n * sizeof(int)); hipMemset(d_to_be_removed, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > (d_sizes_H, d_clustering_H, n); join_marke_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, d_sizes_H, d_clustering_H, n, r); join_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, n); } } int clustering_max = v_max_gpu(d_clustering, n); if (clustering_max >= 0) { if (result.count(subspace)) { int *d_clustering_old = result[subspace]; copy_if_positive << < number_of_blocks, number_of_threads >> > (d_clustering_old, d_clustering, n); tmps->free_points(d_clustering); result[subspace] = d_clustering_old; } else { result.insert(pair < vector < int > , int * > (subspace, d_clustering)); } } else { tmps->free_points(d_clustering); } } void join_gpu1(map <vector<int>, vector<int>, vec_cmp> &result, vector<int> &clustering, int *d_clustering, vector<int> subspace, int min_size, float r, int n) { int BLOCK_SIZE = 512; int number_of_blocks = n / BLOCK_SIZE; if (n % BLOCK_SIZE) number_of_blocks++; int number_of_threads = min(n, BLOCK_SIZE); int *d_sizes; hipMalloc(&d_sizes, n * sizeof(int)); hipMemset(d_sizes, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > (d_sizes, d_clustering, n); join_erease_kernel << < number_of_blocks, number_of_threads >> > (d_sizes, d_clustering, n, min_size); int *d_subspace_H; int *d_clustering_H; hipMalloc(&d_clustering_H, n * sizeof(int)); int *d_sizes_H; hipMalloc(&d_sizes_H, n * sizeof(int)); int *d_to_be_removed; hipMalloc(&d_to_be_removed, n * sizeof(int)); for (pair <vector<int>, vector<int>> subspace_clustering : result) { vector<int> subspace_H = subspace_clustering.first; vector<int> clustering_H = subspace_clustering.second; hipMemcpy(d_clustering_H, clustering_H.data(), n * sizeof(int), hipMemcpyHostToDevice); if (subspace_of(subspace, subspace_H)) { hipMemset(d_sizes_H, 0, n * sizeof(int)); hipMemset(d_to_be_removed, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > (d_sizes_H, d_clustering_H, n); join_marke_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, d_sizes_H, d_clustering_H, n, r); join_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, n); } } hipMemcpy(clustering.data(), d_clustering, n * sizeof(int), hipMemcpyDeviceToHost); int clustering_max = v_max(clustering); int clustering_max_other = v_max_gpu(d_clustering, n); if (clustering_max != clustering_max_other) printf("%d!=%d\n", clustering_max, clustering_max_other); if (clustering_max >= 0) { if (result.count(subspace)) { vector<int> clustering_old = result[subspace]; int *d_clustering_old; hipMalloc(&d_clustering_old, n * sizeof(int)); hipMemcpy(d_clustering_old, clustering_old.data(), n * sizeof(int), hipMemcpyHostToDevice); copy_if_positive << < number_of_blocks, 
number_of_threads >> > (d_clustering_old, d_clustering, n); hipMemcpy(clustering_old.data(), d_clustering_old, n * sizeof(int), hipMemcpyDeviceToHost); result[subspace] = clustering_old; } else { result.insert(pair < vector < int > , vector < int >> (subspace, clustering)); } } }
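// --- Editor's illustrative sketch (not part of the original source) ---
// Minimal host-side usage of the hierarchical inclusive_scan defined above. The array
// size (1000) and the all-ones contents are assumptions chosen so that the multi-block
// path (n > SECTION_SIZE) is exercised and the expected result is easy to check.
void example_inclusive_scan_usage() {
    const int n = 1000;
    int h_in[n];
    for (int i = 0; i < n; i++) h_in[i] = 1;               // prefix sums should be 1, 2, ..., n
    int *d_in, *d_out;
    hipMalloc(&d_in, n * sizeof(int));
    hipMalloc(&d_out, n * sizeof(int));
    hipMemcpy(d_in, h_in, n * sizeof(int), hipMemcpyHostToDevice);
    inclusive_scan(d_in, d_out, n);                        // multi-block scan with block-sum fix-up
    int h_out[n];
    hipMemcpy(h_out, d_out, n * sizeof(int), hipMemcpyDeviceToHost);
    printf("last prefix sum: %d (expected %d)\n", h_out[n - 1], n);
    hipFree(d_in);
    hipFree(d_out);
}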
e7c89f5ccff974645277957e63926c7768941414.cu
#include "util.cuh" #include <stdio.h> #include <stdlib.h> #include <thrust/device_vector.h> #include <numeric> #include <ATen/ATen.h> #include <torch/extension.h> #include "nvToolsExt.h" #include "TmpMalloc.cuh" #define SECTION_SIZE 64 #define SECTION_SIZE_LARGE 512 #define BLOCK_WIDTH 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } using namespace std; float *copy_to_device(vector <vector<float>> X, int number_of_points, int number_of_dims) { float *d_X; cudaMalloc(&d_X, sizeof(float) * number_of_points * number_of_dims); for (int i = 0; i < number_of_points; i++) { float *h_x_i = X[i].data(); cudaMemcpy(&d_X[i * number_of_dims], h_x_i, sizeof(float) * number_of_dims, cudaMemcpyHostToDevice); } return d_X; } float *copy_to_device(at::Tensor X, int number_of_points, int number_of_dims) { float *d_X; cudaMalloc(&d_X, sizeof(float) * number_of_points * number_of_dims); cudaMemcpy(d_X, X.data_ptr<float>(), sizeof(float) * number_of_points * number_of_dims, cudaMemcpyHostToDevice); return d_X; } __global__ void print_array_gpu(int *x, int n) { for (int i = 0; i < n; i++) { if (x[i] < 10 && x[i] > -1) printf(" "); if (x[i] < 100 && x[i] > -10) printf(" "); printf("%d ", x[i]); } printf("\n"); } __global__ void print_array_gpu(float *x, int n) { for (int i = 0; i < n; i++) { printf("%f ", x[i]); } printf("\n"); } __global__ void print_array_gpu(bool *x, int n) { for (int i = 0; i < n; i++) { printf("%d ", (int) x[i]); } printf("\n"); } __global__ void scan_kernel_eff(int *x, int *y, int n) { /** * from the cuda book */ __shared__ int XY[SECTION_SIZE]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { XY[threadIdx.x] = x[i]; } for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index < blockDim.x) { XY[index] += XY[index - stride]; } } for (int stride = SECTION_SIZE; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1) * stride * 2 - 1; if (index + stride < SECTION_SIZE) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < n) { y[i] = XY[threadIdx.x]; } } __global__ void scan_kernel_eff_large1(int *x, int *y, int *S, int n) { /** * from the cuda book */ __shared__ int XY[SECTION_SIZE]; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { XY[threadIdx.x] = x[i]; } for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index < blockDim.x) { XY[index] += XY[index - stride]; } } for (int stride = SECTION_SIZE; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1) * stride * 2 - 1; if (index + stride < SECTION_SIZE) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < n) { y[i] = XY[threadIdx.x]; } if (threadIdx.x == 0) { S[blockIdx.x] = XY[SECTION_SIZE - 1]; } } __global__ void scan_kernel_eff_large3(int *y, int *S, int n) { /** * from the cuda book */ int i = blockIdx.x * blockDim.x + threadIdx.x; if (blockIdx.x > 0 && i < n) { y[i] += S[blockIdx.x - 1]; } } void inclusive_scan(int *source, int *result, int n) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S; cudaMalloc((void **) &S, numBlocks * sizeof(int)); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > 
(source, result, S, n); inclusive_scan(S, S, numBlocks); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); cudaFree(S); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); } } void inclusive_scan_points(int *source, int *result, int n, TmpMalloc *tmps) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S = tmps->malloc_points(); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > (source, result, S, n); inclusive_scan(S, S, numBlocks); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); tmps->free_points(S); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); } } void inclusive_scan_nodes(int *source, int *result, int n, TmpMalloc *tmps) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S = tmps->malloc_nodes(); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > (source, result, S, n); inclusive_scan(S, S, numBlocks); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); tmps->free_nodes(S); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); gpuErrchk(cudaPeekAtLastError()); } } void inclusive_scan_any(int *source, int *result, int n, TmpMalloc *tmps) { int numBlocks = n / SECTION_SIZE; if (n % SECTION_SIZE) numBlocks++; if (n > SECTION_SIZE) { int *S = tmps->malloc_any(numBlocks); scan_kernel_eff_large1 << < numBlocks, SECTION_SIZE >> > (source, result, S, n); inclusive_scan_any(S, S, numBlocks, tmps); scan_kernel_eff_large3 << < numBlocks, SECTION_SIZE >> > (result, S, n); tmps->free_any(S, numBlocks); } else { scan_kernel_eff << < numBlocks, SECTION_SIZE >> > (source, result, n); gpuErrchk(cudaPeekAtLastError()); } } void inclusive_scan_cpu(int *d_x, int *d_y, int n) { int *h_x = new int[n]; int *h_y = new int[n]; cudaMemcpy(h_y, d_y, n * sizeof(int), cudaMemcpyDeviceToHost); int tmp = 0; for (int i = 0; i < n; i++) { tmp += h_y[i]; h_x[i] = tmp; } cudaMemcpy(d_x, h_x, n * sizeof(int), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); delete h_x; delete h_y; } void inclusive_scan_async(int *x, int *y, int n, cudaStream_t stream) { int numBlocks = n / BLOCK_WIDTH; if (n % BLOCK_WIDTH) numBlocks++; if (n > SECTION_SIZE) { int *S; cudaMalloc((void **) &S, (n / SECTION_SIZE) * sizeof(int)); scan_kernel_eff_large1 << < numBlocks, BLOCK_WIDTH, 0, stream >> > (x, y, S, n); inclusive_scan_async(S, S, n / SECTION_SIZE, stream); scan_kernel_eff_large3 << < numBlocks, BLOCK_WIDTH, 0, stream >> > (y, S, n); cudaFree(S); } else { scan_kernel_eff << < numBlocks, BLOCK_WIDTH, 0, stream >> > (x, y, n); } } void populate(int *parents, int *cells, int *counts, int *dim_start, int *dims, int c, int d, int n) { int lvl_size = c - c * 1 / 3; int prev_lvl_size = 0; int prev_count = 0; for (int i = 0; i < d; i++) { dims[i] = d - i; dim_start[i] = prev_count; int p = -1; for (int j = 0; j < lvl_size; j++) { p += j % 3 == 2 ? 0 : 1; if (i == 0) { parents[j + prev_count] = -1; } else { parents[j + prev_count] = prev_count - prev_lvl_size + p; } } prev_count += lvl_size; prev_lvl_size = lvl_size; lvl_size *= 1.5; } for (int i = 0; i < d; i++) { int r_count = 0; int c_no = 0; for (int j = 0; j < ((i < d - 1 ? dim_start[i + 1] : n) - dim_start[i]); j++) { int m = (i == 0 ? 
c * 1 / 3 : c - 2); if (i != 0 && j % 3 != 2) { r_count = 0; c_no = 0; } while (r_count < m && rand() % c < m) { r_count++; c_no++; } cells[dim_start[i] + j] = c_no + 1; c_no++; } } for (int j = 0; j < dim_start[d - 1]; j++) { counts[j] = 0; } for (int j = dim_start[d - 1]; j < n; j++) { int count = rand() % 10 * rand() % 10 + 1; counts[j] = count; int p = parents[j]; while (p != -1) { counts[p] += count; p = parents[p]; } } } void print_scy_tree(int *parents, int *cells, int *counts, int *dim_start, int *dims, int d, int n) { printf("r: %d/%d\n", cells[0], counts[0]); if (d == 0) return; int *leaf_count = new int[n]; for (int i = 0; i < n; i++) leaf_count[i] = 0; for (int i = dim_start[d - 1]; i < n; i++) { leaf_count[i] = 0; int p = i; while (p > 0) { leaf_count[p]++; p = parents[p]; } } for (int i = 0; i < d; i++) { printf("%d: ", dims[i]); for (int j = dim_start[i]; j < ((i < (d - 1)) ? dim_start[i + 1] : n); j++) { if (cells[j] < 100) printf(" "); if (cells[j] < 10) printf(" "); printf("%d/%d ", cells[j], counts[j]); if (counts[j] < 100 && counts[j] > -10) printf(" "); if (counts[j] < 10 && counts[j] > -1) printf(" "); for (int k = 0; k < leaf_count[j] - 1; k++) { printf(" ", cells[j], counts[j]); } } printf("\n"); } } int get_size(int c, int d) { int lvl_size = c - c * 1 / 3; int prev_count = 0; for (int i = 0; i < d; i++) { prev_count += lvl_size; lvl_size *= 1.5; } return prev_count; } void print_array_range(int *x, int start, int end) { for (int i = start; i < end; i++) { printf("%d ", (int) x[i]); } printf("\n\n"); } void print_array(int *x, int n) { int left = 400; int right = 400; if (n <= left + right) { for (int i = 0; i < n; i++) { if (x[i] < 10 && x[i] > -1) printf(" "); if (x[i] < 100 && x[i] > -10) printf(" "); printf("%d ", (int) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%d ", (int) x[i]); } printf(" ... "); for (int i = n - right; i < n; i++) { printf("%d ", (int) x[i]); } } printf("\n\n"); } void print_array(vector<int> x, int n) { int left = 400; int right = 400; if (n <= left + right) { for (int i = 0; i < n; i++) { printf("%d ", (int) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%d ", (int) x[i]); } printf(" ... "); for (int i = n - right; i < n; i++) { printf("%d ", (int) x[i]); } } printf("\n\n"); } void print_array(float *x, int n) { int left = 30; int right = 10; if (n <= left + right) { for (int i = 0; i < n; i++) { printf("%f ", (float) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%f ", (float) x[i]); } printf(" ... "); for (int i = n - right; i < n; i++) { printf("%f ", (float) x[i]); } } printf("\n\n"); } void print_array(thrust::device_vector<int> x, int n) { int left = 30; int right = 10; if (n <= left + right) { for (int i = 0; i < n; i++) { printf("%d ", (int) x[i]); } } else { for (int i = 0; i < left; i++) { printf("%d ", x[i]); } printf(" ... 
"); for (int i = n - right; i < n; i++) { printf("%d ", x[i]); } } printf("\n\n"); } int get_incorrect(int *array_1, int *array_2, int n) { int count = 0; for (int i = 0; i < n; i++) { if (array_1[i] != array_2[i]) { count++; } } return count; } float v_mean(std::vector<float> v) { //https://stackoverflow.com/questions/28574346/find-average-of-input-to-vector-c return accumulate(v.begin(), v.end(), 0.0) / v.size(); } vector<float> m_get_col(vector <vector<float>> m, int i) { vector<float> col; for (int j = 0; j < m.size(); j++) { col.push_back(m[j][i]); } return col; } float v_min(std::vector<float> v) { float min = std::numeric_limits<float>::infinity(); for (int i = 0; i < v.size(); i++) { if (v[i] < min) { min = v[i]; } } return min; } float v_max(std::vector<float> v) { float max = -100000.; for (int i = 0; i < v.size(); i++) { if (v[i] > max) { max = v[i]; } } return max; } int v_max(std::vector<int> v) { int max = -100000; for (int i = 0; i < v.size(); i++) { if (v[i] > max) { max = v[i]; } } return max; } void m_normalize(std::vector <std::vector<float>> &m) { float *min = new float[m[0].size()]; float *max = new float[m[0].size()]; for (int j = 0; j < m[0].size(); j++) { min[j] = 100000.; max[j] = -100000.; } for (int i = 0; i < m.size(); i++) { for (int j = 0; j < m[0].size(); j++) { min[j] = min[j] < m[i][j] ? min[j] : m[i][j]; max[j] = max[j] > m[i][j] ? max[j] : m[i][j]; } printf("finding min/max: %d%%\r", int(((i + 1) * 100) / m.size())); } printf("finding min/max: 100%%\n"); for (int i = 0; i < m.size(); i++) { for (int j = 0; j < m[0].size(); j++) { m[i][j] = max[j] != min[j] ? (m[i][j] - min[j]) / (max[j] - min[j]) : 0; } printf("normalizing: %d%%\r", int(((i + 1) * 100) / m.size())); } printf("normalizing: 100%%\n"); } template<class T> vector <T> clone(vector <T> v_old) { vector <T> v_clone(v_old); return v_clone; } void zero(int *array, int n) { for (int i = 0; i < n; i++) array[i] = 0; } bool subspace_of(vector<int> subspace, vector<int> subspace_mark) { int i = 0; int j = 0; while (j < subspace_mark.size() && i < subspace.size()) { if (subspace[i] == subspace_mark[j]) { i++; j++; } else { j++; } } return i == subspace.size(); } bool vec_cmp::operator()(const vector<int> &a, const vector<int> &b) const { int i = a.size() - 1; int j = b.size() - 1; while (a[i] == b[j]) { i--; j--; if (i < 0 || j < 0) { return i < j; } } return a[i] < b[j]; } void join(map <vector<int>, vector<int>, vec_cmp> &result, vector<int> &clustering, vector<int> subspace, int min_size, float r) { int clustering_max = v_max(clustering); if (clustering_max < 0) { return; } int n = clustering.size(); map<int, int> sizes; for (int i = 0; i < n; i++) { int cluster = clustering[i]; if (cluster >= 0) { if (sizes.count(cluster)) { sizes[cluster]++; } else { sizes.insert(pair<int, int>(cluster, 1)); } } } for (int i = 0; i < n; i++) { int cluster = clustering[i]; if (cluster >= 0 && sizes[cluster] < min_size) { clustering[i] = -1; } } for (pair <vector<int>, vector<int>> subspace_clustering : result) { vector<int> subspace_H = subspace_clustering.first; vector<int> clustering_H = subspace_clustering.second; if (subspace_of(subspace, subspace_H)) { map<int, int> sizes_H; set<int> to_be_removed; for (int cluster_id: clustering_H) { if (cluster_id >= 0) { if (sizes_H.count(cluster_id)) { sizes_H[cluster_id]++; } else { sizes_H.insert(pair<int, int>(cluster_id, 1)); } } } for (int i = 0; i < n; i++) { int cluster = clustering[i]; int cluster_H = clustering_H[i]; if (cluster >= 0 && cluster_H >= 0 && 
sizes[cluster] * r < sizes_H[cluster_H]) { to_be_removed.insert(cluster); } } for (int i = 0; i < n; i++) { int cluster = clustering[i]; if (cluster >= 0 && to_be_removed.find(cluster) != to_be_removed.end()) { clustering[i] = -1; } } } } clustering_max = v_max(clustering); if (clustering_max >= 0) { if (result.count(subspace)) { vector<int> clustering_old = result[subspace]; int m = v_max(clustering_old); for (int i = 0; i < n; i++) { if (clustering[i] == -2) { clustering_old[i] = clustering[i]; } else if (clustering[i] >= 0) { clustering_old[i] = m + 1 + clustering[i]; } } result[subspace] = clustering_old; } else { result.insert(pair < vector < int > , vector < int >> (subspace, clustering)); } } } __global__ void join_count_kernel(int *d_sizes, int *d_clustering, int n) { for (int i = threadIdx.x; i < n; i += blockDim.x) { int cluster = d_clustering[i]; if (cluster >= 0) { atomicAdd(&d_sizes[cluster], 1); } } } __global__ void join_erease_kernel(int *d_sizes, int *d_clustering, int n, int min_size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int cluster = d_clustering[i]; if (cluster >= 0 && d_sizes[cluster] < min_size) { d_clustering[i] = -1; } } } __global__ void join_marke_remove_kernel(int *d_to_be_removed, int *d_sizes, int *d_clustering, int *d_sizes_H, int *d_clustering_H, int n, float r) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int cluster = d_clustering[i]; int cluster_H = d_clustering_H[i]; if (cluster >= 0 && cluster_H >= 0 && d_sizes[cluster] * r < d_sizes_H[cluster_H]) { d_to_be_removed[cluster] = 1; } } } __global__ void join_remove_kernel(int *d_to_be_removed, int *d_sizes, int *d_clustering, int n) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { int cluster = d_clustering[i]; if (cluster >= 0 && d_to_be_removed[cluster]) { d_clustering[i] = -1; } } } __global__ void copy_if_positive(int *d_clustering_old, int *d_clustering, int n) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if (d_clustering[i] >= 0) { d_clustering_old[i] = d_clustering[i]; } } } __global__ void v_max_gpu_kernel(int *d_max, int *d_array, int n) { for (int i = threadIdx.x; i < n; i += blockDim.x) { atomicMax(&d_max[0], d_array[i]); } } int v_max_gpu(int *d_array, int n) { int BLOCK_SIZE = 512; int number_of_threads = min(n, BLOCK_SIZE); int *d_max; cudaMalloc(&d_max, sizeof(int)); cudaMemset(d_max, -1, sizeof(int)); v_max_gpu_kernel << < 1, number_of_threads >> > (d_max, d_array, n); int h_max; cudaMemcpy(&h_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_max); return h_max; } int v_max_gpu(int *d_array, int n, TmpMalloc *tmps) { int BLOCK_SIZE = 512; int number_of_threads = min(n, BLOCK_SIZE); int *d_max = tmps->malloc_one(); cudaMemset(d_max, -1, sizeof(int)); v_max_gpu_kernel << < 1, number_of_threads >> > (d_max, d_array, n); int h_max; cudaMemcpy(&h_max, d_max, sizeof(int), cudaMemcpyDeviceToHost); tmps->free_one(d_max); return h_max; } void join_gpu(map<vector<int>, int *, vec_cmp> &result, int *d_clustering, vector<int> subspace, int min_size, float r, int n, TmpMalloc *tmps) { tmps->reset_counters(); int BLOCK_SIZE = 512; int number_of_blocks = n / BLOCK_SIZE; if (n % BLOCK_SIZE) number_of_blocks++; int number_of_threads = min(n, BLOCK_SIZE); int *d_sizes = tmps->get_int_array(tmps->int_array_counter++, n); cudaMemset(d_sizes, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > 
(d_sizes, d_clustering, n); join_erease_kernel << < number_of_blocks, number_of_threads >> > (d_sizes, d_clustering, n, min_size); int *d_clustering_H; int *d_sizes_H = tmps->get_int_array(tmps->int_array_counter++, n); int *d_to_be_removed = tmps->get_int_array(tmps->int_array_counter++, n); for (pair<vector<int>, int *> subspace_clustering : result) { vector<int> subspace_H = subspace_clustering.first; d_clustering_H = subspace_clustering.second; nvtxRangePushA("subspace_of"); bool sub_of = subspace_of(subspace, subspace_H); nvtxRangePop(); if (sub_of) { cudaMemset(d_sizes_H, 0, n * sizeof(int)); cudaMemset(d_to_be_removed, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > (d_sizes_H, d_clustering_H, n); join_marke_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, d_sizes_H, d_clustering_H, n, r); join_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, n); } } int clustering_max = v_max_gpu(d_clustering, n); if (clustering_max >= 0) { if (result.count(subspace)) { int *d_clustering_old = result[subspace]; copy_if_positive << < number_of_blocks, number_of_threads >> > (d_clustering_old, d_clustering, n); tmps->free_points(d_clustering); result[subspace] = d_clustering_old; } else { result.insert(pair < vector < int > , int * > (subspace, d_clustering)); } } else { tmps->free_points(d_clustering); } } void join_gpu1(map <vector<int>, vector<int>, vec_cmp> &result, vector<int> &clustering, int *d_clustering, vector<int> subspace, int min_size, float r, int n) { int BLOCK_SIZE = 512; int number_of_blocks = n / BLOCK_SIZE; if (n % BLOCK_SIZE) number_of_blocks++; int number_of_threads = min(n, BLOCK_SIZE); int *d_sizes; cudaMalloc(&d_sizes, n * sizeof(int)); cudaMemset(d_sizes, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > (d_sizes, d_clustering, n); join_erease_kernel << < number_of_blocks, number_of_threads >> > (d_sizes, d_clustering, n, min_size); int *d_subspace_H; int *d_clustering_H; cudaMalloc(&d_clustering_H, n * sizeof(int)); int *d_sizes_H; cudaMalloc(&d_sizes_H, n * sizeof(int)); int *d_to_be_removed; cudaMalloc(&d_to_be_removed, n * sizeof(int)); for (pair <vector<int>, vector<int>> subspace_clustering : result) { vector<int> subspace_H = subspace_clustering.first; vector<int> clustering_H = subspace_clustering.second; cudaMemcpy(d_clustering_H, clustering_H.data(), n * sizeof(int), cudaMemcpyHostToDevice); if (subspace_of(subspace, subspace_H)) { cudaMemset(d_sizes_H, 0, n * sizeof(int)); cudaMemset(d_to_be_removed, 0, n * sizeof(int)); join_count_kernel << < 1, number_of_threads >> > (d_sizes_H, d_clustering_H, n); join_marke_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, d_sizes_H, d_clustering_H, n, r); join_remove_kernel << < number_of_blocks, number_of_threads >> > (d_to_be_removed, d_sizes, d_clustering, n); } } cudaMemcpy(clustering.data(), d_clustering, n * sizeof(int), cudaMemcpyDeviceToHost); int clustering_max = v_max(clustering); int clustering_max_other = v_max_gpu(d_clustering, n); if (clustering_max != clustering_max_other) printf("%d!=%d\n", clustering_max, clustering_max_other); if (clustering_max >= 0) { if (result.count(subspace)) { vector<int> clustering_old = result[subspace]; int *d_clustering_old; cudaMalloc(&d_clustering_old, n * sizeof(int)); cudaMemcpy(d_clustering_old, clustering_old.data(), n * sizeof(int), cudaMemcpyHostToDevice); copy_if_positive << < 
number_of_blocks, number_of_threads >> > (d_clustering_old, d_clustering, n); cudaMemcpy(clustering_old.data(), d_clustering_old, n * sizeof(int), cudaMemcpyDeviceToHost); result[subspace] = clustering_old; } else { result.insert(pair < vector < int > , vector < int >> (subspace, clustering)); } } }
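// --- Editor's illustrative sketch (not part of the original source) ---
// Shows how v_max_gpu defined above reads back the largest cluster id from a device-side
// clustering array (noise points are marked -1). The array contents are assumptions
// chosen only for illustration.
void example_v_max_gpu_usage() {
    const int n = 8;
    int h_clustering[n] = {-1, 0, 2, 2, 1, -1, 3, 0};
    int *d_clustering;
    cudaMalloc(&d_clustering, n * sizeof(int));
    cudaMemcpy(d_clustering, h_clustering, n * sizeof(int), cudaMemcpyHostToDevice);
    int max_id = v_max_gpu(d_clustering, n);               // single-block atomicMax reduction; expected 3
    printf("largest cluster id: %d\n", max_id);
    cudaFree(d_clustering);
}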
c8fe9a44049d39bbe4824c2fa2d67026cd7e5a1f.hip
// !!! This is a file automatically generated by hipify!!! #include "Prerequisites.cuh" #include "FFT.cuh" #include "Generics.cuh" #include "Helper.cuh" namespace gtom { void d_IFFTC2R(tcomplex* const d_input, tfloat* const d_output, int const ndimensions, int3 const dimensions, int batch, bool renormalize) { hipfftHandle plan = d_IFFTC2RGetPlan(ndimensions, dimensions, batch); if (renormalize) d_IFFTC2R(d_input, d_output, &plan, dimensions, batch); else d_IFFTC2R(d_input, d_output, &plan); hipfftDestroy(plan); } hipfftHandle d_IFFTC2RGetPlan(int const ndimensions, int3 const dimensions, int batch) { hipfftHandle plan; hipfftType direction = IS_TFLOAT_DOUBLE ? HIPFFT_Z2D : HIPFFT_C2R; int n[3] = { dimensions.z, dimensions.y, dimensions.x }; CHECK_CUFFT_ERRORS(hipfftPlanMany(&plan, ndimensions, n + (3 - ndimensions), NULL, 1, 0, NULL, 1, 0, direction, batch)); //cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE); hipfftSetStream(plan, hipStreamDefault); return plan; } void d_IFFTC2R(tcomplex* const d_input, tfloat* const d_output, hipfftHandle* plan, int3 dimensions, int batch) { #ifdef GTOM_DOUBLE hipfftExecZ2D(*plan, d_input, d_output); #else CHECK_CUFFT_ERRORS(hipfftExecC2R(*plan, d_input, d_output)); #endif hipStreamSynchronize(hipStreamDefault); d_MultiplyByScalar(d_output, d_output, Elements(dimensions) * batch, 1.0f / (float)Elements(dimensions)); } void d_IFFTC2R(tcomplex* const d_input, tfloat* const d_output, hipfftHandle* plan) { #ifdef GTOM_DOUBLE hipfftExecZ2D(*plan, d_input, d_output); #else hipfftExecC2R(*plan, d_input, d_output); #endif hipStreamSynchronize(hipStreamDefault); } void d_IFFTZ2D(hipfftDoubleComplex* const d_input, double* const d_output, int const ndimensions, int3 const dimensions, int batch) { hipfftHandle plan; hipfftType direction = HIPFFT_Z2D; int n[3] = { dimensions.z, dimensions.y, dimensions.x }; hipfftPlanMany(&plan, ndimensions, n + (3 - ndimensions), NULL, 1, 0, NULL, 1, 0, direction, batch); //cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE); #ifdef GTOM_DOUBLE hipfftExecZ2D(plan, d_input, d_output); #else hipfftExecZ2D(plan, d_input, d_output); #endif hipfftDestroy(plan); size_t elements = dimensions.x * dimensions.y * dimensions.z; d_MultiplyByScalar(d_output, d_output, elements, 1.0 / (double)elements); } void d_IFFTC2RFull(tcomplex* const d_input, tfloat* const d_output, int const ndimensions, int3 const dimensions, int batch) { tcomplex* d_complexoutput; hipMalloc((void**)&d_complexoutput, Elements(dimensions) * sizeof(tcomplex)); d_IFFTC2C(d_input, d_complexoutput, ndimensions, dimensions, batch); d_Re(d_complexoutput, d_output, Elements(dimensions)); hipFree(d_complexoutput); } void d_IFFTC2C(tcomplex* const d_input, tcomplex* const d_output, int const ndimensions, int3 const dimensions, int batch) { hipfftHandle plan = d_IFFTC2CGetPlan(ndimensions, dimensions, batch); d_IFFTC2C(d_input, d_output, &plan, dimensions); hipfftDestroy(plan); } hipfftHandle d_IFFTC2CGetPlan(int const ndimensions, int3 const dimensions, int batch) { hipfftHandle plan; hipfftType direction = IS_TFLOAT_DOUBLE ? 
HIPFFT_Z2Z : HIPFFT_C2C; int n[3] = { dimensions.z, dimensions.y, dimensions.x }; hipfftPlanMany(&plan, ndimensions, n + (3 - ndimensions), NULL, 1, 0, NULL, 1, 0, direction, batch); //cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE); return plan; } void d_IFFTC2C(tcomplex* const d_input, tcomplex* const d_output, hipfftHandle* plan, int3 const dimensions) { #ifdef GTOM_DOUBLE hipfftExecZ2Z(*plan, d_input, d_output); #else hipfftExecC2C(*plan, d_input, d_output, HIPFFT_BACKWARD); #endif hipStreamQuery(0); size_t elements = dimensions.x * dimensions.y * dimensions.z; d_MultiplyByScalar((tfloat*)d_output, (tfloat*)d_output, elements * 2, 1.0f / (float)elements); } }
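// --- Editor's illustrative sketch (not part of the original source) ---
// Intended plan-reuse pattern for the inverse C2R wrappers above: build the plan once
// with d_IFFTC2RGetPlan, run it on already-allocated device buffers, then destroy it.
// The 64x64 transform size and batch of 4 are assumptions for illustration; d_freq and
// d_real must be sized accordingly by the caller.
void example_ifft_plan_reuse(tcomplex* d_freq, tfloat* d_real) {
    int3 dims;
    dims.x = 64; dims.y = 64; dims.z = 1;
    int batch = 4;
    hipfftHandle plan = gtom::d_IFFTC2RGetPlan(2, dims, batch);
    // The renormalizing overload divides by the element count, so a forward+inverse
    // round trip returns the original data.
    gtom::d_IFFTC2R(d_freq, d_real, &plan, dims, batch);
    hipfftDestroy(plan);
}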
c8fe9a44049d39bbe4824c2fa2d67026cd7e5a1f.cu
#include "Prerequisites.cuh" #include "FFT.cuh" #include "Generics.cuh" #include "Helper.cuh" namespace gtom { void d_IFFTC2R(tcomplex* const d_input, tfloat* const d_output, int const ndimensions, int3 const dimensions, int batch, bool renormalize) { cufftHandle plan = d_IFFTC2RGetPlan(ndimensions, dimensions, batch); if (renormalize) d_IFFTC2R(d_input, d_output, &plan, dimensions, batch); else d_IFFTC2R(d_input, d_output, &plan); cufftDestroy(plan); } cufftHandle d_IFFTC2RGetPlan(int const ndimensions, int3 const dimensions, int batch) { cufftHandle plan; cufftType direction = IS_TFLOAT_DOUBLE ? CUFFT_Z2D : CUFFT_C2R; int n[3] = { dimensions.z, dimensions.y, dimensions.x }; CHECK_CUFFT_ERRORS(cufftPlanMany(&plan, ndimensions, n + (3 - ndimensions), NULL, 1, 0, NULL, 1, 0, direction, batch)); //cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE); cufftSetStream(plan, cudaStreamDefault); return plan; } void d_IFFTC2R(tcomplex* const d_input, tfloat* const d_output, cufftHandle* plan, int3 dimensions, int batch) { #ifdef GTOM_DOUBLE cufftExecZ2D(*plan, d_input, d_output); #else CHECK_CUFFT_ERRORS(cufftExecC2R(*plan, d_input, d_output)); #endif cudaStreamSynchronize(cudaStreamDefault); d_MultiplyByScalar(d_output, d_output, Elements(dimensions) * batch, 1.0f / (float)Elements(dimensions)); } void d_IFFTC2R(tcomplex* const d_input, tfloat* const d_output, cufftHandle* plan) { #ifdef GTOM_DOUBLE cufftExecZ2D(*plan, d_input, d_output); #else cufftExecC2R(*plan, d_input, d_output); #endif cudaStreamSynchronize(cudaStreamDefault); } void d_IFFTZ2D(cufftDoubleComplex* const d_input, double* const d_output, int const ndimensions, int3 const dimensions, int batch) { cufftHandle plan; cufftType direction = CUFFT_Z2D; int n[3] = { dimensions.z, dimensions.y, dimensions.x }; cufftPlanMany(&plan, ndimensions, n + (3 - ndimensions), NULL, 1, 0, NULL, 1, 0, direction, batch); //cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE); #ifdef GTOM_DOUBLE cufftExecZ2D(plan, d_input, d_output); #else cufftExecZ2D(plan, d_input, d_output); #endif cufftDestroy(plan); size_t elements = dimensions.x * dimensions.y * dimensions.z; d_MultiplyByScalar(d_output, d_output, elements, 1.0 / (double)elements); } void d_IFFTC2RFull(tcomplex* const d_input, tfloat* const d_output, int const ndimensions, int3 const dimensions, int batch) { tcomplex* d_complexoutput; cudaMalloc((void**)&d_complexoutput, Elements(dimensions) * sizeof(tcomplex)); d_IFFTC2C(d_input, d_complexoutput, ndimensions, dimensions, batch); d_Re(d_complexoutput, d_output, Elements(dimensions)); cudaFree(d_complexoutput); } void d_IFFTC2C(tcomplex* const d_input, tcomplex* const d_output, int const ndimensions, int3 const dimensions, int batch) { cufftHandle plan = d_IFFTC2CGetPlan(ndimensions, dimensions, batch); d_IFFTC2C(d_input, d_output, &plan, dimensions); cufftDestroy(plan); } cufftHandle d_IFFTC2CGetPlan(int const ndimensions, int3 const dimensions, int batch) { cufftHandle plan; cufftType direction = IS_TFLOAT_DOUBLE ? 
CUFFT_Z2Z : CUFFT_C2C; int n[3] = { dimensions.z, dimensions.y, dimensions.x }; cufftPlanMany(&plan, ndimensions, n + (3 - ndimensions), NULL, 1, 0, NULL, 1, 0, direction, batch); //cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE); return plan; } void d_IFFTC2C(tcomplex* const d_input, tcomplex* const d_output, cufftHandle* plan, int3 const dimensions) { #ifdef GTOM_DOUBLE cufftExecZ2Z(*plan, d_input, d_output); #else cufftExecC2C(*plan, d_input, d_output, CUFFT_INVERSE); #endif cudaStreamQuery(0); size_t elements = dimensions.x * dimensions.y * dimensions.z; d_MultiplyByScalar((tfloat*)d_output, (tfloat*)d_output, elements * 2, 1.0f / (float)elements); } }
7b18f786784c0d42ecaacf06a20027b2923a0b89.hip
// !!! This is a file automatically generated by hipify!!! // Style: http://geosoft.no/development/cppstyle.html #include <algorithm> #include <chrono> #include <cstdio> #include <cstring> #include <fstream> #include <iostream> #include <sstream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <fmt/format.h> #include "cuda_helpers.h" #include "int_types.h" using namespace std; using namespace fmt; // 8 operations const u32 nOps = 9; const char* opStrArr[] = {"nop", "v1 ", "v2 ", "v3 ", "v4 ", "and", "or ", "eor", "not"}; const u32 NOP = 0, V1 = 1, V2 = 2, V3 = 3, V4 = 4, AND = 5, OR = 6, EOR = 7, NOT = 8; const u32 nSearchLevelsBase = 8; //8; const u32 nSearchLevelsThread = 17;//16; const u32 maxStackItems = nSearchLevelsThread / 2 + 1; const u32 nTotalTruthTables = 1 << 16; typedef u16 Bits; typedef Bits BitsStack[maxStackItems]; struct Frame { u32 op; BitsStack stack; u32 nStackItems; }; typedef Frame FrameArr[nSearchLevelsThread]; struct Program { u32 opArr[nSearchLevelsThread]; u32 nOps; }; typedef Program ProgramArr[nTotalTruthTables]; struct Base { Program program; Frame frame; }; typedef vector<Base> BaseVec; typedef chrono::high_resolution_clock Time; typedef chrono::duration<float> Fsec; __device__ __managed__ ProgramArr optimalProgramArr; __device__ __managed__ u32 nFilledTruthTables = 0; __device__ __managed__ u64 nValidProgramsFound = 0; __device__ __managed__ u64 nFoundPrograms[nTotalTruthTables]; bool initCuda(u32 cudaDevice); BaseVec generateBases(); void searchBase(); void gpuSearch(const BaseVec& baseVec); __global__ void searchKernel(u32 nThreads, const Base* baseVec); void cpuCheckBase(const Base& base); __device__ void gpuCheckBase(const Base& base); void printLastEvaluated(const Base& base, const FrameArr& frameArr, u32 nCurrentLevel, Bits truthTable); void printStatus(); void cpuSearch(const BaseVec& baseVec); void gpuSearch(const BaseVec& baseVec); inline void testProgramGenerator(); __host__ __device__ inline u32 nextValidProgram(FrameArr& frameArr, u32 nCurrentLevel, u32 nBaseLevels, u32 nSearchLevels, bool makeBases); __host__ __device__ inline void evalOperation(Frame&); __host__ __device__ inline void push(Frame& f, Bits v); __host__ __device__ inline Bits pop(Frame& s); void writeResults(const string& path, const ProgramArr& optimalProgramArr); void writeHistogram(const string& path); string serializeProgram(const FrameArr&, u32 nFrames); string serializeBase(const Base& base); string serializeFrame(const Frame& f); string secondsToHms(double sec); auto startTime = Time::now(); int main(int argc, char *argv[]) { // Switch from C locale to user's locale. This will typically cause integers to be printed with thousands // separators. locale::global(locale("")); cout.imbue(locale("")); print("Search levels: {}\n", nSearchLevelsThread); print("Base levels: {}\n", nSearchLevelsBase); print("Max stack items: {}\n", maxStackItems); // testProgramGenerator(); // return 0; memset(optimalProgramArr, 0, sizeof(optimalProgramArr)); memset(nFoundPrograms, 0, sizeof(nFoundPrograms)); auto baseVec = generateBases(); print("Bases: {}\n", baseVec.size()); //// int i = 0; // for (auto base : baseVec) { //// if (i++ == 32) { //// break; //// } // print("{:<50}{}\n", "Sorted base", serializeBase(base)); // } // Quick search to find the programs that have length lower or equal to the base. 
searchBase(); if (argc == 2) { u32 cudaDevice = atoi(argv[1]); if (!initCuda(cudaDevice)) { return 1; } print("\nRunning GPU search\n"); gpuSearch(baseVec); writeHistogram(format("bitwise-hist-{}-{}-{}.txt", nSearchLevelsBase, nSearchLevelsThread, nValidProgramsFound)); } else { print("\nCUDA device not selected. Running CPU search\n"); cpuSearch(baseVec); } printStatus(); writeResults("bitwise.txt", optimalProgramArr); return 0; } bool initCuda(u32 cudaDevice) { u32 cudaDeviceCount; checkCudaErrorsNoSync(hipGetDeviceCount(reinterpret_cast<int*>(&cudaDeviceCount))); if (!cudaDeviceCount) { print("Error: Found no devices supporting CUDA\n"); return false; } print("\nUsing CUDA device: {0}\n\n", cudaDevice); checkCudaErrors(hipSetDevice(cudaDevice)); printDeviceInfo(cudaDevice); return true; } // CPU void cpuSearch(const BaseVec& baseVec) { for (auto base : baseVec) { cpuCheckBase(base); } } void cpuCheckBase(const Base& base) { u32 nCurrentLevel = 0; u32 nSearchLevels = nSearchLevelsThread - nSearchLevelsBase + 1 /* nop */; Frame frameArr[nSearchLevelsThread]; // memcpy(frameArr, &base.frame, sizeof(base.frame)); frameArr[0] = base.frame; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevels, false))) { ++nValidProgramsFound; Bits truthTable = frameArr[nCurrentLevel].stack[0]; if (!optimalProgramArr[truthTable].nOps || optimalProgramArr[truthTable].nOps > base.program.nOps + nCurrentLevel) { if (!optimalProgramArr[truthTable].nOps) { ++nFilledTruthTables; } for (u32 i = 0; i < base.program.nOps - 1; ++i) { optimalProgramArr[truthTable].opArr[i] = base.program.opArr[i]; } for (u32 i = 0; i <= nCurrentLevel + 1; ++i) { optimalProgramArr[truthTable].opArr[base.program.nOps - 1 + i] = frameArr[i].op; } optimalProgramArr[truthTable].nOps = base.program.nOps + nCurrentLevel; } if (!(nValidProgramsFound & 0xffffff)) { printStatus(); printLastEvaluated(base, frameArr, nCurrentLevel, truthTable); } } } // GPU void gpuSearch(const BaseVec& baseVec) { hipProfilerStart(); u32 nThreads = static_cast<u32>(baseVec.size()); // u32 nThreads = 32; int nThreadsPerBlock = 1024; int nBlocksPerGrid = (nThreads + nThreadsPerBlock - 1) / nThreadsPerBlock; Base* dBaseVec; hipMallocManaged(&dBaseVec, nThreads * sizeof(Base)); memcpy(dBaseVec, &baseVec[0], nThreads * sizeof(Base)); hipDeviceSynchronize(); hipLaunchKernelGGL(( searchKernel), dim3(nBlocksPerGrid), dim3(nThreadsPerBlock), 0, 0, nThreads, dBaseVec); hipDeviceSynchronize(); hipError_t err(hipGetLastError()); if (err != hipSuccess) { print("Error: Kernel launch failed: {}\n", hipGetErrorString(err)); } hipProfilerStop(); hipDeviceReset(); } __global__ void searchKernel(u32 nThreads, const Base* baseVec) { u32 i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= nThreads) { return; } gpuCheckBase(baseVec[i]); } __device__ void gpuCheckBase(const Base& base) { ::printf("%d nOps=%d\n", threadIdx.x, base.program.nOps); ::printf("%d nStackItems=%d\n", threadIdx.x, base.frame.nStackItems); for (int i = 0; i < base.frame.nStackItems; ++i) { ::printf(" %d", base.frame.stack[i]); } ::printf("\n"); u32 nCurrentLevel = 0; u32 nSearchLevels = nSearchLevelsThread - nSearchLevelsBase + 1 /* nop */; Frame frameArr[nSearchLevelsThread]; // nSearchLevels memset(frameArr, 0, sizeof(frameArr)); /////////////// //memcpy(frameArr, &base.frame, sizeof(base.frame)); frameArr[0] = base.frame; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevels, false))) { Bits truthTable = frameArr[nCurrentLevel].stack[0]; // // TODO: 
CUDA PROFILING // u64 old = atomicAdd(reinterpret_cast<unsigned long long*>(&nValidProgramsFound), 1); // if (old > 10000) { // return; // } atomicAdd(reinterpret_cast<unsigned long long*>(&nFoundPrograms[truthTable]), 1); } } void printStatus() { Fsec elapsedSec = Time::now() - startTime; print("\nWalltime: {} ({:.2f}s)\n", secondsToHms(elapsedSec.count()), elapsedSec.count()); print("Filled truth tables: {} ({:.2f}%)\n", nFilledTruthTables, static_cast<float>(nFilledTruthTables) / nTotalTruthTables * 100.0f); print("Valid programs: {} ({:d} per sec)\n", nValidProgramsFound, static_cast<u32>(nValidProgramsFound / elapsedSec.count())); } void printLastEvaluated(const Base& base, const FrameArr& frameArr, u32 nCurrentLevel, Bits truthTable) { print("Last evaluated: {:016b}:{} -{} ({} ops)\n", truthTable, serializeBase(base), serializeProgram(frameArr, nCurrentLevel), base.program.nOps + nCurrentLevel); } void testProgramGenerator() { FrameArr frameArr; frameArr[0].op = 0; // nop frameArr[0].nStackItems = 0; evalOperation(frameArr[0]); u32 nCurrentLevel = 0; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevelsThread, false))) { print("{:<50}{}\n", "## RECEIVED ##", serializeProgram(frameArr, nCurrentLevel)); } } inline bool opIsLoad(const Base& base, u32 i) { return base.program.opArr[i] >= V1 && base.program.opArr[i] <= V4; } inline bool opIsAndOr(const Base& base, u32 i) { return base.program.opArr[i] == AND || base.program.opArr[i] == OR; } struct less_than_key { inline bool operator() (const Base& a, const Base& b) { // if (b.program.nOps > a.program.nOps) { //// print(">\n"); // return false; // } for (int i = a.program.nOps - 1; i >= 0; --i) { // if (opIsLoad(a, i) && opIsLoad(b, i)) { // continue; // } // if (opIsAndOr(a, i) && opIsAndOr(b, i)) { // continue; // } if (a.program.opArr[i] < b.program.opArr[i]) { return true; } if (a.program.opArr[i] > b.program.opArr[i]) { return false; } } assert(false); return false; } }; struct less_than_key2 { inline bool operator() (const Base& a, const Base& b) { // if (b.program.nOps > a.program.nOps) { //// print(">\n"); // return false; // } for (int i = a.program.nOps - 1; i >= 0; --i) { // if (opIsLoad(a, i) && opIsLoad(b, i)) { // continue; // } // if (opIsAndOr(a, i) && opIsAndOr(b, i)) { // continue; // } if (a.program.opArr[i] < b.program.opArr[i]) { return true; } if (a.program.opArr[i] > b.program.opArr[i]) { return false; } } assert(false); return false; } }; struct less_than_key3 { inline bool operator() (const Base& a, const Base& b) { for (int i = static_cast<int>(a.program.nOps) - 1; i >= 0; --i) { if (i == static_cast<int>(a.program.nOps) - 3) { if (a.frame.nStackItems < b.frame.nStackItems) { return true; } if (a.frame.nStackItems > b.frame.nStackItems) { return false; } } if (a.program.opArr[i] < b.program.opArr[i]) { return true; } if (a.program.opArr[i] > b.program.opArr[i]) { return false; } } assert(false); return false; } }; BaseVec generateBases() { FrameArr frameArr; frameArr[0].op = NOP; frameArr[0].nStackItems = 0; u32 nCurrentLevel = 0; // 1 is first real level, 0 is always the NOP base. 
BaseVec baseVec; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevelsBase + 1 /* nop */, true))) { // print("{:<50}{}\n", "## RECEIVED ##", serializeProgram(frameArr, nCurrentLevel)); if (nCurrentLevel != nSearchLevelsBase) { continue; } // print("{:<50}{}\n", "Base", serializeProgram(frameArr, nCurrentLevel)); Base base; for (u32 i = 1; i <= nCurrentLevel; ++i) { base.program.opArr[i - 1] = frameArr[i].op; } base.program.nOps = nCurrentLevel; base.frame = frameArr[nCurrentLevel]; baseVec.push_back(base); } sort(baseVec.begin(), baseVec.end(), less_than_key3()); return baseVec; } void searchBase() { FrameArr frameArr; frameArr[0].op = NOP; frameArr[0].nStackItems = 0; u32 nCurrentLevel = 0; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevelsBase, false))) { Bits truthTable = frameArr[nCurrentLevel].stack[0]; optimalProgramArr[truthTable].nOps = nCurrentLevel; for (u32 i = 1; i <= nCurrentLevel; ++i) { // skip nop optimalProgramArr[truthTable].opArr[i - 1] = frameArr[i].op; } } } __host__ __device__ u32 nextValidProgram(FrameArr& frameArr, u32 nCurrentLevel, u32 nBaseLevels, u32 nSearchLevels, bool makeBases) { // print("{:<50}{}\n", "Entering", serializeProgram(frameArr, nCurrentLevel)); bool descendIfPossible = true; while (true) { bool newLevel = false; if (descendIfPossible) { descendIfPossible = false; // Skip if we're already at the lowest level. if (nCurrentLevel == nSearchLevels - 1) { continue; } // Enter branch. ++nCurrentLevel; newLevel = true; } if (newLevel) { frameArr[nCurrentLevel].op = 1; // print("{:<50}{}\n", "Descended to new level", serializeProgram(frameArr, nCurrentLevel)); } else { frameArr[nCurrentLevel].op += 1; if (frameArr[nCurrentLevel].op == nOps) { if (--nCurrentLevel < nBaseLevels) { return nCurrentLevel; } // print("{:<50}{}\n", "Ascended to higher level, checked earlier", serializeProgram(frameArr, nCurrentLevel)); continue; } } // // FOR TESTING: Skip branch that has more than 3 values on the stack. // if (frameArr[nCurrentLevel - 1].nStackItems > 3) { // continue; // } // Skip branch that starts with (Vx Vy and/or) when x >= y if (nCurrentLevel >= 2 && frameArr[nCurrentLevel].op >= AND && frameArr[nCurrentLevel].op <= OR && frameArr[nCurrentLevel - 2].op >= V1 && frameArr[nCurrentLevel - 1].op <= V4 && frameArr[nCurrentLevel - 2].op >= frameArr[nCurrentLevel - 1].op) { continue; } // Skip branch that starts with (Vx Vy eor) when x > y if (nCurrentLevel >= 2 && frameArr[nCurrentLevel].op == EOR && frameArr[nCurrentLevel - 2].op >= V1 && frameArr[nCurrentLevel - 1].op <= V4 && frameArr[nCurrentLevel - 2].op > frameArr[nCurrentLevel - 1].op) { continue; } // Only enter and/or/eor branch when stackDepth > 1 if (frameArr[nCurrentLevel - 1].nStackItems <= 1 && frameArr[nCurrentLevel].op >= AND && frameArr[nCurrentLevel].op <= EOR) { continue; } // Only enter load branch when stackDepth < remaining u32 nUnusedLevels = nSearchLevels - nCurrentLevel; if (frameArr[nCurrentLevel - 1].nStackItems > nUnusedLevels + 1) { continue; } // Only enter not branch when stackDepth <= remaining && stackDepth > 0 if (frameArr[nCurrentLevel].op == NOT && ! (frameArr[nCurrentLevel - 1].nStackItems <= nUnusedLevels && frameArr[nCurrentLevel - 1].nStackItems > 0)) { continue; } // Don't enter branch which ends in (not not) if (frameArr[nCurrentLevel - 1].op == NOT && frameArr[nCurrentLevel].op == NOT) { continue; } // Copy the stack from the previous level. 
frameArr[nCurrentLevel].nStackItems = frameArr[nCurrentLevel - 1].nStackItems; for (u32 i = 0; i < frameArr[nCurrentLevel].nStackItems; ++i) { frameArr[nCurrentLevel].stack[i] = frameArr[nCurrentLevel - 1].stack[i]; } // Evaluate the next operation on top of the new stack. evalOperation(frameArr[nCurrentLevel]); if (makeBases) { return nCurrentLevel; } // This optimization can't be used in the current CUDA version. It causes the threads to fall out of sync. // // Don't enter branch which has a base in which a shorter program already exists in the table of optimal programs // if (frameArr[nCurrentLevel].nStackItems == 1) { // Bits truthTable = frameArr[nCurrentLevel].stack[0]; // if (optimalProgramArr[truthTable].nOps && optimalProgramArr[truthTable].nOps <= nCurrentLevel + 1) { // continue; // } // } if (frameArr[nCurrentLevel].nStackItems != 1) { descendIfPossible = true; continue; } // print("{:<50}{} results={}\n", "Skipping program that returns <> 1 results", serializeProgram(frameArr, nCurrentLevel), frameArr[nCurrentLevel].nStackItems); // print("{:<50}{}\n", "Returning valid program", serializeProgram(frameArr, nCurrentLevel)); return nCurrentLevel; } } __host__ __device__ void evalOperation(Frame& f) { switch (f.op) { case NOP: assert(false); case V1: push(f, 0b1010101010101010); break; case V2: push(f, 0b1100110011001100); break; case V3: push(f, 0b1111000011110000); break; case V4: push(f, 0b1111111100000000); break; case AND: push(f, pop(f) & pop(f)); break; case OR: push(f, pop(f) | pop(f)); break; case EOR: push(f, pop(f) ^ pop(f)); break; case NOT: push(f, ~pop(f)); break; default: assert(false); } } __host__ __device__ void push(Frame& f, Bits v) { f.stack[f.nStackItems] = v; ++f.nStackItems; } __host__ __device__ Bits pop(Frame& f) { --f.nStackItems; return f.stack[f.nStackItems]; } void writeResults(const string& path, const ProgramArr& programArr) { ofstream f(path, ios::out); for (u32 i = 0; i < nTotalTruthTables; ++i) { f << format("{:016b}:", i); if (programArr[i].nOps) { for (u32 j = 0; j < programArr[i].nOps; ++j) { f << format(" {}", opStrArr[programArr[i].opArr[j]]); } f << format(" ({} ops)", programArr[i].nOps); } else { f << " <none>"; } f << format("\n"); } } void writeHistogram(const string& path) { ofstream f(path, ios::out); for (u32 i = 0; i < nTotalTruthTables; ++i) { f << format("{:016b}: {}\n", i, nFoundPrograms[i]); } } string serializeProgram(const FrameArr& frameArr, u32 nFrames) { stringstream ss; for (u32 i = 1; i <= nFrames; ++i) { // skip the first op, which is nop or base connection ss << format(" {}", opStrArr[frameArr[i].op]); } return ss.str(); } string serializeBase(const Base& base) { stringstream ss; for (u32 i = 0; i < base.program.nOps; ++i) { ss << format(" {}", opStrArr[base.program.opArr[i]]); } ss << format(" (s={})", base.frame.nStackItems); return ss.str(); } string serializeFrame(const Frame& f) { stringstream ss; ss << format("Frame: op={} nStack={} stack=", opStrArr[f.op], f.nStackItems); for (u32 i = 0; i < f.nStackItems; ++i) { ss << format("{:016b}", f.stack[i]); } return ss.str(); } string secondsToHms(double sec) { u64 n = static_cast<u64>(sec); u64 s = n % 60; n /= 60; u64 m = n % 60; n /= 60; u64 h = n % 60; return format("{:02d}:{:02d}:{:02d}", h, m, s); }
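// --- Editor's illustrative worked example (not part of the original source) ---
// Hand-evaluates the RPN program "v1 v2 and not" with the same Frame/evalOperation
// machinery the search uses, leaving the 16-bit NAND truth table (0x7777) on the stack.
// The program choice is an assumption picked only to show the stack discipline.
void exampleEvalNand() {
    Frame f;
    f.nStackItems = 0;
    const u32 program[] = {V1, V2, AND, NOT};
    for (u32 op : program) {
        f.op = op;
        evalOperation(f);                                  // push/push/and/not on the truth-table stack
    }
    print("v1 v2 and not -> {:016b} ({} value left on stack)\n", f.stack[0], f.nStackItems);
}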
7b18f786784c0d42ecaacf06a20027b2923a0b89.cu
// Style: http://geosoft.no/development/cppstyle.html #include <algorithm> #include <chrono> #include <cstdio> #include <cstring> #include <fstream> #include <iostream> #include <sstream> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <fmt/format.h> #include "cuda_helpers.h" #include "int_types.h" using namespace std; using namespace fmt; // 8 operations const u32 nOps = 9; const char* opStrArr[] = {"nop", "v1 ", "v2 ", "v3 ", "v4 ", "and", "or ", "eor", "not"}; const u32 NOP = 0, V1 = 1, V2 = 2, V3 = 3, V4 = 4, AND = 5, OR = 6, EOR = 7, NOT = 8; const u32 nSearchLevelsBase = 8; //8; const u32 nSearchLevelsThread = 17;//16; const u32 maxStackItems = nSearchLevelsThread / 2 + 1; const u32 nTotalTruthTables = 1 << 16; typedef u16 Bits; typedef Bits BitsStack[maxStackItems]; struct Frame { u32 op; BitsStack stack; u32 nStackItems; }; typedef Frame FrameArr[nSearchLevelsThread]; struct Program { u32 opArr[nSearchLevelsThread]; u32 nOps; }; typedef Program ProgramArr[nTotalTruthTables]; struct Base { Program program; Frame frame; }; typedef vector<Base> BaseVec; typedef chrono::high_resolution_clock Time; typedef chrono::duration<float> Fsec; __device__ __managed__ ProgramArr optimalProgramArr; __device__ __managed__ u32 nFilledTruthTables = 0; __device__ __managed__ u64 nValidProgramsFound = 0; __device__ __managed__ u64 nFoundPrograms[nTotalTruthTables]; bool initCuda(u32 cudaDevice); BaseVec generateBases(); void searchBase(); void gpuSearch(const BaseVec& baseVec); __global__ void searchKernel(u32 nThreads, const Base* baseVec); void cpuCheckBase(const Base& base); __device__ void gpuCheckBase(const Base& base); void printLastEvaluated(const Base& base, const FrameArr& frameArr, u32 nCurrentLevel, Bits truthTable); void printStatus(); void cpuSearch(const BaseVec& baseVec); void gpuSearch(const BaseVec& baseVec); inline void testProgramGenerator(); __host__ __device__ inline u32 nextValidProgram(FrameArr& frameArr, u32 nCurrentLevel, u32 nBaseLevels, u32 nSearchLevels, bool makeBases); __host__ __device__ inline void evalOperation(Frame&); __host__ __device__ inline void push(Frame& f, Bits v); __host__ __device__ inline Bits pop(Frame& s); void writeResults(const string& path, const ProgramArr& optimalProgramArr); void writeHistogram(const string& path); string serializeProgram(const FrameArr&, u32 nFrames); string serializeBase(const Base& base); string serializeFrame(const Frame& f); string secondsToHms(double sec); auto startTime = Time::now(); int main(int argc, char *argv[]) { // Switch from C locale to user's locale. This will typically cause integers to be printed with thousands // separators. locale::global(locale("")); cout.imbue(locale("")); print("Search levels: {}\n", nSearchLevelsThread); print("Base levels: {}\n", nSearchLevelsBase); print("Max stack items: {}\n", maxStackItems); // testProgramGenerator(); // return 0; memset(optimalProgramArr, 0, sizeof(optimalProgramArr)); memset(nFoundPrograms, 0, sizeof(nFoundPrograms)); auto baseVec = generateBases(); print("Bases: {}\n", baseVec.size()); //// int i = 0; // for (auto base : baseVec) { //// if (i++ == 32) { //// break; //// } // print("{:<50}{}\n", "Sorted base", serializeBase(base)); // } // Quick search to find the programs that have length lower or equal to the base. 
searchBase(); if (argc == 2) { u32 cudaDevice = atoi(argv[1]); if (!initCuda(cudaDevice)) { return 1; } print("\nRunning GPU search\n"); gpuSearch(baseVec); writeHistogram(format("bitwise-hist-{}-{}-{}.txt", nSearchLevelsBase, nSearchLevelsThread, nValidProgramsFound)); } else { print("\nCUDA device not selected. Running CPU search\n"); cpuSearch(baseVec); } printStatus(); writeResults("bitwise.txt", optimalProgramArr); return 0; } bool initCuda(u32 cudaDevice) { u32 cudaDeviceCount; checkCudaErrorsNoSync(cudaGetDeviceCount(reinterpret_cast<int*>(&cudaDeviceCount))); if (!cudaDeviceCount) { print("Error: Found no devices supporting CUDA\n"); return false; } print("\nUsing CUDA device: {0}\n\n", cudaDevice); checkCudaErrors(cudaSetDevice(cudaDevice)); printDeviceInfo(cudaDevice); return true; } // CPU void cpuSearch(const BaseVec& baseVec) { for (auto base : baseVec) { cpuCheckBase(base); } } void cpuCheckBase(const Base& base) { u32 nCurrentLevel = 0; u32 nSearchLevels = nSearchLevelsThread - nSearchLevelsBase + 1 /* nop */; Frame frameArr[nSearchLevelsThread]; // memcpy(frameArr, &base.frame, sizeof(base.frame)); frameArr[0] = base.frame; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevels, false))) { ++nValidProgramsFound; Bits truthTable = frameArr[nCurrentLevel].stack[0]; if (!optimalProgramArr[truthTable].nOps || optimalProgramArr[truthTable].nOps > base.program.nOps + nCurrentLevel) { if (!optimalProgramArr[truthTable].nOps) { ++nFilledTruthTables; } for (u32 i = 0; i < base.program.nOps - 1; ++i) { optimalProgramArr[truthTable].opArr[i] = base.program.opArr[i]; } for (u32 i = 0; i <= nCurrentLevel + 1; ++i) { optimalProgramArr[truthTable].opArr[base.program.nOps - 1 + i] = frameArr[i].op; } optimalProgramArr[truthTable].nOps = base.program.nOps + nCurrentLevel; } if (!(nValidProgramsFound & 0xffffff)) { printStatus(); printLastEvaluated(base, frameArr, nCurrentLevel, truthTable); } } } // GPU void gpuSearch(const BaseVec& baseVec) { cudaProfilerStart(); u32 nThreads = static_cast<u32>(baseVec.size()); // u32 nThreads = 32; int nThreadsPerBlock = 1024; int nBlocksPerGrid = (nThreads + nThreadsPerBlock - 1) / nThreadsPerBlock; Base* dBaseVec; cudaMallocManaged(&dBaseVec, nThreads * sizeof(Base)); memcpy(dBaseVec, &baseVec[0], nThreads * sizeof(Base)); cudaDeviceSynchronize(); searchKernel<<<nBlocksPerGrid, nThreadsPerBlock>>>(nThreads, dBaseVec); cudaDeviceSynchronize(); cudaError_t err(cudaGetLastError()); if (err != cudaSuccess) { print("Error: Kernel launch failed: {}\n", cudaGetErrorString(err)); } cudaProfilerStop(); cudaDeviceReset(); } __global__ void searchKernel(u32 nThreads, const Base* baseVec) { u32 i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= nThreads) { return; } gpuCheckBase(baseVec[i]); } __device__ void gpuCheckBase(const Base& base) { ::printf("%d nOps=%d\n", threadIdx.x, base.program.nOps); ::printf("%d nStackItems=%d\n", threadIdx.x, base.frame.nStackItems); for (int i = 0; i < base.frame.nStackItems; ++i) { ::printf(" %d", base.frame.stack[i]); } ::printf("\n"); u32 nCurrentLevel = 0; u32 nSearchLevels = nSearchLevelsThread - nSearchLevelsBase + 1 /* nop */; Frame frameArr[nSearchLevelsThread]; // nSearchLevels memset(frameArr, 0, sizeof(frameArr)); /////////////// //memcpy(frameArr, &base.frame, sizeof(base.frame)); frameArr[0] = base.frame; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevels, false))) { Bits truthTable = frameArr[nCurrentLevel].stack[0]; // // TODO: CUDA PROFILING // u64 old = 
atomicAdd(reinterpret_cast<unsigned long long*>(&nValidProgramsFound), 1); // if (old > 10000) { // return; // } atomicAdd(reinterpret_cast<unsigned long long*>(&nFoundPrograms[truthTable]), 1); } } void printStatus() { Fsec elapsedSec = Time::now() - startTime; print("\nWalltime: {} ({:.2f}s)\n", secondsToHms(elapsedSec.count()), elapsedSec.count()); print("Filled truth tables: {} ({:.2f}%)\n", nFilledTruthTables, static_cast<float>(nFilledTruthTables) / nTotalTruthTables * 100.0f); print("Valid programs: {} ({:d} per sec)\n", nValidProgramsFound, static_cast<u32>(nValidProgramsFound / elapsedSec.count())); } void printLastEvaluated(const Base& base, const FrameArr& frameArr, u32 nCurrentLevel, Bits truthTable) { print("Last evaluated: {:016b}:{} -{} ({} ops)\n", truthTable, serializeBase(base), serializeProgram(frameArr, nCurrentLevel), base.program.nOps + nCurrentLevel); } void testProgramGenerator() { FrameArr frameArr; frameArr[0].op = 0; // nop frameArr[0].nStackItems = 0; evalOperation(frameArr[0]); u32 nCurrentLevel = 0; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevelsThread, false))) { print("{:<50}{}\n", "## RECEIVED ##", serializeProgram(frameArr, nCurrentLevel)); } } inline bool opIsLoad(const Base& base, u32 i) { return base.program.opArr[i] >= V1 && base.program.opArr[i] <= V4; } inline bool opIsAndOr(const Base& base, u32 i) { return base.program.opArr[i] == AND || base.program.opArr[i] == OR; } struct less_than_key { inline bool operator() (const Base& a, const Base& b) { // if (b.program.nOps > a.program.nOps) { //// print(">\n"); // return false; // } for (int i = a.program.nOps - 1; i >= 0; --i) { // if (opIsLoad(a, i) && opIsLoad(b, i)) { // continue; // } // if (opIsAndOr(a, i) && opIsAndOr(b, i)) { // continue; // } if (a.program.opArr[i] < b.program.opArr[i]) { return true; } if (a.program.opArr[i] > b.program.opArr[i]) { return false; } } assert(false); return false; } }; struct less_than_key2 { inline bool operator() (const Base& a, const Base& b) { // if (b.program.nOps > a.program.nOps) { //// print(">\n"); // return false; // } for (int i = a.program.nOps - 1; i >= 0; --i) { // if (opIsLoad(a, i) && opIsLoad(b, i)) { // continue; // } // if (opIsAndOr(a, i) && opIsAndOr(b, i)) { // continue; // } if (a.program.opArr[i] < b.program.opArr[i]) { return true; } if (a.program.opArr[i] > b.program.opArr[i]) { return false; } } assert(false); return false; } }; struct less_than_key3 { inline bool operator() (const Base& a, const Base& b) { for (int i = static_cast<int>(a.program.nOps) - 1; i >= 0; --i) { if (i == static_cast<int>(a.program.nOps) - 3) { if (a.frame.nStackItems < b.frame.nStackItems) { return true; } if (a.frame.nStackItems > b.frame.nStackItems) { return false; } } if (a.program.opArr[i] < b.program.opArr[i]) { return true; } if (a.program.opArr[i] > b.program.opArr[i]) { return false; } } assert(false); return false; } }; BaseVec generateBases() { FrameArr frameArr; frameArr[0].op = NOP; frameArr[0].nStackItems = 0; u32 nCurrentLevel = 0; // 1 is first real level, 0 is always the NOP base. 
BaseVec baseVec; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevelsBase + 1 /* nop */, true))) { // print("{:<50}{}\n", "## RECEIVED ##", serializeProgram(frameArr, nCurrentLevel)); if (nCurrentLevel != nSearchLevelsBase) { continue; } // print("{:<50}{}\n", "Base", serializeProgram(frameArr, nCurrentLevel)); Base base; for (u32 i = 1; i <= nCurrentLevel; ++i) { base.program.opArr[i - 1] = frameArr[i].op; } base.program.nOps = nCurrentLevel; base.frame = frameArr[nCurrentLevel]; baseVec.push_back(base); } sort(baseVec.begin(), baseVec.end(), less_than_key3()); return baseVec; } void searchBase() { FrameArr frameArr; frameArr[0].op = NOP; frameArr[0].nStackItems = 0; u32 nCurrentLevel = 0; while ((nCurrentLevel = nextValidProgram(frameArr, nCurrentLevel, 1, nSearchLevelsBase, false))) { Bits truthTable = frameArr[nCurrentLevel].stack[0]; optimalProgramArr[truthTable].nOps = nCurrentLevel; for (u32 i = 1; i <= nCurrentLevel; ++i) { // skip nop optimalProgramArr[truthTable].opArr[i - 1] = frameArr[i].op; } } } __host__ __device__ u32 nextValidProgram(FrameArr& frameArr, u32 nCurrentLevel, u32 nBaseLevels, u32 nSearchLevels, bool makeBases) { // print("{:<50}{}\n", "Entering", serializeProgram(frameArr, nCurrentLevel)); bool descendIfPossible = true; while (true) { bool newLevel = false; if (descendIfPossible) { descendIfPossible = false; // Skip if we're already at the lowest level. if (nCurrentLevel == nSearchLevels - 1) { continue; } // Enter branch. ++nCurrentLevel; newLevel = true; } if (newLevel) { frameArr[nCurrentLevel].op = 1; // print("{:<50}{}\n", "Descended to new level", serializeProgram(frameArr, nCurrentLevel)); } else { frameArr[nCurrentLevel].op += 1; if (frameArr[nCurrentLevel].op == nOps) { if (--nCurrentLevel < nBaseLevels) { return nCurrentLevel; } // print("{:<50}{}\n", "Ascended to higher level, checked earlier", serializeProgram(frameArr, nCurrentLevel)); continue; } } // // FOR TESTING: Skip branch that has more than 3 values on the stack. // if (frameArr[nCurrentLevel - 1].nStackItems > 3) { // continue; // } // Skip branch that starts with (Vx Vy and/or) when x >= y if (nCurrentLevel >= 2 && frameArr[nCurrentLevel].op >= AND && frameArr[nCurrentLevel].op <= OR && frameArr[nCurrentLevel - 2].op >= V1 && frameArr[nCurrentLevel - 1].op <= V4 && frameArr[nCurrentLevel - 2].op >= frameArr[nCurrentLevel - 1].op) { continue; } // Skip branch that starts with (Vx Vy eor) when x > y if (nCurrentLevel >= 2 && frameArr[nCurrentLevel].op == EOR && frameArr[nCurrentLevel - 2].op >= V1 && frameArr[nCurrentLevel - 1].op <= V4 && frameArr[nCurrentLevel - 2].op > frameArr[nCurrentLevel - 1].op) { continue; } // Only enter and/or/eor branch when stackDepth > 1 if (frameArr[nCurrentLevel - 1].nStackItems <= 1 && frameArr[nCurrentLevel].op >= AND && frameArr[nCurrentLevel].op <= EOR) { continue; } // Only enter load branch when stackDepth < remaining u32 nUnusedLevels = nSearchLevels - nCurrentLevel; if (frameArr[nCurrentLevel - 1].nStackItems > nUnusedLevels + 1) { continue; } // Only enter not branch when stackDepth <= remaining && stackDepth > 0 if (frameArr[nCurrentLevel].op == NOT && ! (frameArr[nCurrentLevel - 1].nStackItems <= nUnusedLevels && frameArr[nCurrentLevel - 1].nStackItems > 0)) { continue; } // Don't enter branch which ends in (not not) if (frameArr[nCurrentLevel - 1].op == NOT && frameArr[nCurrentLevel].op == NOT) { continue; } // Copy the stack from the previous level. 
frameArr[nCurrentLevel].nStackItems = frameArr[nCurrentLevel - 1].nStackItems; for (u32 i = 0; i < frameArr[nCurrentLevel].nStackItems; ++i) { frameArr[nCurrentLevel].stack[i] = frameArr[nCurrentLevel - 1].stack[i]; } // Evaluate the next operation on top of the new stack. evalOperation(frameArr[nCurrentLevel]); if (makeBases) { return nCurrentLevel; } // This optimization can't be used in the current CUDA version. It causes the threads to fall out of sync. // // Don't enter branch which has a base in which a shorter program already exists in the table of optimal programs // if (frameArr[nCurrentLevel].nStackItems == 1) { // Bits truthTable = frameArr[nCurrentLevel].stack[0]; // if (optimalProgramArr[truthTable].nOps && optimalProgramArr[truthTable].nOps <= nCurrentLevel + 1) { // continue; // } // } if (frameArr[nCurrentLevel].nStackItems != 1) { descendIfPossible = true; continue; } // print("{:<50}{} results={}\n", "Skipping program that returns <> 1 results", serializeProgram(frameArr, nCurrentLevel), frameArr[nCurrentLevel].nStackItems); // print("{:<50}{}\n", "Returning valid program", serializeProgram(frameArr, nCurrentLevel)); return nCurrentLevel; } } __host__ __device__ void evalOperation(Frame& f) { switch (f.op) { case NOP: assert(false); case V1: push(f, 0b1010101010101010); break; case V2: push(f, 0b1100110011001100); break; case V3: push(f, 0b1111000011110000); break; case V4: push(f, 0b1111111100000000); break; case AND: push(f, pop(f) & pop(f)); break; case OR: push(f, pop(f) | pop(f)); break; case EOR: push(f, pop(f) ^ pop(f)); break; case NOT: push(f, ~pop(f)); break; default: assert(false); } } __host__ __device__ void push(Frame& f, Bits v) { f.stack[f.nStackItems] = v; ++f.nStackItems; } __host__ __device__ Bits pop(Frame& f) { --f.nStackItems; return f.stack[f.nStackItems]; } void writeResults(const string& path, const ProgramArr& programArr) { ofstream f(path, ios::out); for (u32 i = 0; i < nTotalTruthTables; ++i) { f << format("{:016b}:", i); if (programArr[i].nOps) { for (u32 j = 0; j < programArr[i].nOps; ++j) { f << format(" {}", opStrArr[programArr[i].opArr[j]]); } f << format(" ({} ops)", programArr[i].nOps); } else { f << " <none>"; } f << format("\n"); } } void writeHistogram(const string& path) { ofstream f(path, ios::out); for (u32 i = 0; i < nTotalTruthTables; ++i) { f << format("{:016b}: {}\n", i, nFoundPrograms[i]); } } string serializeProgram(const FrameArr& frameArr, u32 nFrames) { stringstream ss; for (u32 i = 1; i <= nFrames; ++i) { // skip the first op, which is nop or base connection ss << format(" {}", opStrArr[frameArr[i].op]); } return ss.str(); } string serializeBase(const Base& base) { stringstream ss; for (u32 i = 0; i < base.program.nOps; ++i) { ss << format(" {}", opStrArr[base.program.opArr[i]]); } ss << format(" (s={})", base.frame.nStackItems); return ss.str(); } string serializeFrame(const Frame& f) { stringstream ss; ss << format("Frame: op={} nStack={} stack=", opStrArr[f.op], f.nStackItems); for (u32 i = 0; i < f.nStackItems; ++i) { ss << format("{:016b}", f.stack[i]); } return ss.str(); } string secondsToHms(double sec) { u64 n = static_cast<u64>(sec); u64 s = n % 60; n /= 60; u64 m = n % 60; n /= 60; u64 h = n % 60; return format("{:02d}:{:02d}:{:02d}", h, m, s); }
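// A small standalone sketch (separate from the program above) of the launch
// arithmetic used in gpuSearch: one base per thread, the block count obtained
// by ceiling division so the last, partially filled block is still launched,
// and the kernel-side "i >= nThreads" guard discarding the excess threads.
#include <cassert>
#include <cstdio>
#include <initializer_list>

int main() {
    const unsigned nThreadsPerBlock = 1024;
    for (unsigned nThreads : {1u, 1023u, 1024u, 1025u, 150000u}) {
        const unsigned nBlocks = (nThreads + nThreadsPerBlock - 1) / nThreadsPerBlock;
        assert(nBlocks * nThreadsPerBlock >= nThreads);        // enough threads launched
        assert((nBlocks - 1) * nThreadsPerBlock < nThreads);   // no fully idle block
        printf("bases=%u -> %u block(s) of %u threads\n", nThreads, nBlocks, nThreadsPerBlock);
    }
    return 0;
}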
43b4c0cc2b29c1a6a98898a40b87c675d8936178.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////// // > Copyright (c) 2017 by Contributors. // > https://github.com/cjmcv // > brief Softmax. // > author Jianming Chen //////////////////////////////////////////////////////////////// #ifdef USE_ROCM #include "operator/softmax_op.h" #include <sstream> #include <float.h> namespace dlex_cnn { template <typename Dtype> __global__ void MaxPerNum(const int num, const int size, Dtype* arr, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { Dtype *base = arr + index * size; Dtype maxval = -FLT_MAX; for (int i = 0; i < size; i++) maxval = max(maxval, base[i]); out[index] = maxval; } } template <typename Dtype> __global__ void SubExpPerNum(const int num, const int size, Dtype* in_data, const Dtype* val, Dtype* out_data) { CUDA_KERNEL_LOOP(index, num) { Dtype *in_base = in_data + index * size; Dtype *out_base = out_data + index * size; for (int i = 0; i < size; i++) out_base[i] = exp(in_base[i] - val[index]); } } template <typename Dtype> __global__ void SumPerNum(const int n, const int size, Dtype* arr, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { Dtype *base = arr + index * size; Dtype sum = 0; for (int i = 0; i < size; i++) sum += base[i]; out[index] = sum; } } template <typename Dtype> __global__ void DivInplacePerNum(const int num, const int size, const Dtype *val, Dtype* data) { CUDA_KERNEL_LOOP(index, num) { Dtype *arr = data + index * size; for (int i = 0; i < size; i++) arr[i] = arr[i] / val[index]; } } template <typename Dtype> void SoftmaxOp<Dtype>::Forward_gpu( const std::vector<std::shared_ptr<Tensor<Dtype>>> &prev, const std::vector<std::shared_ptr<Tensor<Dtype>>> &next) { const std::vector<int> prev_data_size = prev[0]->get_size(); const std::vector<int> next_data_size = next[0]->get_size(); //const std::vector<int> prev_data_shape = prev[0]->get_shape(); const std::vector<int> next_data_shape = next[0]->get_shape(); Dtype *prev_data_base = (Dtype *)prev[0]->GetPushGpuData(); Dtype *next_data_base = (Dtype *)next[0]->GetGpuData(); const int next_data_num = next_data_shape[tind::eNum]; const int prev_data_size3D = prev_data_size[tind::e3D]; const int next_data_size3D = next_data_size[tind::e3D]; next[0]->SetGpuZero(); if (gpu_num_temp_ == NULL) CUDA_DCHECK(hipMalloc(&gpu_num_temp_, sizeof(Dtype) * next_data_num)); CUDA_DCHECK(hipMemset(gpu_num_temp_, 0, sizeof(Dtype) * next_data_num)); MaxPerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, prev_data_base, gpu_num_temp_); SubExpPerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, prev_data_base, gpu_num_temp_, next_data_base); SumPerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, next_data_base, gpu_num_temp_); DivInplacePerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, gpu_num_temp_, next_data_base); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void SoftmaxBackwardKernel1(const int n, const Dtype* next_data, const Dtype* next_diff, Dtype *prev_diff) { CUDA_KERNEL_LOOP(index, n) { const Dtype val_next_data = next_data[index]; Dtype val_prev_diff = prev_diff[index]; for (int next_diff_idx = 0; next_diff_idx < n; next_diff_idx++) { val_prev_diff -= val_next_data * next_data[next_diff_idx] * next_diff[next_diff_idx]; } prev_diff[index] = 
val_prev_diff; } } template <typename Dtype> __global__ void SoftmaxBackwardKernel2(const int n, const Dtype* next_data, const Dtype* next_diff, Dtype *prev_diff) { CUDA_KERNEL_LOOP(index, n) { prev_diff[index] += next_data[index] * next_diff[index]; } } template <typename Dtype> void SoftmaxOp<Dtype>::Backward_gpu( const std::vector<std::shared_ptr<Tensor<Dtype>>> &prev, const std::vector<std::shared_ptr<Tensor<Dtype>>> &next, const std::vector<std::shared_ptr<Tensor<Dtype>>> &prev_diff, const std::vector<std::shared_ptr<Tensor<Dtype>>> &next_diff ) { const std::vector<int> prev_data_size = prev[0]->get_size(); const std::vector<int> next_data_size = next[0]->get_size(); const std::vector<int> prev_diff_size = prev_diff[0]->get_size(); const std::vector<int> next_diff_size = next_diff[0]->get_size(); const std::vector<int> prev_data_shape = prev[0]->get_shape(); const std::vector<int> next_data_shape = next[0]->get_shape(); const std::vector<int> prev_diff_shape = prev_diff[0]->get_shape(); const std::vector<int> next_diff_shape = next_diff[0]->get_shape(); Dtype *prev_data_base = (Dtype *)prev[0]->GetPushGpuData(); Dtype *next_data_base = (Dtype *)next[0]->GetPushGpuData(); Dtype *prev_diff_base = (Dtype *)prev_diff[0]->GetGpuData(); Dtype *next_diff_base = (Dtype *)next_diff[0]->GetPushGpuData(); if (prev_data_size[tind::e4D] != next_data_size[tind::e4D]) { DLOG_ERR("[ SoftmaxOp::Backward ]: the size of input and output data must be equal \n"); return; } if (prev_diff_size[tind::e4D] != next_diff_size[tind::e4D]) { DLOG_ERR("[ SoftmaxOp::Backward ]: the size of input diff and output diff must be equal \n"); return; } if (prev_diff_size[tind::e4D] != prev_data_size[tind::e4D]) { DLOG_ERR("[ SoftmaxOp::Backward ]: the size of input diff and output data must be equal \n"); return; } //update prev_diff prev_diff[0]->SetGpuZero(); const int prev_data_size3D = prev_data_size[tind::e3D]; const int next_data_size3D = next_data_size[tind::e3D]; const int prev_diff_size3D = prev_diff_size[tind::e3D]; const int next_diff_size3D = next_diff_size[tind::e3D]; for (int pn = 0; pn < prev_data_shape[tind::eNum]; pn++) { const Dtype* prev_data = prev_data_base + pn * prev_data_size3D; const Dtype* next_data = next_data_base + pn * next_data_size3D; const Dtype* next_diff = next_diff_base + pn * next_diff_size3D; Dtype* prev_diff = prev_diff_base + pn * prev_diff_size3D; SoftmaxBackwardKernel1<Dtype> << <DLEX_GET_BLOCKS(prev_diff_size3D), DLEX_CUDA_NUM_THREADS >> >( prev_diff_size3D, next_data, next_diff, prev_diff); SoftmaxBackwardKernel2<Dtype> << <DLEX_GET_BLOCKS(prev_diff_size3D), DLEX_CUDA_NUM_THREADS >> >( prev_diff_size3D, next_data, next_diff, prev_diff); CUDA_POST_KERNEL_CHECK; } } template void SoftmaxOp<float>::Forward_gpu( const std::vector<std::shared_ptr<Tensor<float>>> &prev, const std::vector<std::shared_ptr<Tensor<float>>> &next); template void SoftmaxOp<double>::Forward_gpu( const std::vector<std::shared_ptr<Tensor<double>>> &prev, const std::vector<std::shared_ptr<Tensor<double>>> &next); template void SoftmaxOp<float>::Backward_gpu( const std::vector<std::shared_ptr<Tensor<float>>> &prev, const std::vector<std::shared_ptr<Tensor<float>>> &next, const std::vector<std::shared_ptr<Tensor<float>>> &prev_diff, const std::vector<std::shared_ptr<Tensor<float>>> &next_diff); template void SoftmaxOp<double>::Backward_gpu( const std::vector<std::shared_ptr<Tensor<double>>> &prev, const std::vector<std::shared_ptr<Tensor<double>>> &next, const std::vector<std::shared_ptr<Tensor<double>>> 
&prev_diff, const std::vector<std::shared_ptr<Tensor<double>>> &next_diff); }//namespace #endif
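// The kernels above use a CUDA_KERNEL_LOOP(index, n) macro defined elsewhere in
// this codebase; it is commonly a grid-stride loop, and the sketch below spells
// that assumption out so the per-sample indexing in MaxPerNum and friends is
// easier to follow. MaxPerNumSketch is illustrative only, not part of dlex_cnn.
template <typename Dtype>
__global__ void MaxPerNumSketch(const int num, const int size,
                                const Dtype* arr, Dtype* out) {
  // Grid-stride loop: thread t handles samples t, t + stride, t + 2*stride, ...
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < num;
       index += blockDim.x * gridDim.x) {
    const Dtype* base = arr + index * size;   // one sample of `size` values
    Dtype maxval = base[0];                   // assumes size >= 1
    for (int i = 1; i < size; ++i) maxval = base[i] > maxval ? base[i] : maxval;
    out[index] = maxval;                      // per-sample maximum
  }
}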
43b4c0cc2b29c1a6a98898a40b87c675d8936178.cu
//////////////////////////////////////////////////////////////// // > Copyright (c) 2017 by Contributors. // > https://github.com/cjmcv // > brief Softmax. // > author Jianming Chen //////////////////////////////////////////////////////////////// #ifdef USE_CUDA #include "operator/softmax_op.h" #include <sstream> #include <float.h> namespace dlex_cnn { template <typename Dtype> __global__ void MaxPerNum(const int num, const int size, Dtype* arr, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { Dtype *base = arr + index * size; Dtype maxval = -FLT_MAX; for (int i = 0; i < size; i++) maxval = max(maxval, base[i]); out[index] = maxval; } } template <typename Dtype> __global__ void SubExpPerNum(const int num, const int size, Dtype* in_data, const Dtype* val, Dtype* out_data) { CUDA_KERNEL_LOOP(index, num) { Dtype *in_base = in_data + index * size; Dtype *out_base = out_data + index * size; for (int i = 0; i < size; i++) out_base[i] = exp(in_base[i] - val[index]); } } template <typename Dtype> __global__ void SumPerNum(const int n, const int size, Dtype* arr, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { Dtype *base = arr + index * size; Dtype sum = 0; for (int i = 0; i < size; i++) sum += base[i]; out[index] = sum; } } template <typename Dtype> __global__ void DivInplacePerNum(const int num, const int size, const Dtype *val, Dtype* data) { CUDA_KERNEL_LOOP(index, num) { Dtype *arr = data + index * size; for (int i = 0; i < size; i++) arr[i] = arr[i] / val[index]; } } template <typename Dtype> void SoftmaxOp<Dtype>::Forward_gpu( const std::vector<std::shared_ptr<Tensor<Dtype>>> &prev, const std::vector<std::shared_ptr<Tensor<Dtype>>> &next) { const std::vector<int> prev_data_size = prev[0]->get_size(); const std::vector<int> next_data_size = next[0]->get_size(); //const std::vector<int> prev_data_shape = prev[0]->get_shape(); const std::vector<int> next_data_shape = next[0]->get_shape(); Dtype *prev_data_base = (Dtype *)prev[0]->GetPushGpuData(); Dtype *next_data_base = (Dtype *)next[0]->GetGpuData(); const int next_data_num = next_data_shape[tind::eNum]; const int prev_data_size3D = prev_data_size[tind::e3D]; const int next_data_size3D = next_data_size[tind::e3D]; next[0]->SetGpuZero(); if (gpu_num_temp_ == NULL) CUDA_DCHECK(cudaMalloc(&gpu_num_temp_, sizeof(Dtype) * next_data_num)); CUDA_DCHECK(cudaMemset(gpu_num_temp_, 0, sizeof(Dtype) * next_data_num)); MaxPerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, prev_data_base, gpu_num_temp_); SubExpPerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, prev_data_base, gpu_num_temp_, next_data_base); SumPerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, next_data_base, gpu_num_temp_); DivInplacePerNum<Dtype> << <DLEX_GET_BLOCKS(next_data_num), DLEX_CUDA_NUM_THREADS >> >( next_data_num, prev_data_size3D, gpu_num_temp_, next_data_base); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void SoftmaxBackwardKernel1(const int n, const Dtype* next_data, const Dtype* next_diff, Dtype *prev_diff) { CUDA_KERNEL_LOOP(index, n) { const Dtype val_next_data = next_data[index]; Dtype val_prev_diff = prev_diff[index]; for (int next_diff_idx = 0; next_diff_idx < n; next_diff_idx++) { val_prev_diff -= val_next_data * next_data[next_diff_idx] * next_diff[next_diff_idx]; } prev_diff[index] = val_prev_diff; } } template <typename Dtype> __global__ void SoftmaxBackwardKernel2(const int n, 
const Dtype* next_data, const Dtype* next_diff, Dtype *prev_diff) { CUDA_KERNEL_LOOP(index, n) { prev_diff[index] += next_data[index] * next_diff[index]; } } template <typename Dtype> void SoftmaxOp<Dtype>::Backward_gpu( const std::vector<std::shared_ptr<Tensor<Dtype>>> &prev, const std::vector<std::shared_ptr<Tensor<Dtype>>> &next, const std::vector<std::shared_ptr<Tensor<Dtype>>> &prev_diff, const std::vector<std::shared_ptr<Tensor<Dtype>>> &next_diff ) { const std::vector<int> prev_data_size = prev[0]->get_size(); const std::vector<int> next_data_size = next[0]->get_size(); const std::vector<int> prev_diff_size = prev_diff[0]->get_size(); const std::vector<int> next_diff_size = next_diff[0]->get_size(); const std::vector<int> prev_data_shape = prev[0]->get_shape(); const std::vector<int> next_data_shape = next[0]->get_shape(); const std::vector<int> prev_diff_shape = prev_diff[0]->get_shape(); const std::vector<int> next_diff_shape = next_diff[0]->get_shape(); Dtype *prev_data_base = (Dtype *)prev[0]->GetPushGpuData(); Dtype *next_data_base = (Dtype *)next[0]->GetPushGpuData(); Dtype *prev_diff_base = (Dtype *)prev_diff[0]->GetGpuData(); Dtype *next_diff_base = (Dtype *)next_diff[0]->GetPushGpuData(); if (prev_data_size[tind::e4D] != next_data_size[tind::e4D]) { DLOG_ERR("[ SoftmaxOp::Backward ]: the size of input and output data must be equal \n"); return; } if (prev_diff_size[tind::e4D] != next_diff_size[tind::e4D]) { DLOG_ERR("[ SoftmaxOp::Backward ]: the size of input diff and output diff must be equal \n"); return; } if (prev_diff_size[tind::e4D] != prev_data_size[tind::e4D]) { DLOG_ERR("[ SoftmaxOp::Backward ]: the size of input diff and output data must be equal \n"); return; } //update prev_diff prev_diff[0]->SetGpuZero(); const int prev_data_size3D = prev_data_size[tind::e3D]; const int next_data_size3D = next_data_size[tind::e3D]; const int prev_diff_size3D = prev_diff_size[tind::e3D]; const int next_diff_size3D = next_diff_size[tind::e3D]; for (int pn = 0; pn < prev_data_shape[tind::eNum]; pn++) { const Dtype* prev_data = prev_data_base + pn * prev_data_size3D; const Dtype* next_data = next_data_base + pn * next_data_size3D; const Dtype* next_diff = next_diff_base + pn * next_diff_size3D; Dtype* prev_diff = prev_diff_base + pn * prev_diff_size3D; SoftmaxBackwardKernel1<Dtype> << <DLEX_GET_BLOCKS(prev_diff_size3D), DLEX_CUDA_NUM_THREADS >> >( prev_diff_size3D, next_data, next_diff, prev_diff); SoftmaxBackwardKernel2<Dtype> << <DLEX_GET_BLOCKS(prev_diff_size3D), DLEX_CUDA_NUM_THREADS >> >( prev_diff_size3D, next_data, next_diff, prev_diff); CUDA_POST_KERNEL_CHECK; } } template void SoftmaxOp<float>::Forward_gpu( const std::vector<std::shared_ptr<Tensor<float>>> &prev, const std::vector<std::shared_ptr<Tensor<float>>> &next); template void SoftmaxOp<double>::Forward_gpu( const std::vector<std::shared_ptr<Tensor<double>>> &prev, const std::vector<std::shared_ptr<Tensor<double>>> &next); template void SoftmaxOp<float>::Backward_gpu( const std::vector<std::shared_ptr<Tensor<float>>> &prev, const std::vector<std::shared_ptr<Tensor<float>>> &next, const std::vector<std::shared_ptr<Tensor<float>>> &prev_diff, const std::vector<std::shared_ptr<Tensor<float>>> &next_diff); template void SoftmaxOp<double>::Backward_gpu( const std::vector<std::shared_ptr<Tensor<double>>> &prev, const std::vector<std::shared_ptr<Tensor<double>>> &next, const std::vector<std::shared_ptr<Tensor<double>>> &prev_diff, const std::vector<std::shared_ptr<Tensor<double>>> &next_diff); }//namespace #endif
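// A host-side reference (illustrative only, not part of the dlex_cnn API) of
// the same four-stage pipeline the kernels above run per sample: take the
// per-sample max, exponentiate x - max, sum, then divide. Subtracting the max
// before exp() keeps the exponentials in a representable range.
#include <algorithm>
#include <cmath>
#include <cstdio>

void softmax_reference(const float* in, float* out, int num, int size) {
  for (int n = 0; n < num; ++n) {
    const float* x = in + n * size;
    float* y = out + n * size;
    float m = x[0];
    for (int i = 1; i < size; ++i) m = std::max(m, x[i]);                       // stage 1: max
    float sum = 0.f;
    for (int i = 0; i < size; ++i) { y[i] = std::exp(x[i] - m); sum += y[i]; }  // stages 2-3
    for (int i = 0; i < size; ++i) y[i] /= sum;                                 // stage 4: normalize
  }
}

int main() {
  const float x[4] = {1000.f, 1001.f, 1002.f, 1003.f};   // naive exp() would overflow float
  float y[4];
  softmax_reference(x, y, /*num=*/1, /*size=*/4);
  printf("%f %f %f %f\n", y[0], y[1], y[2], y[3]);
  return 0;
}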
054ad79e18f559063685d764e13c513dd990c973.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Vector3_hip.cuh" #include "assert.cuh" __host__ __device__ Vector3::Vector3() : _x(0), _y(0), _z(0) {} __host__ __device__ Vector3::Vector3(number_t x, number_t y, number_t z) : _x(x), _y(y), _z(z) {} __host__ __device__ Vector3::~Vector3() {} __host__ __device__ Vector3 Vector3::operator = (const Vector3& other) { _x = other._x; _y = other._y; _z = other._z; return *this; } __host__ __device__ number_t Vector3::x() const { return _x; } __host__ __device__ number_t Vector3::y() const { return _y; } __host__ __device__ number_t Vector3::z() const { return _z; } __host__ __device__ void Vector3::set_x(number_t x) { _x = x; } __host__ __device__ void Vector3::set_y(number_t y) { _y = y; } __host__ __device__ void Vector3::set_z(number_t z) { _z = z; } __host__ __device__ void Vector3::normalize() { assert(magnitude() > EPSILON); *this /= magnitude(); } __host__ __device__ number_t Vector3::magnitude() const { return SQRT(sqrMagnitude()); } __host__ __device__ number_t Vector3::sqrMagnitude() const { return this->dot(*this); } __host__ __device__ number_t* Vector3::toCArray() const { number_t* result = new number_t[3]; result[0] = _x; result[1] = _y; result[2] = _z; return result; } __host__ __device__ bool Vector3::operator==(const Vector3& other) const { return _x == other._x && _y == other._y && _z == other._z; } __host__ __device__ bool Vector3::operator!=(const Vector3& other) const { return !(*this == other); } __host__ __device__ const Vector3 Vector3::operator*(const number_t value) const { return Vector3(_x * value, _y * value, _z * value); } __host__ __device__ const Vector3 Vector3::operator/(const number_t value) const { assert(value < -EPSILON || EPSILON < value); return (*this * ((number_t)1.0 / value)); } __host__ __device__ const Vector3 Vector3::operator+(const Vector3& other) const { return Vector3(_x + other._x, _y + other._y, _z + other._z); } __host__ __device__ const Vector3 Vector3::operator-(const Vector3& other) const { return *this + (other * -1); } __host__ __device__ Vector3 Vector3::operator*=(number_t value) { return *this = *this * value; } __host__ __device__ Vector3 Vector3::operator/=(number_t value) { return *this = *this * value; } __host__ __device__ Vector3 Vector3::operator+=(const Vector3& other) { return *this = *this + other; } __host__ __device__ Vector3 Vector3::operator-=(const Vector3& other) { return *this = *this - other; } __host__ __device__ const Vector3 Vector3::normalized() { return *this / magnitude(); } __host__ __device__ number_t Vector3::dot(const Vector3& other) const { return _x * other._x + _y * other._y + _z * other._z; } __host__ __device__ const Vector3 Vector3::cross(const Vector3& other) const { return Vector3(_y * other._z - _z * other._y, _z * other._x - _x * other._z, _x * other._y - _y * other._x); } __host__ __device__ const Vector3 Vector3::multiplyComponentWise(const Vector3& other) const { return Vector3(_x * other._x, _y * other._y, _z * other._z); } __host__ __device__ number_t Vector3::angle(const Vector3& other) const { return ATAN2(angle_sin(other), angle_cos(other)); } __host__ __device__ number_t Vector3::unsignedAngle(const Vector3& other) const { return abs(angle(other)); } __host__ __device__ number_t Vector3::angle_sin(const Vector3& other) const { return cross(other).magnitude() / magnitude() / other.magnitude(); } __host__ __device__ number_t Vector3::angle_cos(const Vector3& other) const { return dot(other) / 
magnitude() / other.magnitude(); } __host__ __device__ const Vector3 Vector3::reflect(const Vector3& planeNormal) const { return planeNormal * (2 * angle_cos(planeNormal)) * magnitude() + *this; } __host__ __device__ number_t Vector3::distance(const Vector3& a, const Vector3& b) { return (a - b).magnitude(); } __host__ __device__ number_t Vector3::sqrDistance(const Vector3& a, const Vector3& b) { return (a - b).sqrMagnitude(); }
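// A quick standalone check (plain doubles rather than the number_t/EPSILON
// types above) of the angle construction used by Vector3::angle: with
// sin = |a x b| / (|a| |b|) and cos = (a . b) / (|a| |b|), atan2(sin, cos)
// recovers the angle between the vectors; the x and y axes give pi/2.
#include <cmath>
#include <cstdio>

int main() {
  const double ax = 1, ay = 0, az = 0;   // a = x axis
  const double bx = 0, by = 1, bz = 0;   // b = y axis
  const double cx = ay * bz - az * by, cy = az * bx - ax * bz, cz = ax * by - ay * bx;
  const double mags = std::sqrt(ax * ax + ay * ay + az * az) *
                      std::sqrt(bx * bx + by * by + bz * bz);
  const double s = std::sqrt(cx * cx + cy * cy + cz * cz) / mags;   // |a x b| / (|a||b|)
  const double c = (ax * bx + ay * by + az * bz) / mags;            // (a . b) / (|a||b|)
  printf("angle = %f rad, expected pi/2 = %f\n", std::atan2(s, c), std::acos(-1.0) / 2);
  return 0;
}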
054ad79e18f559063685d764e13c513dd990c973.cu
#include "cuda_runtime.h" #include "Vector3.cuh" #include "assert.cuh" __host__ __device__ Vector3::Vector3() : _x(0), _y(0), _z(0) {} __host__ __device__ Vector3::Vector3(number_t x, number_t y, number_t z) : _x(x), _y(y), _z(z) {} __host__ __device__ Vector3::~Vector3() {} __host__ __device__ Vector3 Vector3::operator = (const Vector3& other) { _x = other._x; _y = other._y; _z = other._z; return *this; } __host__ __device__ number_t Vector3::x() const { return _x; } __host__ __device__ number_t Vector3::y() const { return _y; } __host__ __device__ number_t Vector3::z() const { return _z; } __host__ __device__ void Vector3::set_x(number_t x) { _x = x; } __host__ __device__ void Vector3::set_y(number_t y) { _y = y; } __host__ __device__ void Vector3::set_z(number_t z) { _z = z; } __host__ __device__ void Vector3::normalize() { assert(magnitude() > EPSILON); *this /= magnitude(); } __host__ __device__ number_t Vector3::magnitude() const { return SQRT(sqrMagnitude()); } __host__ __device__ number_t Vector3::sqrMagnitude() const { return this->dot(*this); } __host__ __device__ number_t* Vector3::toCArray() const { number_t* result = new number_t[3]; result[0] = _x; result[1] = _y; result[2] = _z; return result; } __host__ __device__ bool Vector3::operator==(const Vector3& other) const { return _x == other._x && _y == other._y && _z == other._z; } __host__ __device__ bool Vector3::operator!=(const Vector3& other) const { return !(*this == other); } __host__ __device__ const Vector3 Vector3::operator*(const number_t value) const { return Vector3(_x * value, _y * value, _z * value); } __host__ __device__ const Vector3 Vector3::operator/(const number_t value) const { assert(value < -EPSILON || EPSILON < value); return (*this * ((number_t)1.0 / value)); } __host__ __device__ const Vector3 Vector3::operator+(const Vector3& other) const { return Vector3(_x + other._x, _y + other._y, _z + other._z); } __host__ __device__ const Vector3 Vector3::operator-(const Vector3& other) const { return *this + (other * -1); } __host__ __device__ Vector3 Vector3::operator*=(number_t value) { return *this = *this * value; } __host__ __device__ Vector3 Vector3::operator/=(number_t value) { return *this = *this * value; } __host__ __device__ Vector3 Vector3::operator+=(const Vector3& other) { return *this = *this + other; } __host__ __device__ Vector3 Vector3::operator-=(const Vector3& other) { return *this = *this - other; } __host__ __device__ const Vector3 Vector3::normalized() { return *this / magnitude(); } __host__ __device__ number_t Vector3::dot(const Vector3& other) const { return _x * other._x + _y * other._y + _z * other._z; } __host__ __device__ const Vector3 Vector3::cross(const Vector3& other) const { return Vector3(_y * other._z - _z * other._y, _z * other._x - _x * other._z, _x * other._y - _y * other._x); } __host__ __device__ const Vector3 Vector3::multiplyComponentWise(const Vector3& other) const { return Vector3(_x * other._x, _y * other._y, _z * other._z); } __host__ __device__ number_t Vector3::angle(const Vector3& other) const { return ATAN2(angle_sin(other), angle_cos(other)); } __host__ __device__ number_t Vector3::unsignedAngle(const Vector3& other) const { return abs(angle(other)); } __host__ __device__ number_t Vector3::angle_sin(const Vector3& other) const { return cross(other).magnitude() / magnitude() / other.magnitude(); } __host__ __device__ number_t Vector3::angle_cos(const Vector3& other) const { return dot(other) / magnitude() / other.magnitude(); } __host__ __device__ const 
Vector3 Vector3::reflect(const Vector3& planeNormal) const { return planeNormal * (2 * angle_cos(planeNormal)) * magnitude() + *this; } __host__ __device__ number_t Vector3::distance(const Vector3& a, const Vector3& b) { return (a - b).magnitude(); } __host__ __device__ number_t Vector3::sqrDistance(const Vector3& a, const Vector3& b) { return (a - b).sqrMagnitude(); }
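// A minimal host-side usage sketch of the Vector3 interface implemented above;
// it assumes the Vector3.cuh header included by this translation unit declares
// these members and that number_t converts to double for printing. The demo
// function itself is illustrative and not part of the original file.
#include <cstdio>
#include "Vector3.cuh"

void vector3Demo() {
  Vector3 a(1, 2, 2), b(0, 1, 0);
  printf("|a|   = %f\n", static_cast<double>(a.magnitude()));   // sqrt(1+4+4) = 3
  printf("a . b = %f\n", static_cast<double>(a.dot(b)));        // 2
  const Vector3 c = a.cross(b);                                 // (-2, 0, 1)
  printf("a x b = (%f, %f, %f)\n", static_cast<double>(c.x()),
         static_cast<double>(c.y()), static_cast<double>(c.z()));
  a.normalize();   // divides by |a|; asserts magnitude() > EPSILON first
}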
f7bf1405995f022f66b2c54ef18296d54d8c6f07.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "hdf5.h" #include "hdf5_hl.h" hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } getchar(); return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
f7bf1405995f022f66b2c54ef18296d54d8c6f07.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "hdf5.h" #include "hdf5_hl.h" cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } getchar(); return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
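// One common way to condense the repeated "if (cudaStatus != cudaSuccess) {
// fprintf(...); goto Error; }" pattern used above is a checking macro; this is
// a sketch of that idiom (returning early instead of jumping to a cleanup
// label), not part of the original sample.
#include <cstdio>
#include "cuda_runtime.h"

#define CHECK_CUDA(call)                                                          \
    do {                                                                          \
        cudaError_t err__ = (call);                                               \
        if (err__ != cudaSuccess) {                                               \
            fprintf(stderr, "%s failed: %s\n", #call, cudaGetErrorString(err__)); \
            return err__;                                                         \
        }                                                                         \
    } while (0)

static cudaError_t allocDeviceInts(int** devPtr, size_t count) {
    CHECK_CUDA(cudaSetDevice(0));
    CHECK_CUDA(cudaMalloc(reinterpret_cast<void**>(devPtr), count * sizeof(int)));
    return cudaSuccess;
}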
4c2f35a7a6b38b8276537e86a9122fae06eb18b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { /////////////////////////////////////////////////////////////////////// template <typename T> SD_DEVICE void pullRowsKernel(void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) { auto x = reinterpret_cast<T *>(vx); auto z = reinterpret_cast<T *>(vz); auto xEWS = shape::elementWiseStride(tadShapeInfo); auto zEWS = shape::elementWiseStride(zTadShapeInfo); auto tadLength = shape::length(tadShapeInfo); if (xEWS >= 1 && zEWS >= 1) { for (int idx = blockIdx.x; idx < len; idx += gridDim.x) { T *rX = x + tadOffsets[indexes[idx]]; T *rZ = z + zTadOffsets[idx]; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { rZ[i * zEWS] = rX[i * xEWS]; } } } else { for (int idx = blockIdx.x; idx < len; idx += gridDim.x) { T *rX = x + tadOffsets[indexes[idx]]; T *rZ = z + zTadOffsets[idx]; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = shape::getIndexOffset(i, tadShapeInfo); auto zOffset = shape::getIndexOffset(i, zTadShapeInfo); rZ[zOffset] = rX[xOffset]; } } } } /////////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL void execPullRowsKernel(void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) { pullRowsKernel<T>(vx, vz, len, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); } /////////////////////////////////////////////////////////////////////// template <typename T> SD_HOST void pullRowsKernelGeneric(dim3 &launchDims, hipStream_t *stream, void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) { hipLaunchKernelGGL(( execPullRowsKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, vz, len, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); sd::DebugHelper::checkErrorCode(stream, "pullRows(...) failed"); } BUILD_SINGLE_TEMPLATE(template void pullRowsKernelGeneric, (dim3 & launchDims, hipStream_t *stream, void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets), SD_COMMON_TYPES); } // namespace sd
4c2f35a7a6b38b8276537e86a9122fae06eb18b2.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace sd { /////////////////////////////////////////////////////////////////////// template <typename T> SD_DEVICE void pullRowsKernel(void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) { auto x = reinterpret_cast<T *>(vx); auto z = reinterpret_cast<T *>(vz); auto xEWS = shape::elementWiseStride(tadShapeInfo); auto zEWS = shape::elementWiseStride(zTadShapeInfo); auto tadLength = shape::length(tadShapeInfo); if (xEWS >= 1 && zEWS >= 1) { for (int idx = blockIdx.x; idx < len; idx += gridDim.x) { T *rX = x + tadOffsets[indexes[idx]]; T *rZ = z + zTadOffsets[idx]; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { rZ[i * zEWS] = rX[i * xEWS]; } } } else { for (int idx = blockIdx.x; idx < len; idx += gridDim.x) { T *rX = x + tadOffsets[indexes[idx]]; T *rZ = z + zTadOffsets[idx]; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = shape::getIndexOffset(i, tadShapeInfo); auto zOffset = shape::getIndexOffset(i, zTadShapeInfo); rZ[zOffset] = rX[xOffset]; } } } } /////////////////////////////////////////////////////////////////////// template <typename T> SD_KERNEL void execPullRowsKernel(void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) { pullRowsKernel<T>(vx, vz, len, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); } /////////////////////////////////////////////////////////////////////// template <typename T> SD_HOST void pullRowsKernelGeneric(dim3 &launchDims, cudaStream_t *stream, void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets) { execPullRowsKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, vz, len, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets); sd::DebugHelper::checkErrorCode(stream, "pullRows(...) failed"); } BUILD_SINGLE_TEMPLATE(template void pullRowsKernelGeneric, (dim3 & launchDims, cudaStream_t *stream, void *vx, void *vz, sd::LongType len, sd::LongType *indexes, sd::LongType const *tadShapeInfo, sd::LongType const *tadOffsets, sd::LongType const *zTadShapeInfo, sd::LongType const *zTadOffsets), SD_COMMON_TYPES); } // namespace sd
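// A simplified, standalone sketch of the contiguous (elementWiseStride == 1)
// case handled above: gather whole rows by index, one block per output row and
// the threads of that block striding over the row. Plain pointers and types
// only; this is not the sd:: API.
template <typename T>
__global__ void gatherRowsSketch(const T* src, T* dst, const long long* indexes,
                                 long long numRows, long long rowLen) {
  for (long long row = blockIdx.x; row < numRows; row += gridDim.x) {
    const T* in = src + indexes[row] * rowLen;   // source row selected by index
    T* out = dst + row * rowLen;                 // dense destination row
    for (long long i = threadIdx.x; i < rowLen; i += blockDim.x) {
      out[i] = in[i];
    }
  }
}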
e660ad38c22e814736a5750bb94fcba74090fbcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i]; // 16 // for ( int i = 0 ; i < 1 ; i++) // { // x[i] = x[i] + 1; // 8*10 // // for ( int j = 0 ; j < 2000 ; j++ ) // // x[i] += 2; // } } } int main(void) { int N = 30 * (1 << 20); float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); hipMalloc(&d_x, N*sizeof(float)); hipMalloc(&d_y, N*sizeof(float)); std::cout << "N = " << N << std::endl; for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice); hipEventRecord(start); // Perform SAXPY on 1M elements hipLaunchKernelGGL(( saxpy), dim3((N+511)/512), dim3(512), 0, 0, N, 2.0f, d_x, d_y); hipEventRecord(stop); hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float maxError = 0.0f; for (int i = 0; i < N; i++) { maxError = max(maxError, abs(y[i]-4.0f)); } printf("Max error: %f\n", maxError); printf("Effective Bandwidth (GB/s): %f\n", N*4*(2)/milliseconds/1e6); std::cout << "Time taken = " << milliseconds << " ms" << std::endl; }
e660ad38c22e814736a5750bb94fcba74090fbcc.cu
#include<iostream> __global__ void saxpy(int n, float a, float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i]; // 16 // for ( int i = 0 ; i < 1 ; i++) // { // x[i] = x[i] + 1; // 8*10 // // for ( int j = 0 ; j < 2000 ; j++ ) // // x[i] += 2; // } } } int main(void) { int N = 30 * (1 << 20); float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); cudaMalloc(&d_x, N*sizeof(float)); cudaMalloc(&d_y, N*sizeof(float)); std::cout << "N = " << N << std::endl; for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(start); // Perform SAXPY on 1M elements saxpy<<<(N+511)/512, 512>>>(N, 2.0f, d_x, d_y); cudaEventRecord(stop); cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float maxError = 0.0f; for (int i = 0; i < N; i++) { maxError = max(maxError, abs(y[i]-4.0f)); } printf("Max error: %f\n", maxError); printf("Effective Bandwidth (GB/s): %f\n", N*4*(2)/milliseconds/1e6); std::cout << "Time taken = " << milliseconds << " ms" << std::endl; }
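// A small helper making the bandwidth arithmetic above explicit:
// GB/s = bytes moved / (milliseconds * 1e6). SAXPY reads x, reads y and writes
// y, so the byte count depends on how many of those accesses are counted; the
// printf above counts two 4-byte accesses per element (N*4*2). The helper and
// its example call are illustrative, not part of the original benchmark.
#include <cstdio>

static double effectiveBandwidthGBs(long long nElems, int bytesPerElem,
                                    int accessesPerElem, double milliseconds) {
  const double bytes = static_cast<double>(nElems) * bytesPerElem * accessesPerElem;
  return bytes / milliseconds / 1e6;   // bytes per ms, scaled to GB/s
}

// Example, counting read x + read y + write y (3 accesses) for 30 * 2^20 floats:
//   double gbps = effectiveBandwidthGBs(30LL << 20, 4, 3, measuredMilliseconds);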
60ba5b72034965d8c8ebee229f76fadc80ca0a88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/tensor_util.h" #include "gtest/gtest.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/place.h" namespace paddle { namespace framework { static __global__ void FillNAN(float* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2] = NAN; } static __global__ void FillInf(float* buf) { buf[0] = INFINITY; buf[1] = 0.1; buf[2] = 0.2; } static __global__ void FillNAN(platform::float16* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2].x = 0x7fff; } static __global__ void FillInf(platform::float16* buf) { buf[0] = 0.0; buf[1].x = 0x7c00; buf[2] = 0.5; } static __global__ void FillFinite(float* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2] = 0.2; } static __global__ void FillFinite(platform::float16* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2] = 0.2; } TEST(TensorContainsNAN, GPU) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsNAN(tensor)); } { Tensor tensor; paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsNAN(tensor)); } } TEST(TensorContainsInf, GPU) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsInf(tensor)); } { Tensor tensor; paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsInf(tensor)); } } TEST(TensorIsfinite, GPU) { paddle::platform::CUDAPlace gpu(0); using paddle::platform::float16; auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); // contains inf { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else 
hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } { Tensor tensor; float16* buf = tensor.mutable_data<float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } // contains nan { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } { Tensor tensor; float16* buf = tensor.mutable_data<float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } // all element are finite { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillFinite), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(TensorIsfinite(tensor)); } { Tensor tensor; float16* buf = tensor.mutable_data<float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillFinite), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(TensorIsfinite(tensor)); } } TEST(TensorContainsInf, GPUWithoutWait) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); TensorContainsInf(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } { Tensor tensor, out; paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); TensorContainsInf(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } } TEST(TensorContainsNAN, GPUWithoutWait) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); TensorContainsNAN(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } { Tensor tensor, out; 
paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); TensorContainsNAN(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } } TEST(TensorIsfinite, GPUWithoutWait) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillInf), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); TensorIsfinite(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); EXPECT_EQ(tmp.data<bool>()[0], false); } { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillNAN), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); TensorIsfinite(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); EXPECT_EQ(tmp.data<bool>()[0], false); } { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else hipLaunchKernelGGL(( FillFinite), dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #endif cuda_ctx->Wait(); TensorIsfinite(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); EXPECT_EQ(tmp.data<bool>()[0], true); } } } // namespace framework } // namespace paddle
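// A minimal, host-only sketch of why the raw bit patterns written by the
// platform::float16 fill kernels above denote Inf and NaN: IEEE-754 binary16
// uses 1 sign bit, 5 exponent bits and 10 mantissa bits, so an all-ones
// exponent with a zero mantissa is infinity and with a nonzero mantissa is
// NaN. The helpers below are illustrative only and not part of the test.
#include <cassert>
#include <cstdint>

static inline bool half_bits_is_inf(uint16_t bits) {
  // exponent (bits 10..14) all ones, mantissa (bits 0..9) zero -> +/-Inf
  return (bits & 0x7C00u) == 0x7C00u && (bits & 0x03FFu) == 0u;
}

static inline bool half_bits_is_nan(uint16_t bits) {
  // exponent all ones, mantissa nonzero -> NaN
  return (bits & 0x7C00u) == 0x7C00u && (bits & 0x03FFu) != 0u;
}

static inline void check_half_fill_patterns() {
  assert(half_bits_is_inf(0x7C00u));  // FillInf writes buf[1].x = 0x7c00
  assert(half_bits_is_nan(0x7FFFu));  // FillNAN writes buf[2].x = 0x7fff
}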
60ba5b72034965d8c8ebee229f76fadc80ca0a88.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/tensor_util.h" #include "gtest/gtest.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/place.h" namespace paddle { namespace framework { static __global__ void FillNAN(float* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2] = NAN; } static __global__ void FillInf(float* buf) { buf[0] = INFINITY; buf[1] = 0.1; buf[2] = 0.2; } static __global__ void FillNAN(platform::float16* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2].x = 0x7fff; } static __global__ void FillInf(platform::float16* buf) { buf[0] = 0.0; buf[1].x = 0x7c00; buf[2] = 0.5; } static __global__ void FillFinite(float* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2] = 0.2; } static __global__ void FillFinite(platform::float16* buf) { buf[0] = 0.0; buf[1] = 0.1; buf[2] = 0.2; } TEST(TensorContainsNAN, GPU) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsNAN(tensor)); } { Tensor tensor; paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsNAN(tensor)); } } TEST(TensorContainsInf, GPU) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsInf(tensor)); } { Tensor tensor; paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); ASSERT_TRUE(TensorContainsInf(tensor)); } } TEST(TensorIsfinite, GPU) { paddle::platform::CUDAPlace gpu(0); using paddle::platform::float16; auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); // contains inf { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } { Tensor tensor; float16* buf = tensor.mutable_data<float16>({3}, gpu); #ifdef PADDLE_WITH_HIP 
hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } // contains nan { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } { Tensor tensor; float16* buf = tensor.mutable_data<float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(!TensorIsfinite(tensor)); } // all element are finite { Tensor tensor; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillFinite<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(TensorIsfinite(tensor)); } { Tensor tensor; float16* buf = tensor.mutable_data<float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillFinite<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); EXPECT_TRUE(TensorIsfinite(tensor)); } } TEST(TensorContainsInf, GPUWithoutWait) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); TensorContainsInf(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } { Tensor tensor, out; paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); TensorContainsInf(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } } TEST(TensorContainsNAN, GPUWithoutWait) { paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); TensorContainsNAN(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } { Tensor tensor, out; paddle::platform::float16* buf = tensor.mutable_data<paddle::platform::float16>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); TensorContainsNAN(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); ASSERT_EQ(tmp.data<bool>()[0], true); } } TEST(TensorIsfinite, GPUWithoutWait) { 
paddle::platform::CUDAPlace gpu(0); auto& pool = paddle::platform::DeviceContextPool::Instance(); auto* cuda_ctx = pool.GetByPlace(gpu); { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); TensorIsfinite(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); EXPECT_EQ(tmp.data<bool>()[0], false); } { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); TensorIsfinite(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); EXPECT_EQ(tmp.data<bool>()[0], false); } { Tensor tensor, out; float* buf = tensor.mutable_data<float>({3}, gpu); #ifdef PADDLE_WITH_HIP hipLaunchKernelGGL( FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf); #else FillFinite<<<1, 1, 0, cuda_ctx->stream()>>>(buf); #endif cuda_ctx->Wait(); TensorIsfinite(tensor, &out); platform::CPUPlace cpu; Tensor tmp; TensorCopy(out, cpu, *cuda_ctx, &tmp); cuda_ctx->Wait(); EXPECT_EQ(tmp.data<bool>()[0], true); } } } // namespace framework } // namespace paddle
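// A minimal sketch of the launch-syntax mapping that separates the .hip and
// .cu variants of this test. DummyFill, LaunchDummyFill and IllustrativeStream
// are placeholder names, not identifiers from the files above; the real tests
// launch FillNAN/FillInf/FillFinite in exactly this way.
#ifdef PADDLE_WITH_HIP
using IllustrativeStream = hipStream_t;
#else
using IllustrativeStream = cudaStream_t;
#endif

static __global__ void DummyFill(float* buf) { buf[0] = 0.0f; }

static void LaunchDummyFill(float* buf, IllustrativeStream stream) {
#ifdef PADDLE_WITH_HIP
  // HIP spells the execution configuration as explicit leading arguments:
  // kernel, grid dim, block dim, dynamic shared-memory bytes, stream, args.
  hipLaunchKernelGGL(DummyFill, dim3(1), dim3(1), 0, stream, buf);
#else
  // CUDA packs the same configuration into the <<<...>>> brackets.
  DummyFill<<<1, 1, 0, stream>>>(buf);
#endif
}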
84125205d49617db7cc6074ec9ce5b5d7394c5d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <vector> #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/roi_align_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static constexpr int kROISize = 4; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <class T> __device__ T BilinearInterpolate(const T* input_data, const int height, const int width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = static_cast<T>(y_low); } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = static_cast<T>(x_low); } else { x_high = x_low + 1; } T ly = y - y_low, lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; T v1 = input_data[y_low * width + x_low]; T v2 = input_data[y_low * width + x_high]; T v3 = input_data[y_high * width + x_low]; T v4 = input_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <class T> __device__ void BilinearInterpolateGradient(const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } T ly = y - *y_low, lx = x - *x_low; T hy = 1. - ly, hx = 1. 
- lx; *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <class T> __global__ void GPUROIAlignForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* output_data, const bool continuous_coordinate) { CUDA_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_offset = continuous_coordinate ? static_cast<T>(0.5) : 0; T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset; T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset; T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset; T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset; T roi_width = roi_xmax - roi_xmin; T roi_height = roi_ymax - roi_ymin; if (!continuous_coordinate) { roi_width = max(roi_width, static_cast<T>(1.)); roi_height = max(roi_height, static_cast<T>(1.)); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_input_data = input_data + (roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); T output_val = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = BilinearInterpolate(offset_input_data, height, width, y, x); output_val += val; } } output_val /= count; output_data[i] = output_val; } } template <typename T> __global__ void GPUROIAlignBackward( const int nthreads, const T* input_rois, const T* out_grad, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* input_grad, const bool continuous_coordinate) { CUDA_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_offset = continuous_coordinate ? 
T(0.5) : 0; T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset; T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset; T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset; T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset; T roi_width = roi_xmax - roi_xmin; T roi_height = roi_ymax - roi_ymin; if (!continuous_coordinate) { roi_width = max(roi_width, static_cast<T>(1.)); roi_height = max(roi_height, static_cast<T>(1.)); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_input_grad = input_grad + (roi_batch_ind * channels + c) * height * width; const T* offset_out_grad = out_grad + (n * channels + c) * pooled_height * pooled_width; const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw]; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1 = 0, w2 = 0, w3 = 0, w4 = 0; int x_low = -1, x_high = -1, y_low = -1, y_high = -1; BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high); T diff1 = out_grad_this_bin * w1 / count; T diff2 = out_grad_this_bin * w2 / count; T diff3 = out_grad_this_bin * w3 / count; T diff4 = out_grad_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low, diff1); platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high, diff2); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low, diff3); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high, diff4); } } } } } template <typename Place, typename T> class GPUROIAlignOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto sampling_ratio = ctx.Attr<int>("sampling_ratio"); auto aligned = ctx.Attr<bool>("aligned"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; #ifdef WITH_NV_JETSON platform::ChangeThreadNum(ctx.cuda_device_context(), &threads, 256); #endif Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); auto cplace = platform::CPUPlace(); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace); auto& dev_ctx = ctx.cuda_device_context(); auto gplace = ctx.GetPlace(); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); int rois_batch_size = rois_num_t->numel(); PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The 
rois_batch_size and imgs " "batch_size must be the same. But received rois_batch_size = %d, " "batch_size = %d", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(cplace, rois_num_list.data(), gplace, rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { roi_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto lod = rois->lod(); PADDLE_ENFORCE_EQ( lod.empty(), false, platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does " "not contain LoD information.")); auto rois_lod = lod.back(); int rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of rois and batch size " "of images must be the same. But received rois batch size = %d, " "and images batch size = %d", rois_batch_size, batch_size)); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ( rois_num, rois_num_with_lod, platform::errors::InvalidArgument( "The actual number of rois and the number of rois " "provided from Input(RoIsLoD) in RoIAlign must be the same." " But received actual number of rois is %d, and the number " "of rois from RoIsLoD is %d", rois_num, rois_num_with_lod)); for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } } int bytes = roi_batch_id_list.numel() * sizeof(int); auto roi_ptr = memory::Alloc(dev_ctx, bytes); int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr()); memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes, dev_ctx.stream()); hipLaunchKernelGGL(( GPUROIAlignForward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data, out->mutable_data<T>(ctx.GetPlace()), aligned); } }; template <typename Place, typename T> class GPUROIAlignGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto sampling_ratio = ctx.Attr<int>("sampling_ratio"); auto aligned = ctx.Attr<bool>("aligned"); int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (!in_grad) { return; } Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); auto cplace = platform::CPUPlace(); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace); auto& dev_ctx = ctx.cuda_device_context(); auto gplace = ctx.GetPlace(); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); int rois_batch_size = rois_num_t->numel(); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(cplace, rois_num_list.data(), gplace, rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = start; i < start + rois_num_list[n]; ++i) { roi_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); int 
rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } } auto roi_ptr = memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int)); int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr()); int bytes = roi_batch_id_list.numel() * sizeof(int); memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes, dev_ctx.stream()); in_grad->mutable_data<T>(ctx.GetPlace()); phi::funcs::SetConstant<Place, T> set_zero; set_zero(dev_ctx, in_grad, static_cast<T>(0)); int output_grad_size = out_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { hipLaunchKernelGGL(( GPUROIAlignBackward<T>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data, in_grad->mutable_data<T>(ctx.GetPlace()), aligned); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roi_align, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( roi_align_grad, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
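// A worked example of the bilinear weights computed by BilinearInterpolate /
// BilinearInterpolateGradient in the file above (plain arithmetic; the sample
// point is illustrative, not taken from any test). For (y, x) = (2.25, 3.5):
//   y_low = 2, y_high = 3, ly = 0.25, hy = 0.75
//   x_low = 3, x_high = 4, lx = 0.50, hx = 0.50
//   w1 = hy*hx = 0.375   (top-left)
//   w2 = hy*lx = 0.375   (top-right)
//   w3 = ly*hx = 0.125   (bottom-left)
//   w4 = ly*lx = 0.125   (bottom-right)
// The four weights always sum to 1, so the forward kernel produces a convex
// combination of the four neighbouring pixels, and the backward kernel
// scatters out_grad_this_bin with exactly the same weights via CudaAtomicAdd.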
84125205d49617db7cc6074ec9ce5b5d7394c5d2.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <vector> #include "paddle/fluid/memory/memory.h" #include "paddle/fluid/operators/roi_align_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static constexpr int kROISize = 4; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <class T> __device__ T BilinearInterpolate(const T* input_data, const int height, const int width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = static_cast<T>(y_low); } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = static_cast<T>(x_low); } else { x_high = x_low + 1; } T ly = y - y_low, lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; T v1 = input_data[y_low * width + x_low]; T v2 = input_data[y_low * width + x_high]; T v3 = input_data[y_high * width + x_low]; T v4 = input_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <class T> __device__ void BilinearInterpolateGradient(const int height, const int width, T y, T x, T* w1, T* w2, T* w3, T* w4, int* x_low, int* x_high, int* y_low, int* y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { return; } y = y <= 0 ? 0 : y; x = x <= 0 ? 0 : x; *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } T ly = y - *y_low, lx = x - *x_low; T hy = 1. - ly, hx = 1. - lx; *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <class T> __global__ void GPUROIAlignForward( const int nthreads, const T* input_data, const T* input_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* output_data, const bool continuous_coordinate) { CUDA_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_offset = continuous_coordinate ? 
static_cast<T>(0.5) : 0; T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset; T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset; T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset; T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset; T roi_width = roi_xmax - roi_xmin; T roi_height = roi_ymax - roi_ymin; if (!continuous_coordinate) { roi_width = max(roi_width, static_cast<T>(1.)); roi_height = max(roi_height, static_cast<T>(1.)); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_input_data = input_data + (roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); T output_val = 0; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = BilinearInterpolate(offset_input_data, height, width, y, x); output_val += val; } } output_val /= count; output_data[i] = output_val; } } template <typename T> __global__ void GPUROIAlignBackward( const int nthreads, const T* input_rois, const T* out_grad, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, int* roi_batch_id_data, T* input_grad, const bool continuous_coordinate) { CUDA_KERNEL_LOOP(i, nthreads) { int pw = i % pooled_width; int ph = (i / pooled_width) % pooled_height; int c = (i / pooled_width / pooled_height) % channels; int n = i / pooled_width / pooled_height / channels; const T* offset_input_rois = input_rois + n * kROISize; int roi_batch_ind = roi_batch_id_data[n]; T roi_offset = continuous_coordinate ? T(0.5) : 0; T roi_xmin = offset_input_rois[0] * spatial_scale - roi_offset; T roi_ymin = offset_input_rois[1] * spatial_scale - roi_offset; T roi_xmax = offset_input_rois[2] * spatial_scale - roi_offset; T roi_ymax = offset_input_rois[3] * spatial_scale - roi_offset; T roi_width = roi_xmax - roi_xmin; T roi_height = roi_ymax - roi_ymin; if (!continuous_coordinate) { roi_width = max(roi_width, static_cast<T>(1.)); roi_height = max(roi_height, static_cast<T>(1.)); } T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_input_grad = input_grad + (roi_batch_ind * channels + c) * height * width; const T* offset_out_grad = out_grad + (n * channels + c) * pooled_height * pooled_width; const T out_grad_this_bin = offset_out_grad[ph * pooled_width + pw]; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); const T count = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_ymin + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_xmin + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1 = 0, w2 = 0, w3 = 0, w4 = 0; int x_low = -1, x_high = -1, y_low = -1, y_high = -1; BilinearInterpolateGradient(height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high); T diff1 = out_grad_this_bin * w1 / count; T diff2 = out_grad_this_bin * w2 / count; T diff3 = out_grad_this_bin * w3 / count; T diff4 = out_grad_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_low, diff1); platform::CudaAtomicAdd(offset_input_grad + y_low * width + x_high, diff2); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_low, diff3); platform::CudaAtomicAdd(offset_input_grad + y_high * width + x_high, diff4); } } } } } template <typename Place, typename T> class GPUROIAlignOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out = ctx.Output<Tensor>("Out"); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto sampling_ratio = ctx.Attr<int>("sampling_ratio"); auto aligned = ctx.Attr<bool>("aligned"); auto in_dims = in->dims(); int batch_size = in_dims[0]; int channels = in_dims[1]; int height = in_dims[2]; int width = in_dims[3]; int rois_num = rois->dims()[0]; if (rois_num == 0) return; int output_size = out->numel(); int blocks = NumBlocks(output_size); int threads = kNumCUDAThreads; #ifdef WITH_NV_JETSON platform::ChangeThreadNum(ctx.cuda_device_context(), &threads, 256); #endif Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); auto cplace = platform::CPUPlace(); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace); auto& dev_ctx = ctx.cuda_device_context(); auto gplace = ctx.GetPlace(); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); int rois_batch_size = rois_num_t->numel(); PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The rois_batch_size and imgs " "batch_size must be the same. But received rois_batch_size = %d, " "batch_size = %d", rois_batch_size, batch_size)); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(cplace, rois_num_list.data(), gplace, rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (int i = start; i < start + rois_num_list[n]; ++i) { roi_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto lod = rois->lod(); PADDLE_ENFORCE_EQ( lod.empty(), false, platform::errors::InvalidArgument("Input(ROIs) in ROIAlignOp does " "not contain LoD information.")); auto rois_lod = lod.back(); int rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, platform::errors::InvalidArgument( "The batch size of rois and batch size " "of images must be the same. 
But received rois batch size = %d, " "and images batch size = %d", rois_batch_size, batch_size)); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ( rois_num, rois_num_with_lod, platform::errors::InvalidArgument( "The actual number of rois and the number of rois " "provided from Input(RoIsLoD) in RoIAlign must be the same." " But received actual number of rois is %d, and the number " "of rois from RoIsLoD is %d", rois_num, rois_num_with_lod)); for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } } int bytes = roi_batch_id_list.numel() * sizeof(int); auto roi_ptr = memory::Alloc(dev_ctx, bytes); int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr()); memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes, dev_ctx.stream()); GPUROIAlignForward<T><<<blocks, threads, 0, dev_ctx.stream()>>>( output_size, in->data<T>(), rois->data<T>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, roi_id_data, out->mutable_data<T>(ctx.GetPlace()), aligned); } }; template <typename Place, typename T> class GPUROIAlignGradOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in = ctx.Input<Tensor>("X"); auto* rois = ctx.Input<LoDTensor>("ROIs"); auto* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* in_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto pooled_height = ctx.Attr<int>("pooled_height"); auto pooled_width = ctx.Attr<int>("pooled_width"); auto spatial_scale = ctx.Attr<float>("spatial_scale"); auto sampling_ratio = ctx.Attr<int>("sampling_ratio"); auto aligned = ctx.Attr<bool>("aligned"); int rois_num = rois->dims()[0]; int channels = in->dims()[1]; int height = in->dims()[2]; int width = in->dims()[3]; if (!in_grad) { return; } Tensor roi_batch_id_list; roi_batch_id_list.Resize({rois_num}); auto cplace = platform::CPUPlace(); int* roi_batch_id_data = roi_batch_id_list.mutable_data<int>(cplace); auto& dev_ctx = ctx.cuda_device_context(); auto gplace = ctx.GetPlace(); if (ctx.HasInput("RoisNum")) { auto* rois_num_t = ctx.Input<Tensor>("RoisNum"); int rois_batch_size = rois_num_t->numel(); std::vector<int> rois_num_list(rois_batch_size); memory::Copy(cplace, rois_num_list.data(), gplace, rois_num_t->data<int>(), sizeof(int) * rois_batch_size, 0); int start = 0; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = start; i < start + rois_num_list[n]; ++i) { roi_batch_id_data[i] = n; } start += rois_num_list[n]; } } else { auto rois_lod = rois->lod().back(); int rois_batch_size = rois_lod.size() - 1; for (int n = 0; n < rois_batch_size; ++n) { for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { roi_batch_id_data[i] = n; } } } auto roi_ptr = memory::Alloc(dev_ctx, roi_batch_id_list.numel() * sizeof(int)); int* roi_id_data = reinterpret_cast<int*>(roi_ptr->ptr()); int bytes = roi_batch_id_list.numel() * sizeof(int); memory::Copy(gplace, roi_id_data, cplace, roi_batch_id_data, bytes, dev_ctx.stream()); in_grad->mutable_data<T>(ctx.GetPlace()); phi::funcs::SetConstant<Place, T> set_zero; set_zero(dev_ctx, in_grad, static_cast<T>(0)); int output_grad_size = out_grad->numel(); int blocks = NumBlocks(output_grad_size); int threads = kNumCUDAThreads; if (output_grad_size > 0) { GPUROIAlignBackward<T><<<blocks, threads, 0, dev_ctx.stream()>>>( output_grad_size, rois->data<T>(), out_grad->data<T>(), rois_num, spatial_scale, channels, height, width, 
pooled_height, pooled_width, sampling_ratio, roi_id_data, in_grad->mutable_data<T>(ctx.GetPlace()), aligned); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( roi_align, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignOpKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( roi_align_grad, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, float>, ops::GPUROIAlignGradOpKernel<paddle::platform::CUDADeviceContext, double>);
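// A minimal host-side sketch of the flat-index decomposition used by
// CUDA_KERNEL_LOOP in the ROI Align kernels above: the loop variable i maps to
// (n, c, ph, pw) by div/mod, which inverts the row-major linearization
// i = ((n * C + c) * H + ph) * W + pw. The helper and test values below are
// illustrative; C, H, W stand for channels, pooled_height, pooled_width.
#include <cassert>

static void decompose_index(int i, int C, int H, int W,
                            int* n, int* c, int* ph, int* pw) {
  *pw = i % W;
  *ph = (i / W) % H;
  *c = (i / W / H) % C;
  *n = i / W / H / C;
}

static void check_decomposition() {
  const int C = 256, H = 7, W = 7;
  const int n0 = 3, c0 = 17, ph0 = 4, pw0 = 6;
  const int i = ((n0 * C + c0) * H + ph0) * W + pw0;  // 38499
  int n, c, ph, pw;
  decompose_index(i, C, H, W, &n, &c, &ph, &pw);
  assert(n == n0 && c == c0 && ph == ph0 && pw == pw0);
}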
f3fba375bd3455fbc22f757e20f27cd42c061c3d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> #include <sys/time.h> #include <hip/hip_cooperative_groups.h> using namespace cooperative_groups; /////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca" //////////////large vs small data. /////test multiple warps (64k truncates. 4k merges. different SMs increase concurrency interfere merging between pages and truncate more. 64k 14 per SM. 4k no limit per SM? non-consecutive 4k cause fragmentation. consecutive 4k poorly merged also cause fragmentation. too many 4ks may cause both con and non-con poorly merged. 4ks from different SMs can also be merged. concurrency release the burden on each SM so it merges more 4ks better. But it loads larger page with redundant data?) (test on volta?). /////see blog trace (not as the blog said). /////test blog 2 warps (more warps tested). /////blog improvement test (A, B initialized in the same way). /////redo idea_test4 with correct timing (simple with/without sync). /////real applications prefetching + triggerring. void init_cpu_data(long long int* A, long long int size, double stride){ for (long long int i = 0; i < size; i++){ A[i]=1; } /* for (long long int i = 0; i < size - stride; i++){ A[i]=(i + stride); } for (long long int i = size - stride; i < size; i++){ A[i]=0; } */ } __global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){ long long int index = (blockIdx.x * blockDim.x + threadIdx.x); long long int thread_num = gridDim.x * blockDim.x; for(long long int it = 0; it < data_size; it = it + thread_num){ A[index + it]=23; } } long long unsigned time_diff(timespec start, timespec end){ struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0){ temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else{ temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } long long unsigned time_interval_ns = temp.tv_nsec; long long unsigned time_interval_s = temp.tv_sec; time_interval_s = time_interval_s * 1000000000; return time_interval_s + time_interval_ns; } #define stride 512 ///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m), __global__ void page_visitor(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride; //if(warp_id == 27){ // temp = (1 * 32 + (threadIdx.x % 32) ) * stride; //} //double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride; //double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1; long long int index = __double2ll_rd(temp); long long int value1; //if(warp_id == 0 || warp_id == 27){ if(threadIdx.x % 32 <= clock_count){ value1 = A1[index]; B1[index] = value1; } //} } __global__ void page_visitor2(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///mixed same core //thread_block block = this_thread_block(); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1; int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; 
//if(warp_id == 0){ // temp = (threadIdx.x % 32) * stride * 2 + 0 * stride; //} //if(warp_id == 27){ // temp = (threadIdx.x % 32) * stride * 2 + 1 * stride; //} //unsigned warpid; //asm("mov.u32 %0, %warpid;" : "=r"(warpid)); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1; //double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * (blockDim.x / 32) + warpid * stride; long long int index = __double2ll_rd(temp); long long int value1; //if(warp_id == 0 || warp_id == 27){ if(threadIdx.x % 32 <= clock_count){ value1 = A1[index]; B1[index] = value1; } //} } __global__ void page_visitor3(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///mixed different cores //thread_block block = this_thread_block(); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1; int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; //if(warp_id == 0){ // temp = (threadIdx.x % 32) * stride * 2 + 0 * stride; //} //if(warp_id == 27){ // temp = (threadIdx.x % 32) * stride * 2 + 1 * stride; //} //unsigned warpid; //asm("mov.u32 %0, %warpid;" : "=r"(warpid)); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1; //double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * (blockDim.x / 32) + warpid * stride; long long int index = __double2ll_rd(temp); long long int value1; //if(warp_id == 0 || warp_id == 27){ /* if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid int smid = 1; asm("mov.u32 %0, %smid;" : "=r"(smid) ); printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid); } */ if(threadIdx.x % 32 <= clock_count){ value1 = A1[index]; B1[index] = value1; } //} } ///////////long 0 - 31 same core ///////////long 0 - 64 same core ///////////long 0 - 64 different core ///////////mixed 0 - 64 same core ///////////mixed 0 - 64 different core int main(int argc, char **argv) { printf("\n"); // set device hipDeviceProp_t device_prop; //long long int dev_id = findCudaDevice(argc, (const char **) argv); long long int dev_id = 0; checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id)); //int peak_clk = 1;//kHz //checkCudaErrors(hipDeviceGetAttribute(&peak_clk, hipDeviceAttributeClockRate, dev_id)); //float clock_rate = (float) peak_clk; //printf("clock_rate:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == hipComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } /* if (device_prop.concurrentManagedAccess == 1){ printf("This device supports concurrent Managed Access.\n"); }else{ printf("This device does not support concurrent Managed Access.\n"); } */ int value1 = 1; checkCudaErrors(hipDeviceGetAttribute(&value1, hipDeviceAttributeConcurrentManagedAccess, dev_id)); //printf("hipDeviceAttributeConcurrentManagedAccess = %d\n", value1); ///* //printf("############approach\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 
2){///////////////8192 is 2m. //coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; hipLaunchKernelGGL(( page_visitor), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; hipLaunchKernelGGL(( page_visitor), dim3(block_num), dim3(512), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 16; hipLaunchKernelGGL(( page_visitor), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; hipLaunchKernelGGL(( page_visitor2), dim3(block_num), dim3(512), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(hipMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(hipMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, GPU_data_out1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); if(0){ hipLaunchKernelGGL(( gpu_initialization), dim3(8192 * 128 * scale / factor), dim3(512), 0, 0, CPU_data_in1, data_stride, data_size);///1024 per block max hipDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 16; hipLaunchKernelGGL(( page_visitor3), dim3(block_num), dim3(32), 0, 0, CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////mixed hipDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(hipFree(CPU_data_in1)); checkCudaErrors(hipFree(GPU_data_out1)); } } } } } } } printf("\n"); exit(EXIT_SUCCESS); }
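The .hip content above is the hipify output of the CUDA source recorded next under f3fba375bd3455fbc22f757e20f27cd42c061c3d.cu; besides the added #include "hip/hip_runtime.h" and the cuda* -> hip* API renames, the main rewrite is the kernel-launch syntax. A minimal sketch of that correspondence follows, using an illustrative axpy kernel that is not taken from either file:

#include <cstdio>
#include <cuda_runtime.h>

__global__ void axpy(float a, const float *x, float *y, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main() {
    const int n = 1 << 20;
    float *x, *y;
    cudaMallocManaged(&x, n * sizeof(float));    // hipify: hipMallocManaged(&x, ...)
    cudaMallocManaged(&y, n * sizeof(float));
    for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

    // CUDA triple-chevron launch; hipify rewrites it to the macro form seen above:
    //   hipLaunchKernelGGL(axpy, dim3((n + 255) / 256), dim3(256), 0, 0, 2.0f, x, y, n);
    axpy<<<(n + 255) / 256, 256>>>(2.0f, x, y, n);
    cudaDeviceSynchronize();                     // hipify: hipDeviceSynchronize()

    printf("y[0] = %f\n", y[0]);                 // expect 4.0
    cudaFree(x);                                 // hipify: hipFree(x)
    cudaFree(y);
    return 0;
}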
f3fba375bd3455fbc22f757e20f27cd42c061c3d.cu
#include <cstdio> #include <ctime> #include <vector> #include <algorithm> #include <stdlib.h> // utilities #include <helper_cuda.h> #include <time.h> #include <sys/time.h> #include <cooperative_groups.h> using namespace cooperative_groups; /////////////////////////////L1 is enabled. "ALL_CCFLAGS += -Xptxas -dlcm=ca" //////////////large vs small data. /////test multiple warps (64k truncates. 4k merges. different SMs increase concurrency interfere merging between pages and truncate more. 64k 14 per SM. 4k no limit per SM? non-consecutive 4k cause fragmentation. consecutive 4k poorly merged also cause fragmentation. too many 4ks may cause both con and non-con poorly merged. 4ks from different SMs can also be merged. concurrency release the burden on each SM so it merges more 4ks better. But it loads larger page with redundant data?) (test on volta?). /////see blog trace (not as the blog said). /////test blog 2 warps (more warps tested). /////blog improvement test (A, B initialized in the same way). /////redo idea_test4 with correct timing (simple with/without sync). /////real applications prefetching + triggerring. void init_cpu_data(long long int* A, long long int size, double stride){ for (long long int i = 0; i < size; i++){ A[i]=1; } /* for (long long int i = 0; i < size - stride; i++){ A[i]=(i + stride); } for (long long int i = size - stride; i < size; i++){ A[i]=0; } */ } __global__ void gpu_initialization(long long int *A, double data_stride, long long int data_size){ long long int index = (blockIdx.x * blockDim.x + threadIdx.x); long long int thread_num = gridDim.x * blockDim.x; for(long long int it = 0; it < data_size; it = it + thread_num){ A[index + it]=23; } } long long unsigned time_diff(timespec start, timespec end){ struct timespec temp; if ((end.tv_nsec - start.tv_nsec) < 0){ temp.tv_sec = end.tv_sec - start.tv_sec - 1; temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec; } else{ temp.tv_sec = end.tv_sec - start.tv_sec; temp.tv_nsec = end.tv_nsec - start.tv_nsec; } long long unsigned time_interval_ns = temp.tv_nsec; long long unsigned time_interval_s = temp.tv_sec; time_interval_s = time_interval_s * 1000000000; return time_interval_s + time_interval_ns; } #define stride 512 ///////////////512(4k), 1024(8k), 8192(64k), 16384(128k), 262144 (2m), 4194304 (32m), 8388608 (64m), __global__ void page_visitor(long long int *A1, long long int *B1, double data_stride, long long int clock_count){////long long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; double temp = (warp_id * 32 + (threadIdx.x % 32) ) * stride; //if(warp_id == 27){ // temp = (1 * 32 + (threadIdx.x % 32) ) * stride; //} //double temp = (blockIdx.x * blockDim.x + threadIdx.x) * stride; //double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * 2 + blockIdx.x * 1; long long int index = __double2ll_rd(temp); long long int value1; //if(warp_id == 0 || warp_id == 27){ if(threadIdx.x % 32 <= clock_count){ value1 = A1[index]; B1[index] = value1; } //} } __global__ void page_visitor2(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///mixed same core //thread_block block = this_thread_block(); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1; int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; //if(warp_id == 0){ // temp = (threadIdx.x % 32) * stride * 2 + 0 * stride; //} //if(warp_id == 27){ // 
temp = (threadIdx.x % 32) * stride * 2 + 1 * stride; //} //unsigned warpid; //asm("mov.u32 %0, %warpid;" : "=r"(warpid)); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1; //double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * (blockDim.x / 32) + warpid * stride; long long int index = __double2ll_rd(temp); long long int value1; //if(warp_id == 0 || warp_id == 27){ if(threadIdx.x % 32 <= clock_count){ value1 = A1[index]; B1[index] = value1; } //} } __global__ void page_visitor3(long long int *A1, long long int *B1, double data_stride, long long int clock_count){///mixed different cores //thread_block block = this_thread_block(); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) % 32 * 1; int warps_per_grid = (blockDim.x * gridDim.x) >> 5; long long int warp_id = (threadIdx.x + blockIdx.x * blockDim.x) >> 5; double temp = (threadIdx.x % 32) * stride * warps_per_grid + warp_id * stride; //if(warp_id == 0){ // temp = (threadIdx.x % 32) * stride * 2 + 0 * stride; //} //if(warp_id == 27){ // temp = (threadIdx.x % 32) * stride * 2 + 1 * stride; //} //unsigned warpid; //asm("mov.u32 %0, %warpid;" : "=r"(warpid)); //double temp = (blockIdx.x * blockDim.x + threadIdx.x) * 1; //double temp = ((blockIdx.x * blockDim.x + threadIdx.x) % 32) * stride * (blockDim.x / 32) + warpid * stride; long long int index = __double2ll_rd(temp); long long int value1; //if(warp_id == 0 || warp_id == 27){ /* if(threadIdx.x == 0){/////%tid %ntid %laneid %warpid %nwarpid %ctaid %nctaid %smid %nsmid %gridid int smid = 1; asm("mov.u32 %0, %smid;" : "=r"(smid) ); printf("blockIdx.x: %d, smid: %d\n", blockIdx.x, smid); } */ if(threadIdx.x % 32 <= clock_count){ value1 = A1[index]; B1[index] = value1; } //} } ///////////long 0 - 31 same core ///////////long 0 - 64 same core ///////////long 0 - 64 different core ///////////mixed 0 - 64 same core ///////////mixed 0 - 64 different core int main(int argc, char **argv) { printf("\n"); // set device cudaDeviceProp device_prop; //long long int dev_id = findCudaDevice(argc, (const char **) argv); long long int dev_id = 0; checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id)); //int peak_clk = 1;//kHz //checkCudaErrors(cudaDeviceGetAttribute(&peak_clk, cudaDevAttrClockRate, dev_id)); //float clock_rate = (float) peak_clk; //printf("clock_rate:%f\n", clock_rate); if (!device_prop.managedMemory) { // This samples requires being run on a device that supports Unified Memory fprintf(stderr, "Unified Memory not supported on this device\n"); exit(EXIT_WAIVED); } if (device_prop.computeMode == cudaComputeModeProhibited) { // This sample requires being run with a default or process exclusive mode fprintf(stderr, "This sample requires a device in either default or process exclusive mode\n"); exit(EXIT_WAIVED); } /* if (device_prop.concurrentManagedAccess == 1){ printf("This device supports concurrent Managed Access.\n"); }else{ printf("This device does not support concurrent Managed Access.\n"); } */ int value1 = 1; checkCudaErrors(cudaDeviceGetAttribute(&value1, cudaDevAttrConcurrentManagedAccess, dev_id)); //printf("cudaDevAttrConcurrentManagedAccess = %d\n", value1); ///* //printf("############approach\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; page_visitor<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; page_visitor<<<block_num, 512>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 16; page_visitor<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 1; page_visitor2<<<block_num, 512>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////long cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); } } } } } } } printf("\n"); for(long long int time = 0; time <= 0; time = time + 1){ //printf("\n####################time: %llu\n", time); //long long int coverage2 = 0; for(long long int coverage = 1; coverage <= 1; coverage = coverage * 2){///////////////8192 is 2m. 
//coverage2++; //if(coverage2 == 2){ // coverage = 1; //} //printf("############coverage: %llu\n", coverage); for(long long int rate = 1; rate <= 1; rate = rate * 2){ //printf("############rate: %llu\n", rate); //long long int offset2 = 0; //for(long long int offset = 0; offset <= 0; offset = offset * 2){///////8 for(long long int offset = 0; offset <= 0; offset = offset + 8){ //offset2++; //if(offset2 == 2){ // offset = 1; //} //printf("############offset: %llu\n", offset); for(long long int factor = 1; factor <= 1; factor = factor * 2){/////////////16384 (128k) max //printf("####################factor: %llu\n", factor); for(double data_stride = 1 * 1 * 1 * factor; data_stride <= 1 * 1 * 1 * factor; data_stride = data_stride * 2){///134217728 = 1gb, 268435456 = 2gb, 536870912 = 4gb, 1073741824 = 8gb, 2147483648 = 16gb, 4294967296 = 32gb, 8589934592 = 64gb. (index) //printf("\n"); for(long long int clock_count = 0; clock_count <= 31; clock_count = clock_count + 1){ ///long long int time2 = time; //if(time2 > clock_count){ // time2 = clock_count; //} ///////////////////////////////////////////////////////////////////CPU data begin double temp = data_stride * 512; long long int data_size = (long long int) temp; //data_size = data_size * 8192 * 512 / factor; data_size = data_size * 8192 * 128 / factor; long long int *CPU_data_in1; checkCudaErrors(cudaMallocManaged(&CPU_data_in1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////CPU data end long long int *GPU_data_out1; checkCudaErrors(cudaMallocManaged(&GPU_data_out1, sizeof(long long int) * data_size));/////////////using unified memory ///////////////////////////////////////////////////////////////////GPU data out end if(1){ double scale = 1; if(data_stride < 1){ scale = data_stride;/////////make sure threadIdx is smaller than data_size in the initialization } gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(GPU_data_out1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); if(0){ gpu_initialization<<<8192 * 128 * scale / factor, 512>>>(CPU_data_in1, data_stride, data_size);///1024 per block max cudaDeviceSynchronize(); }else{ init_cpu_data(CPU_data_in1, data_size, data_stride); } }else{ init_cpu_data(GPU_data_out1, data_size, data_stride); init_cpu_data(CPU_data_in1, data_size, data_stride); } /////////////////////////////////time struct timespec ts1; clock_gettime(CLOCK_REALTIME, &ts1); int block_num = 16; page_visitor3<<<block_num, 32>>>(CPU_data_in1, GPU_data_out1, data_stride, clock_count);/////mixed cudaDeviceSynchronize(); /////////////////////////////////time struct timespec ts2; clock_gettime(CLOCK_REALTIME, &ts2); //printf("###################data_stride%lld#########################clock_count:%lld\n", data_stride, clock_count); //printf("*\n*\n*\nruntime: %lluns\n", time_diff(ts1, ts2)); printf("%llu ", time_diff(ts1, ts2)); fflush(stdout); checkCudaErrors(cudaFree(CPU_data_in1)); checkCudaErrors(cudaFree(GPU_data_out1)); } } } } } } } printf("\n"); exit(EXIT_SUCCESS); }
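Every measurement in the benchmark above follows the same harness: allocate two managed buffers, initialize them, then bracket a single kernel launch plus cudaDeviceSynchronize() between two clock_gettime(CLOCK_REALTIME, ...) calls and print the elapsed nanoseconds. Below is a reduced, self-contained sketch of that harness; the copy kernel and the buffer size are placeholders, not the file's page_visitor variants or sizes.

#include <cstdio>
#include <ctime>
#include <cuda_runtime.h>

// Placeholder kernel standing in for the page_visitor variants: each thread copies one element.
__global__ void touch(const long long *in, long long *out, long long n) {
    long long i = blockIdx.x * (long long)blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i];
}

static long long ns_diff(timespec a, timespec b) {
    return (long long)(b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
}

int main() {
    const long long n = 1 << 20;                    // placeholder size, far smaller than the benchmark's
    long long *in = nullptr, *out = nullptr;
    cudaMallocManaged(&in, n * sizeof(long long));  // unified memory, as in the benchmark
    cudaMallocManaged(&out, n * sizeof(long long));
    for (long long i = 0; i < n; ++i) in[i] = 1;    // host-side init leaves these pages CPU-resident

    timespec t0, t1;
    clock_gettime(CLOCK_REALTIME, &t0);
    touch<<<(int)((n + 511) / 512), 512>>>(in, out, n);
    cudaDeviceSynchronize();                        // on Pascal+ the interval typically includes on-demand migration of in[] pages
    clock_gettime(CLOCK_REALTIME, &t1);

    printf("%lld ns\n", ns_diff(t0, t1));
    cudaFree(in);
    cudaFree(out);
    return 0;
}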
149d2ff2dbef48640c8e76fcd63e2f30775accc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 08.11.2018 // @author [email protected] // #include "../scalar_int.h" #include <op_boilerplate.h> #include <types/types.h> #include "../legacy_ops.h" using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void scalarAlongDimension(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { functions::scalar::ScalarIntTransform<X>::template transformCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void scalarSimpleShaped(void* x, void *y, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationBuffer) { functions::scalar::ScalarIntTransform<X>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer); } // *********************************************************************// // *********************************************************************// namespace functions { namespace scalar { //////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> __device__ void ScalarIntTransform<X>::transformCuda(void* vscalar, void *vy, Nd4jLong *yShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationBuffer) { auto scalar = reinterpret_cast<X*>(vscalar)[0]; auto y = reinterpret_cast<X*>(vy); auto params = reinterpret_cast<X*>(vparams); auto z = reinterpret_cast<X*>(vz); auto yRank = shape::rank(yShapeInfo); auto yEWS = shape::elementWiseStride(yShapeInfo); auto yShape = shape::shapeOf(yShapeInfo); auto yStride = shape::stride(yShapeInfo); auto zRank = shape::rank(zShapeInfo); auto zEWS = shape::elementWiseStride(zShapeInfo); auto zShape = shape::shapeOf(zShapeInfo); auto zStride = shape::stride(zShapeInfo); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int len; if(threadIdx.x == 0) len = shape::length(yShapeInfo); __syncthreads(); if(yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) { transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer); } else { for (Nd4jLong i = tid; i < len; i+= totalThreads) z[shape::getIndexOffset(i, zShapeInfo, len)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo, len)], 
scalar, params); } } //////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> __device__ void ScalarIntTransform<X>::transformCuda(Nd4jLong len, void* vx, void *vy, Nd4jLong yEWS, void *vparams, void *vz, Nd4jLong zEWS, int *allocationBuffer) { auto x = reinterpret_cast<X*>(vx)[0]; auto y = reinterpret_cast<X*>(vy); auto z = reinterpret_cast<X*>(vz); auto params = reinterpret_cast<X*>(vparams); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; Nd4jLong i = tid; if(yEWS == 1 && zEWS == 1) { for (; i < len; i += totalThreads) z[i] = OpType::op(y[i], x, params); } else { for (; i < len; i += totalThreads) z[i * zEWS] = OpType::op(y[i * yEWS], x, params); } } //////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> __device__ void ScalarIntTransform<X>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vscalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { auto x = reinterpret_cast<X*>(vx); auto scalars = reinterpret_cast<X*>(vscalars); auto z = reinterpret_cast<X*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); if (tadShapeInfoZ == nullptr) { tadShapeInfoZ = tadShapeInfo; tadOffsetsZ = tadOffsets; } // tad preparation auto tadEws = shape::elementWiseStride(tadShapeInfo); auto zEws = shape::elementWiseStride(tadShapeInfoZ); auto tadLength = shape::length(tadShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); auto numTads =shape::length(xShapeInfo) / tadLength; if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { X *oZ = z + tadOffsetsZ[r]; X *oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams); } } else { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { X *oZ = z + tadOffsetsZ[r]; X *oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[shape::getIndexOffset(f, tadShapeInfoZ, tadLength)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo, tadLength)], s, extraParams); } } } //////////////////////////////////////////////////////////////////////// template<typename X> template <typename OpType> _CUDA_H void ScalarIntTransform<X>::intermediateAlongDimension(dim3& launchDims, hipStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *z, Nd4jLong *zShapeInfo, void *scalars, void *extraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { hipLaunchKernelGGL(( scalarAlongDimension<X, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } //////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> void _CUDA_H ScalarIntTransform<X>::intermediateShaped(dim3& launchDims, hipStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void* vscalar, void *vextraParams, int *allocPointer){ hipLaunchKernelGGL(( scalarSimpleShaped<X, 
OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vx, vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer);
}

////////////////////////////////////////////////////////////////////////
template<typename X>
void ScalarIntTransform<X>::executeCudaShaped(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void* vscalar, void *vextraParams) {

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("H14 opNum:[%i]\n", opNum);

    DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, vextraParams, nullptr), SCALAR_INT_OPS);
}

////////////////////////////////////////////////////////////////////////
template<typename X>
void ScalarIntTransform<X>::executeCudaAlongDimension(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vscalars, void *vextraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {

    DISPATCH_BY_OPNUM_T(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_INT_OPS);
}

BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ScalarIntTransform, , INTEGER_TYPES);

template<typename X>
template <typename OpType>
void ScalarIntTransform<X>::transform(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}

template<typename X>
void ScalarIntTransform<X>::transform(int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}

template<typename X>
void ScalarIntTransform<X>::transform(const int opNum, void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}

template<typename X>
void ScalarIntTransform<X>::transform(const int opNum, void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}

template<typename X>
template<typename OpType>
void ScalarIntTransform<X>::transform(void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}

template<typename X>
template<typename OpType>
void ScalarIntTransform<X>::transform(void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}

}
}
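The transformCuda overloads in the file above all come down to one device-side pattern: a grid-stride loop that applies OpType::op between each element and a single scalar, with an element-wise-stride fast path and a shape::getIndexOffset fallback. Below is a self-contained sketch of just the grid-stride core, assuming a placeholder Add functor and int elements rather than the library's SCALAR_INT_OPS dispatch:

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder op standing in for the SCALAR_INT_OPS functors.
struct Add {
    __device__ static int op(int y, int scalar) { return y + scalar; }
};

// Grid-stride loop: any grid size covers the whole buffer, matching the EWS == 1 fast path above.
template <typename OpType>
__global__ void scalarApply(const int *y, int *z, int scalar, long long len) {
    long long stride = (long long)gridDim.x * blockDim.x;
    for (long long i = blockIdx.x * (long long)blockDim.x + threadIdx.x; i < len; i += stride)
        z[i] = OpType::op(y[i], scalar);
}

int main() {
    const long long n = 1 << 20;
    int *y, *z;
    cudaMallocManaged(&y, n * sizeof(int));
    cudaMallocManaged(&z, n * sizeof(int));
    for (long long i = 0; i < n; ++i) y[i] = (int)i;

    scalarApply<Add><<<256, 256>>>(y, z, 7, n);
    cudaDeviceSynchronize();

    printf("z[42] = %d\n", z[42]);  // expect 49
    cudaFree(y);
    cudaFree(z);
    return 0;
}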
149d2ff2dbef48640c8e76fcd63e2f30775accc6.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 08.11.2018 // @author [email protected] // #include "../scalar_int.h" #include <op_boilerplate.h> #include <types/types.h> #include "../legacy_ops.h" using namespace simdOps; //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void scalarAlongDimension(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { functions::scalar::ScalarIntTransform<X>::template transformCuda<OpType>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } //////////////////////////////////////////////////////////////////////// template <typename X, typename OpType> __global__ void scalarSimpleShaped(void* x, void *y, Nd4jLong *xShapeInfo, void *params, void *z, Nd4jLong *zShapeInfo, int *allocationBuffer) { functions::scalar::ScalarIntTransform<X>::template transformCuda<OpType>(y, x, xShapeInfo, params, z, zShapeInfo, allocationBuffer); } // *********************************************************************// // *********************************************************************// namespace functions { namespace scalar { //////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> __device__ void ScalarIntTransform<X>::transformCuda(void* vscalar, void *vy, Nd4jLong *yShapeInfo, void *vparams, void *vz, Nd4jLong *zShapeInfo, int *allocationBuffer) { auto scalar = reinterpret_cast<X*>(vscalar)[0]; auto y = reinterpret_cast<X*>(vy); auto params = reinterpret_cast<X*>(vparams); auto z = reinterpret_cast<X*>(vz); auto yRank = shape::rank(yShapeInfo); auto yEWS = shape::elementWiseStride(yShapeInfo); auto yShape = shape::shapeOf(yShapeInfo); auto yStride = shape::stride(yShapeInfo); auto zRank = shape::rank(zShapeInfo); auto zEWS = shape::elementWiseStride(zShapeInfo); auto zShape = shape::shapeOf(zShapeInfo); auto zStride = shape::stride(zShapeInfo); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int len; if(threadIdx.x == 0) len = shape::length(yShapeInfo); __syncthreads(); if(yEWS >= 1 && zEWS >= 1 && shape::order(yShapeInfo) == shape::order(zShapeInfo)) { transformCuda<OpType>(len, vscalar, vy, yEWS, vparams, vz, zEWS, allocationBuffer); } else { for (Nd4jLong i = tid; i < len; i+= totalThreads) z[shape::getIndexOffset(i, zShapeInfo, len)] = OpType::op(y[shape::getIndexOffset(i, yShapeInfo, len)], scalar, params); } } 
//////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> __device__ void ScalarIntTransform<X>::transformCuda(Nd4jLong len, void* vx, void *vy, Nd4jLong yEWS, void *vparams, void *vz, Nd4jLong zEWS, int *allocationBuffer) { auto x = reinterpret_cast<X*>(vx)[0]; auto y = reinterpret_cast<X*>(vy); auto z = reinterpret_cast<X*>(vz); auto params = reinterpret_cast<X*>(vparams); int totalThreads = gridDim.x * blockDim.x; int tid = blockIdx.x * blockDim.x + threadIdx.x; Nd4jLong i = tid; if(yEWS == 1 && zEWS == 1) { for (; i < len; i += totalThreads) z[i] = OpType::op(y[i], x, params); } else { for (; i < len; i += totalThreads) z[i * zEWS] = OpType::op(y[i * yEWS], x, params); } } //////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> __device__ void ScalarIntTransform<X>::transformCuda(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, void *vscalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { auto x = reinterpret_cast<X*>(vx); auto scalars = reinterpret_cast<X*>(vscalars); auto z = reinterpret_cast<X*>(vz); auto extraParams = reinterpret_cast<X*>(vextraParams); if (tadShapeInfoZ == nullptr) { tadShapeInfoZ = tadShapeInfo; tadOffsetsZ = tadOffsets; } // tad preparation auto tadEws = shape::elementWiseStride(tadShapeInfo); auto zEws = shape::elementWiseStride(tadShapeInfoZ); auto tadLength = shape::length(tadShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); auto numTads =shape::length(xShapeInfo) / tadLength; if (tadEws > 0 && zEws > 0 && shape::order(tadShapeInfo) == shape::order(zShapeInfo)) { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { X *oZ = z + tadOffsetsZ[r]; X *oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[f * zEws] = OpType::op(oX[f * tadEws], s, extraParams); } } else { // main loop, rolling over tads for (int r = blockIdx.x; r < numTads; r += gridDim.x) { X *oZ = z + tadOffsetsZ[r]; X *oX = x + tadOffsets[r]; auto s = scalars[r]; for (int f = threadIdx.x; f < tadLength; f += blockDim.x) oZ[shape::getIndexOffset(f, tadShapeInfoZ, tadLength)] = OpType::op(oX[shape::getIndexOffset(f, tadShapeInfo, tadLength)], s, extraParams); } } } //////////////////////////////////////////////////////////////////////// template<typename X> template <typename OpType> _CUDA_H void ScalarIntTransform<X>::intermediateAlongDimension(dim3& launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShapeInfo, void *z, Nd4jLong *zShapeInfo, void *scalars, void *extraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) { scalarAlongDimension<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, extraParams, z, zShapeInfo, scalars, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ); } //////////////////////////////////////////////////////////////////////// template<typename X> template<typename OpType> void _CUDA_H ScalarIntTransform<X>::intermediateShaped(dim3& launchDims, cudaStream_t *stream, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void* vscalar, void *vextraParams, int *allocPointer){ scalarSimpleShaped<X, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vx, 
vscalar, xShapeInfo, vextraParams, vz, zShapeInfo, allocPointer);
}

////////////////////////////////////////////////////////////////////////
template<typename X>
void ScalarIntTransform<X>::executeCudaShaped(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void* vscalar, void *vextraParams) {

    if (nd4j::Environment::getInstance()->isDebugAndVerbose())
        printf("H14 opNum:[%i]\n", opNum);

    DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalar, vextraParams, nullptr), SCALAR_INT_OPS);
}

////////////////////////////////////////////////////////////////////////
template<typename X>
void ScalarIntTransform<X>::executeCudaAlongDimension(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, void *vz, Nd4jLong *zShapeInfo, void *vscalars, void *vextraParams, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {

    DISPATCH_BY_OPNUM_T(intermediateAlongDimension, PARAMS(launchDims, stream, vx, xShapeInfo, vz, zShapeInfo, vscalars, vextraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), SCALAR_INT_OPS);
}

BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT ScalarIntTransform, , INTEGER_TYPES);

template<typename X>
template <typename OpType>
void ScalarIntTransform<X>::transform(void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}

template<typename X>
void ScalarIntTransform<X>::transform(int opNum, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, void *scalars, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, Nd4jLong *tadShapeInfoZ, Nd4jLong *tadOffsetsZ) {
}

template<typename X>
void ScalarIntTransform<X>::transform(const int opNum, void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}

template<typename X>
void ScalarIntTransform<X>::transform(const int opNum, void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}

template<typename X>
template<typename OpType>
void ScalarIntTransform<X>::transform(void *x, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *scalar, void *extraParams) {
}

template<typename X>
template<typename OpType>
void ScalarIntTransform<X>::transform(void *x, Nd4jLong xStride, void *result, Nd4jLong resultStride, void *scalar, void *extraParams, const Nd4jLong n) {
}

}
}
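The along-dimension path, identical in the .hip and .cu versions above, applies one scalar per TAD: blocks stride over sub-arrays while threads stride within each one. Below is a simplified sketch in which contiguous, equal-length rows stand in for TADs; the row layout and the add op are assumptions for illustration only, not the library's shape machinery.

#include <cstdio>
#include <cuda_runtime.h>

// Each row r of a [numRows x rowLen] matrix gets its own scalar scalars[r] added to it,
// mirroring the blocks-over-TADs / threads-within-TAD split of transformCuda above.
__global__ void scalarPerRow(const int *x, int *z, const int *scalars,
                             int numRows, int rowLen) {
    for (int r = blockIdx.x; r < numRows; r += gridDim.x) {       // blocks stride over rows
        int s = scalars[r];
        const int *row = x + (long long)r * rowLen;
        int *out = z + (long long)r * rowLen;
        for (int c = threadIdx.x; c < rowLen; c += blockDim.x)    // threads stride within a row
            out[c] = row[c] + s;
    }
}

int main() {
    const int numRows = 128, rowLen = 1024;
    int *x, *z, *s;
    cudaMallocManaged(&x, (size_t)numRows * rowLen * sizeof(int));
    cudaMallocManaged(&z, (size_t)numRows * rowLen * sizeof(int));
    cudaMallocManaged(&s, numRows * sizeof(int));
    for (int r = 0; r < numRows; ++r) {
        s[r] = r;
        for (int c = 0; c < rowLen; ++c) x[(long long)r * rowLen + c] = c;
    }

    scalarPerRow<<<64, 256>>>(x, z, s, numRows, rowLen);
    cudaDeviceSynchronize();

    printf("z[3][10] = %d\n", z[3 * rowLen + 10]);  // expect 13
    cudaFree(x); cudaFree(z); cudaFree(s);
    return 0;
}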
514389adb6afa0403fa53785c91032ff38af65c1.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/algorithms.hpp> #include <cugraph/experimental/detail/graph_utils.cuh> #include <cugraph/experimental/graph_functions.hpp> #include <cugraph/experimental/graph_view.hpp> #include <cugraph/patterns/copy_to_adj_matrix_row_col.cuh> #include <cugraph/patterns/update_frontier_v_push_if_out_nbr.cuh> #include <cugraph/patterns/vertex_frontier.cuh> #include <cugraph/utilities/device_comm.cuh> #include <cugraph/utilities/error.hpp> #include <cugraph/utilities/shuffle_comm.cuh> #include <cugraph/vertex_partition_device.cuh> #include <raft/handle.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/optional.h> #include <thrust/shuffle.h> #include <thrust/tuple.h> #include <algorithm> #include <limits> #include <random> #include <type_traits> #include <vector> namespace cugraph { namespace experimental { namespace { // FIXME: this function (after modification) may be useful for SSSP with the near-far method to // determine the near-far threshold. 
// add new roots till the sum of the degrees first becomes no smaller than degree_sum_threshold and // returns a triplet of (new roots, number of scanned candidates, sum of the degrees of the new // roots) template <typename GraphViewType> std::tuple<rmm::device_uvector<typename GraphViewType::vertex_type>, typename GraphViewType::vertex_type, typename GraphViewType::edge_type> accumulate_new_roots(raft::handle_t const &handle, vertex_partition_device_t<GraphViewType> vertex_partition, typename GraphViewType::vertex_type const *components, typename GraphViewType::edge_type const *degrees, typename GraphViewType::vertex_type const *candidate_first, typename GraphViewType::vertex_type const *candidate_last, typename GraphViewType::vertex_type max_new_roots, typename GraphViewType::edge_type degree_sum_threshold) { using vertex_t = typename GraphViewType::vertex_type; using edge_t = typename GraphViewType::edge_type; // tuning parameter (time to scan max_scan_size elements should not take significantly longer than // scanning a single element) vertex_t max_scan_size = static_cast<vertex_t>(handle.get_device_properties().multiProcessorCount) * vertex_t{16384}; rmm::device_uvector<vertex_t> new_roots(max_new_roots, handle.get_stream_view()); vertex_t num_new_roots{0}; vertex_t num_scanned{0}; edge_t degree_sum{0}; while ((candidate_first + num_scanned < candidate_last) && (degree_sum < degree_sum_threshold) && (num_new_roots < max_new_roots)) { auto scan_size = ::min( max_scan_size, static_cast<vertex_t>(thrust::distance(candidate_first + num_scanned, candidate_last))); rmm::device_uvector<vertex_t> tmp_new_roots(scan_size, handle.get_stream_view()); rmm::device_uvector<vertex_t> tmp_indices(tmp_new_roots.size(), handle.get_stream_view()); auto input_pair_first = thrust::make_zip_iterator(thrust::make_tuple( candidate_first + num_scanned, thrust::make_counting_iterator(vertex_t{0}))); auto output_pair_first = thrust::make_zip_iterator(thrust::make_tuple(tmp_new_roots.begin(), tmp_indices.begin())); tmp_new_roots.resize( static_cast<vertex_t>(thrust::distance( output_pair_first, thrust::copy_if( rmm::exec_policy(handle.get_stream_view()), input_pair_first, input_pair_first + scan_size, output_pair_first, [vertex_partition, components] __device__(auto pair) { auto v = thrust::get<0>(pair); return (components[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)] == invalid_component_id<vertex_t>::value); }))), handle.get_stream_view()); tmp_indices.resize(tmp_new_roots.size(), handle.get_stream_view()); if (tmp_new_roots.size() > 0) { rmm::device_uvector<edge_t> tmp_cumulative_degrees(tmp_new_roots.size(), handle.get_stream_view()); thrust::transform( rmm::exec_policy(handle.get_stream_view()), tmp_new_roots.begin(), tmp_new_roots.end(), tmp_cumulative_degrees.begin(), [vertex_partition, degrees] __device__(auto v) { return degrees[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)]; }); thrust::inclusive_scan(rmm::exec_policy(handle.get_stream_view()), tmp_cumulative_degrees.begin(), tmp_cumulative_degrees.end(), tmp_cumulative_degrees.begin()); auto last = thrust::lower_bound(rmm::exec_policy(handle.get_stream_view()), tmp_cumulative_degrees.begin(), tmp_cumulative_degrees.end(), degree_sum_threshold - degree_sum); if (last != tmp_cumulative_degrees.end()) { ++last; } auto tmp_num_new_roots = ::min(static_cast<vertex_t>(thrust::distance(tmp_cumulative_degrees.begin(), last)), max_new_roots - num_new_roots); thrust::copy(rmm::exec_policy(handle.get_stream_view()), 
tmp_new_roots.begin(), tmp_new_roots.begin() + tmp_num_new_roots, new_roots.begin() + num_new_roots); num_new_roots += tmp_num_new_roots; vertex_t tmp_num_scanned{0}; edge_t tmp_degree_sum{0}; if (tmp_num_new_roots == static_cast<vertex_t>(tmp_new_roots.size())) { tmp_num_scanned = scan_size; } else { raft::update_host( &tmp_num_scanned, tmp_indices.data() + tmp_num_new_roots, size_t{1}, handle.get_stream()); } raft::update_host(&tmp_degree_sum, tmp_cumulative_degrees.data() + (tmp_num_new_roots - 1), size_t{1}, handle.get_stream()); handle.get_stream_view().synchronize(); num_scanned += tmp_num_scanned; degree_sum += tmp_degree_sum; } else { num_scanned += scan_size; } } new_roots.resize(num_new_roots, handle.get_stream_view()); new_roots.shrink_to_fit(handle.get_stream_view()); return std::make_tuple(std::move(new_roots), num_scanned, degree_sum); } // FIXME: to silence the spurious warning (missing return statement ...) due to the nvcc bug // (https://stackoverflow.com/questions/64523302/cuda-missing-return-statement-at-end-of-non-void- // function-in-constexpr-if-fun) template <typename GraphViewType> struct v_op_t { using vertex_type = typename GraphViewType::vertex_type; vertex_partition_device_t<GraphViewType> vertex_partition{}; vertex_type *level_components{}; decltype(thrust::make_zip_iterator(thrust::make_tuple( static_cast<vertex_type *>(nullptr), static_cast<vertex_type *>(nullptr)))) edge_buffer_first{}; // FIXME: we can use cuda::atomic instead but currently on a system with x86 + GPU, this requires // placing the atomic barrier on managed memory and this adds additional complication. size_t *num_edge_inserts{}; size_t next_bucket_idx{}; size_t conflict_bucket_idx{}; // relevant only if GraphViewType::is_multi_gpu is true template <bool multi_gpu = GraphViewType::is_multi_gpu> __device__ std::enable_if_t<multi_gpu, thrust::optional<thrust::tuple<size_t, std::byte>>> operator()(thrust::tuple<vertex_type, vertex_type> tagged_v, int v_val /* dummy */) const { auto tag = thrust::get<1>(tagged_v); auto v_offset = vertex_partition.get_local_vertex_offset_from_vertex_nocheck(thrust::get<0>(tagged_v)); // FIXME: better switch to atomic_ref after // https://github.com/nvidia/libcudacxx/milestone/2 auto old = atomicCAS(level_components + v_offset, invalid_component_id<vertex_type>::value, tag); if (old != invalid_component_id<vertex_type>::value && old != tag) { // conflict return thrust::optional<thrust::tuple<size_t, std::byte>>{ thrust::make_tuple(conflict_bucket_idx, std::byte{0} /* dummy */)}; } else { return (old == invalid_component_id<vertex_type>::value) ? 
thrust::optional<thrust::tuple<size_t, std::byte>>{thrust::make_tuple( next_bucket_idx, std::byte{0} /* dummy */)} : thrust::nullopt; } } template <bool multi_gpu = GraphViewType::is_multi_gpu> __device__ std::enable_if_t<!multi_gpu, thrust::optional<thrust::tuple<size_t, std::byte>>> operator()(thrust::tuple<vertex_type, vertex_type> tagged_v, int v_val /* dummy */) const { return thrust::optional<thrust::tuple<size_t, std::byte>>{ thrust::make_tuple(next_bucket_idx, std::byte{0} /* dummy */)}; } }; template <typename GraphViewType> void weakly_connected_components_impl(raft::handle_t const &handle, GraphViewType const &push_graph_view, typename GraphViewType::vertex_type *components, bool do_expensive_check) { using vertex_t = typename GraphViewType::vertex_type; using edge_t = typename GraphViewType::edge_type; using weight_t = typename GraphViewType::weight_type; static_assert(std::is_integral<vertex_t>::value, "GraphViewType::vertex_type should be integral."); static_assert(!GraphViewType::is_adj_matrix_transposed, "GraphViewType should support the push model."); auto const num_vertices = push_graph_view.get_number_of_vertices(); if (num_vertices == 0) { return; } // 1. check input arguments CUGRAPH_EXPECTS( push_graph_view.is_symmetric(), "Invalid input argument: input graph should be symmetric for weakly connected components."); if (do_expensive_check) { // nothing to do } // 2. recursively run multi-root frontier expansion enum class Bucket { cur, next, conflict /* relevant only if GraphViewType::is_multi_gpu is true */, num_buckets }; // tuning parameter to balance work per iteration (should be large enough to be throughput // bounded) vs # conflicts between frontiers with different roots (# conflicts == # edges for the // next level) auto degree_sum_threshold = static_cast<edge_t>(handle.get_device_properties().multiProcessorCount) * edge_t{1024}; size_t num_levels{0}; graph_t<vertex_t, edge_t, typename GraphViewType::weight_type, GraphViewType::is_adj_matrix_transposed, GraphViewType::is_multi_gpu> level_graph(handle); rmm::device_uvector<vertex_t> level_renumber_map(0, handle.get_stream_view()); std::vector<rmm::device_uvector<vertex_t>> level_component_vectors{}; // vertex ID in this level to the component ID in the previous level std::vector<rmm::device_uvector<vertex_t>> level_renumber_map_vectors{}; std::vector<vertex_t> level_local_vertex_first_vectors{}; while (true) { auto level_graph_view = num_levels == 0 ? push_graph_view : level_graph.view(); vertex_partition_device_t<GraphViewType> vertex_partition(level_graph_view); level_component_vectors.push_back(rmm::device_uvector<vertex_t>( num_levels == 0 ? vertex_t{0} : level_graph_view.get_number_of_local_vertices(), handle.get_stream_view())); level_renumber_map_vectors.push_back(std::move(level_renumber_map)); level_local_vertex_first_vectors.push_back(level_graph_view.get_local_vertex_first()); auto level_components = num_levels == 0 ? components : level_component_vectors[num_levels].data(); ++num_levels; auto degrees = level_graph_view.compute_out_degrees(handle); // 2-1. filter out isolated vertices auto pair_first = thrust::make_zip_iterator(thrust::make_tuple( thrust::make_counting_iterator(level_graph_view.get_local_vertex_first()), degrees.begin())); thrust::transform(rmm::exec_policy(handle.get_stream_view()), pair_first, pair_first + level_graph_view.get_number_of_local_vertices(), level_components, [] __device__(auto pair) { auto v = thrust::get<0>(pair); auto degree = thrust::get<1>(pair); return degree > 0 ? 
invalid_component_id<vertex_t>::value : v; }); // 2-2. initialize new root candidates // Vertices are first partitioned to high-degree vertices and low-degree vertices, we can reach // degree_sum_threshold with fewer high-degree vertices leading to a higher compression ratio. // The degree threshold is set to ceil(sqrt(degree_sum_threshold * 2)); this guarantees the // compression ratio of at least 50% (ignoring rounding errors) even if all the selected roots // fall into a single connected component as there will be at least as many non-root vertices in // the connected component (assuming there are no multi-edges, if there are multi-edges, we may // not get 50% compression in # vertices but still get compression in # edges). the remaining // low-degree vertices will be randomly shuffled so comparable ratios of vertices will be // selected as roots in the remaining connected components. rmm::device_uvector<vertex_t> new_root_candidates( level_graph_view.get_number_of_local_vertices(), handle.get_stream_view()); new_root_candidates.resize( thrust::distance( new_root_candidates.begin(), thrust::copy_if( rmm::exec_policy(handle.get_stream_view()), thrust::make_counting_iterator(level_graph_view.get_local_vertex_first()), thrust::make_counting_iterator(level_graph_view.get_local_vertex_last()), new_root_candidates.begin(), [vertex_partition, level_components] __device__(auto v) { return level_components[vertex_partition.get_local_vertex_offset_from_vertex_nocheck( v)] == invalid_component_id<vertex_t>::value; })), handle.get_stream_view()); auto high_degree_partition_last = thrust::stable_partition( rmm::exec_policy(handle.get_stream_view()), new_root_candidates.begin(), new_root_candidates.end(), [vertex_partition, degrees = degrees.data(), threshold = static_cast<edge_t>( ceil(sqrt(static_cast<double>(degree_sum_threshold) * 2.0)))] __device__(auto v) { return degrees[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)] >= threshold; }); thrust::shuffle(rmm::exec_policy(handle.get_stream_view()), high_degree_partition_last, new_root_candidates.end(), thrust::default_random_engine()); double constexpr max_new_roots_ratio = 0.05; // to avoid selecting all the vertices as roots leading to zero compression static_assert(max_new_roots_ratio > 0.0); auto max_new_roots = ::max( static_cast<vertex_t>(new_root_candidates.size() * max_new_roots_ratio), vertex_t{1}); auto init_max_new_roots = max_new_roots; // to avoid selecting too many (possibly all) vertices as initial roots leading to no // compression in the worst case. if (GraphViewType::is_multi_gpu && (level_graph_view.get_number_of_vertices() <= static_cast<vertex_t>(handle.get_comms().get_size() * ceil(1.0 / max_new_roots_ratio)))) { auto &comm = handle.get_comms(); auto const comm_rank = comm.get_rank(); auto const comm_size = comm.get_size(); // FIXME: a temporary workaround for a NCCL(2.9.6) bug that causes a hang on DGX1 (due to // remote memory allocation), host_scalar_gather is sufficient otherwise. 
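    // Editor's sketch (not in the original code): once the NCCL issue is resolved and a
    // host_scalar_scatter primitive exists (see the FIXMEs below -- it is not yet available),
    // the intent of this block reduces to a plain gather/scatter pair, roughly:
    //
    //   auto counts = host_scalar_gather(comm, new_root_candidates.size(), int{0}, handle.get_stream());
    //   // rank 0 builds init_max_new_root_counts from `counts` exactly as below, then scatters
    //   // one vertex_t back to every rank so each rank learns its own initial root budget.
    //
    // The `#if 1` branches below emulate this with host_scalar_allgather + device_bcast, which
    // moves more data than strictly necessary but avoids the problematic collective.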
#if 1 auto new_root_candidate_counts = host_scalar_allgather(comm, new_root_candidates.size(), handle.get_stream()); #else auto new_root_candidate_counts = host_scalar_gather(comm, new_root_candidates.size(), int{0}, handle.get_stream()); #endif if (comm_rank == 0) { std::vector<int> gpuids{}; gpuids.reserve( std::reduce(new_root_candidate_counts.begin(), new_root_candidate_counts.end())); for (size_t i = 0; i < new_root_candidate_counts.size(); ++i) { gpuids.insert(gpuids.end(), new_root_candidate_counts[i], static_cast<int>(i)); } std::random_device rd{}; std::shuffle(gpuids.begin(), gpuids.end(), std::mt19937(rd())); gpuids.resize( ::max(static_cast<vertex_t>(gpuids.size() * max_new_roots_ratio), vertex_t{1})); std::vector<vertex_t> init_max_new_root_counts(comm_size, vertex_t{0}); for (size_t i = 0; i < gpuids.size(); ++i) { ++init_max_new_root_counts[gpuids[i]]; } // FIXME: we need to add host_scalar_scatter #if 1 rmm::device_uvector<vertex_t> d_counts(comm_size, handle.get_stream_view()); raft::update_device(d_counts.data(), init_max_new_root_counts.data(), init_max_new_root_counts.size(), handle.get_stream()); device_bcast( comm, d_counts.data(), d_counts.data(), d_counts.size(), int{0}, handle.get_stream()); raft::update_host( &init_max_new_roots, d_counts.data() + comm_rank, size_t{1}, handle.get_stream()); #else iinit_max_new_roots = host_scalar_scatter(comm, init_max_new_root_counts.data(), int{0}, handle.get_stream()); #endif } else { // FIXME: we need to add host_scalar_scatter #if 1 rmm::device_uvector<vertex_t> d_counts(comm_size, handle.get_stream_view()); device_bcast( comm, d_counts.data(), d_counts.data(), d_counts.size(), int{0}, handle.get_stream()); raft::update_host( &init_max_new_roots, d_counts.data() + comm_rank, size_t{1}, handle.get_stream()); #else iinit_max_new_roots = host_scalar_scatter(comm, init_max_new_root_counts.data(), int{0}, handle.get_stream()); #endif } handle.get_stream_view().synchronize(); } // 2-3. initialize vertex frontier, edge_buffer, and col_components (if multi-gpu) VertexFrontier<vertex_t, vertex_t, GraphViewType::is_multi_gpu, static_cast<size_t>(Bucket::num_buckets)> vertex_frontier(handle); vertex_t next_candidate_offset{0}; edge_t edge_count{0}; auto edge_buffer = allocate_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>(0, handle.get_stream()); // FIXME: we can use cuda::atomic instead but currently on a system with x86 + GPU, this // requires placing the atomic variable on managed memory and this make it less attractive. rmm::device_scalar<size_t> num_edge_inserts(size_t{0}, handle.get_stream_view()); rmm::device_uvector<vertex_t> col_components( GraphViewType::is_multi_gpu ? level_graph_view.get_number_of_local_adj_matrix_partition_cols() : vertex_t{0}, handle.get_stream_view()); if (GraphViewType::is_multi_gpu) { thrust::fill(rmm::exec_policy(handle.get_stream_view()), col_components.begin(), col_components.end(), invalid_component_id<vertex_t>::value); } // 2.4 iterate till every vertex gets visited size_t iter{0}; while (true) { if ((edge_count < degree_sum_threshold) && (next_candidate_offset < static_cast<vertex_t>(new_root_candidates.size()))) { auto [new_roots, num_scanned, degree_sum] = accumulate_new_roots(handle, vertex_partition, level_components, degrees.data(), new_root_candidates.data() + next_candidate_offset, new_root_candidates.data() + new_root_candidates.size(), iter == 0 ? 
init_max_new_roots : max_new_roots, degree_sum_threshold - edge_count); next_candidate_offset += num_scanned; edge_count += degree_sum; thrust::sort( rmm::exec_policy(handle.get_stream_view()), new_roots.begin(), new_roots.end()); thrust::for_each( rmm::exec_policy(handle.get_stream_view()), new_roots.begin(), new_roots.end(), [vertex_partition, components = level_components] __device__(auto c) { components[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(c)] = c; }); auto pair_first = thrust::make_zip_iterator(thrust::make_tuple(new_roots.begin(), new_roots.begin())); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .insert(pair_first, pair_first + new_roots.size()); } if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) { break; } if (GraphViewType::is_multi_gpu) { copy_to_adj_matrix_col( handle, level_graph_view, thrust::get<0>(vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .begin() .get_iterator_tuple()), thrust::get<0>(vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .end() .get_iterator_tuple()), level_components, col_components.begin()); } auto max_pushes = GraphViewType::is_multi_gpu ? compute_num_out_nbrs_from_frontier( handle, level_graph_view, vertex_frontier, static_cast<size_t>(Bucket::cur)) : edge_count; // FIXME: if we use cuco::static_map (no duplicates, ideally we need static_set), edge_buffer // size cannot exceed (# roots)^2 and we can avoid additional sort & unique (but resizing the // buffer may be more expensive). auto old_num_edge_inserts = num_edge_inserts.value(handle.get_stream_view()); resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, old_num_edge_inserts + max_pushes, handle.get_stream()); update_frontier_v_push_if_out_nbr( handle, level_graph_view, vertex_frontier, static_cast<size_t>(Bucket::cur), std::vector<size_t>{static_cast<size_t>(Bucket::next)}, thrust::make_counting_iterator(0) /* dummy */, thrust::make_counting_iterator(0) /* dummy */, [col_components = GraphViewType::is_multi_gpu ? col_components.data() : level_components, col_first = level_graph_view.get_local_adj_matrix_partition_col_first(), edge_buffer_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), num_edge_inserts = num_edge_inserts.data()] __device__(auto tagged_src, vertex_t dst, auto src_val, auto dst_val) { auto tag = thrust::get<1>(tagged_src); auto col_offset = dst - col_first; // FIXME: better switch to atomic_ref after // https://github.com/nvidia/libcudacxx/milestone/2 auto old = atomicCAS(col_components + col_offset, invalid_component_id<vertex_t>::value, tag); if (old != invalid_component_id<vertex_t>::value && old != tag) { // conflict static_assert(sizeof(unsigned long long int) == sizeof(size_t)); auto edge_idx = atomicAdd(reinterpret_cast<unsigned long long int *>(num_edge_inserts), static_cast<unsigned long long int>(1)); // keep only the edges in the lower triangular part *(edge_buffer_first + edge_idx) = tag >= old ? thrust::make_tuple(tag, old) : thrust::make_tuple(old, tag); } return (old == invalid_component_id<vertex_t>::value) ? 
thrust::optional<vertex_t>{tag} : thrust::nullopt; }, reduce_op::null(), thrust::make_constant_iterator(0) /* dummy */, thrust::make_discard_iterator() /* dummy */, v_op_t<GraphViewType>{ vertex_partition, level_components, get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), num_edge_inserts.data(), static_cast<size_t>(Bucket::next), static_cast<size_t>(Bucket::conflict)}); if (GraphViewType::is_multi_gpu) { auto cur_num_edge_inserts = num_edge_inserts.value(handle.get_stream_view()); auto &conflict_bucket = vertex_frontier.get_bucket(static_cast<size_t>(Bucket::conflict)); resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, cur_num_edge_inserts + conflict_bucket.size(), handle.get_stream()); thrust::for_each( rmm::exec_policy(handle.get_stream_view()), conflict_bucket.begin(), conflict_bucket.end(), [vertex_partition, level_components, edge_buffer_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), num_edge_inserts = num_edge_inserts.data()] __device__(auto tagged_v) { auto v_offset = vertex_partition.get_local_vertex_offset_from_vertex_nocheck( thrust::get<0>(tagged_v)); auto old = *(level_components + v_offset); auto tag = thrust::get<1>(tagged_v); static_assert(sizeof(unsigned long long int) == sizeof(size_t)); auto edge_idx = atomicAdd(reinterpret_cast<unsigned long long int *>(num_edge_inserts), static_cast<unsigned long long int>(1)); // keep only the edges in the lower triangular part *(edge_buffer_first + edge_idx) = tag >= old ? thrust::make_tuple(tag, old) : thrust::make_tuple(old, tag); }); conflict_bucket.clear(); } // maintain the list of sorted unique edges (we can avoid this if we use cuco::static_map(no // duplicates, ideally we need static_set)). auto new_num_edge_inserts = num_edge_inserts.value(handle.get_stream_view()); if (new_num_edge_inserts > old_num_edge_inserts) { auto edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); thrust::sort(rmm::exec_policy(handle.get_stream_view()), edge_first + old_num_edge_inserts, edge_first + new_num_edge_inserts); if (old_num_edge_inserts > 0) { auto tmp_edge_buffer = allocate_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( new_num_edge_inserts, handle.get_stream()); auto tmp_edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(tmp_edge_buffer); thrust::merge(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_first + old_num_edge_inserts, edge_first + old_num_edge_inserts, edge_first + new_num_edge_inserts, tmp_edge_first); edge_buffer = std::move(tmp_edge_buffer); } edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); auto unique_edge_last = thrust::unique(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_first + new_num_edge_inserts); auto num_unique_edges = static_cast<size_t>(thrust::distance(edge_first, unique_edge_last)); num_edge_inserts.set_value(num_unique_edges, handle.get_stream_view()); } vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear(); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit(); vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur), static_cast<size_t>(Bucket::next)); edge_count = thrust::transform_reduce( rmm::exec_policy(handle.get_stream_view()), thrust::get<0>(vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .begin() .get_iterator_tuple()), thrust::get<0>( vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).end().get_iterator_tuple()), 
[vertex_partition, degrees = degrees.data()] __device__(auto v) { return degrees[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)]; }, edge_t{0}, thrust::plus<edge_t>()); ++iter; } // 2-5. construct the next level graph from the edges emitted on conflicts auto num_inserts = num_edge_inserts.value(handle.get_stream_view()); auto aggregate_num_inserts = num_inserts; if (GraphViewType::is_multi_gpu) { auto &comm = handle.get_comms(); aggregate_num_inserts = host_scalar_allreduce(comm, num_inserts, handle.get_stream()); } if (aggregate_num_inserts > 0) { resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, static_cast<size_t>(num_inserts * 2), handle.get_stream()); auto input_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); auto output_first = thrust::make_zip_iterator( thrust::make_tuple(thrust::get<1>(input_first.get_iterator_tuple()), thrust::get<0>(input_first.get_iterator_tuple()))) + num_inserts; thrust::copy(rmm::exec_policy(handle.get_stream_view()), input_first, input_first + num_inserts, output_first); if (GraphViewType::is_multi_gpu) { auto &comm = handle.get_comms(); auto const comm_size = comm.get_size(); auto &row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name()); auto const row_comm_size = row_comm.get_size(); auto &col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name()); auto const col_comm_size = col_comm.get_size(); std::tie(edge_buffer, std::ignore) = cugraph::experimental::groupby_gpuid_and_shuffle_values( comm, get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), get_dataframe_buffer_end<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), [key_func = cugraph::experimental::detail::compute_gpu_id_from_edge_t<vertex_t>{ comm_size, row_comm_size, col_comm_size}] __device__(auto val) { return key_func(thrust::get<0>(val), thrust::get<1>(val)); }, handle.get_stream()); auto edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); auto edge_last = get_dataframe_buffer_end<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); thrust::sort(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_last); auto unique_edge_last = thrust::unique(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_last); resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, static_cast<size_t>(thrust::distance(edge_first, unique_edge_last)), handle.get_stream()); shrink_to_fit_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>(edge_buffer, handle.get_stream()); } std::tie(level_graph, level_renumber_map) = create_graph_from_edgelist<vertex_t, edge_t, weight_t, GraphViewType::is_adj_matrix_transposed, GraphViewType::is_multi_gpu>( handle, std::nullopt, std::move(std::get<0>(edge_buffer)), std::move(std::get<1>(edge_buffer)), rmm::device_uvector<weight_t>(size_t{0}, handle.get_stream_view()), graph_properties_t{true, false, false}, true); } else { break; } } // 3. 
recursive update the current level component IDs from the next level component IDs for (size_t i = 0; i < num_levels - 1; ++i) { size_t next_level = num_levels - 1 - i; size_t current_level = next_level - 1; rmm::device_uvector<vertex_t> next_local_vertices(level_renumber_map_vectors[next_level].size(), handle.get_stream_view()); thrust::sequence(rmm::exec_policy(handle.get_stream_view()), next_local_vertices.begin(), next_local_vertices.end(), level_local_vertex_first_vectors[next_level]); relabel<vertex_t, GraphViewType::is_multi_gpu>( handle, std::make_tuple(next_local_vertices.data(), level_renumber_map_vectors[next_level].data()), next_local_vertices.size(), level_component_vectors[next_level].data(), level_component_vectors[next_level].size(), false); relabel<vertex_t, GraphViewType::is_multi_gpu>( handle, std::make_tuple(level_renumber_map_vectors[next_level].data(), level_component_vectors[next_level].data()), level_renumber_map_vectors[next_level].size(), current_level == 0 ? components : level_component_vectors[current_level].data(), current_level == 0 ? push_graph_view.get_number_of_local_vertices() : level_component_vectors[current_level].size(), true); } } } // namespace template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu> void weakly_connected_components( raft::handle_t const &handle, graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const &graph_view, vertex_t *components, bool do_expensive_check) { weakly_connected_components_impl(handle, graph_view, components, do_expensive_check); } // explicit instantiation template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, false> const &graph_view, int64_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, true> const &graph_view, int64_t *components, bool do_expensive_check); template void weakly_connected_components( 
raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, false> const &graph_view, int64_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, true> const &graph_view, int64_t *components, bool do_expensive_check); } // namespace experimental } // namespace cugraph
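// ---------------------------------------------------------------------------
// Editor's usage sketch (not part of the original file): a minimal single-GPU
// invocation of the algorithm above. The caller is assumed to have already built
// a symmetric, non-transposed graph_view_t; the namespace and function name below
// (cugraph_wcc_example::run_wcc) are illustrative, everything else uses only APIs
// referenced elsewhere in this file.
namespace cugraph_wcc_example {
inline rmm::device_uvector<int32_t> run_wcc(
  raft::handle_t const &handle,
  cugraph::experimental::graph_view_t<int32_t, int32_t, float, false, false> const &graph_view)
{
  // one component ID (a root vertex ID) per local vertex
  rmm::device_uvector<int32_t> components(graph_view.get_number_of_local_vertices(),
                                          handle.get_stream_view());
  cugraph::experimental::weakly_connected_components(
    handle, graph_view, components.data(), false /* do_expensive_check */);
  return components;
}
}  // namespace cugraph_wcc_example
// ---------------------------------------------------------------------------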
514389adb6afa0403fa53785c91032ff38af65c1.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cugraph/algorithms.hpp> #include <cugraph/experimental/detail/graph_utils.cuh> #include <cugraph/experimental/graph_functions.hpp> #include <cugraph/experimental/graph_view.hpp> #include <cugraph/patterns/copy_to_adj_matrix_row_col.cuh> #include <cugraph/patterns/update_frontier_v_push_if_out_nbr.cuh> #include <cugraph/patterns/vertex_frontier.cuh> #include <cugraph/utilities/device_comm.cuh> #include <cugraph/utilities/error.hpp> #include <cugraph/utilities/shuffle_comm.cuh> #include <cugraph/vertex_partition_device.cuh> #include <raft/handle.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/optional.h> #include <thrust/shuffle.h> #include <thrust/tuple.h> #include <algorithm> #include <limits> #include <random> #include <type_traits> #include <vector> namespace cugraph { namespace experimental { namespace { // FIXME: this function (after modification) may be useful for SSSP with the near-far method to // determine the near-far threshold. 
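// Editor's note (illustration only, made-up numbers): for the routine below, suppose the next
// five candidates are all still unlabeled and have degrees {5, 7, 12, 3, 9}, with
// degree_sum_threshold - degree_sum = 16 and max_new_roots = 4. The inclusive scan of the
// degrees is {5, 12, 24, 27, 36}; lower_bound(16) points at 24 (two full roots precede it) and
// the crossing root is included as well, so 3 new roots are accepted, num_scanned advances by 3,
// and the returned degree sum is 24 -- the first prefix sum that reaches the threshold.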
// add new roots till the sum of the degrees first becomes no smaller than degree_sum_threshold and // returns a triplet of (new roots, number of scanned candidates, sum of the degrees of the new // roots) template <typename GraphViewType> std::tuple<rmm::device_uvector<typename GraphViewType::vertex_type>, typename GraphViewType::vertex_type, typename GraphViewType::edge_type> accumulate_new_roots(raft::handle_t const &handle, vertex_partition_device_t<GraphViewType> vertex_partition, typename GraphViewType::vertex_type const *components, typename GraphViewType::edge_type const *degrees, typename GraphViewType::vertex_type const *candidate_first, typename GraphViewType::vertex_type const *candidate_last, typename GraphViewType::vertex_type max_new_roots, typename GraphViewType::edge_type degree_sum_threshold) { using vertex_t = typename GraphViewType::vertex_type; using edge_t = typename GraphViewType::edge_type; // tuning parameter (time to scan max_scan_size elements should not take significantly longer than // scanning a single element) vertex_t max_scan_size = static_cast<vertex_t>(handle.get_device_properties().multiProcessorCount) * vertex_t{16384}; rmm::device_uvector<vertex_t> new_roots(max_new_roots, handle.get_stream_view()); vertex_t num_new_roots{0}; vertex_t num_scanned{0}; edge_t degree_sum{0}; while ((candidate_first + num_scanned < candidate_last) && (degree_sum < degree_sum_threshold) && (num_new_roots < max_new_roots)) { auto scan_size = std::min( max_scan_size, static_cast<vertex_t>(thrust::distance(candidate_first + num_scanned, candidate_last))); rmm::device_uvector<vertex_t> tmp_new_roots(scan_size, handle.get_stream_view()); rmm::device_uvector<vertex_t> tmp_indices(tmp_new_roots.size(), handle.get_stream_view()); auto input_pair_first = thrust::make_zip_iterator(thrust::make_tuple( candidate_first + num_scanned, thrust::make_counting_iterator(vertex_t{0}))); auto output_pair_first = thrust::make_zip_iterator(thrust::make_tuple(tmp_new_roots.begin(), tmp_indices.begin())); tmp_new_roots.resize( static_cast<vertex_t>(thrust::distance( output_pair_first, thrust::copy_if( rmm::exec_policy(handle.get_stream_view()), input_pair_first, input_pair_first + scan_size, output_pair_first, [vertex_partition, components] __device__(auto pair) { auto v = thrust::get<0>(pair); return (components[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)] == invalid_component_id<vertex_t>::value); }))), handle.get_stream_view()); tmp_indices.resize(tmp_new_roots.size(), handle.get_stream_view()); if (tmp_new_roots.size() > 0) { rmm::device_uvector<edge_t> tmp_cumulative_degrees(tmp_new_roots.size(), handle.get_stream_view()); thrust::transform( rmm::exec_policy(handle.get_stream_view()), tmp_new_roots.begin(), tmp_new_roots.end(), tmp_cumulative_degrees.begin(), [vertex_partition, degrees] __device__(auto v) { return degrees[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)]; }); thrust::inclusive_scan(rmm::exec_policy(handle.get_stream_view()), tmp_cumulative_degrees.begin(), tmp_cumulative_degrees.end(), tmp_cumulative_degrees.begin()); auto last = thrust::lower_bound(rmm::exec_policy(handle.get_stream_view()), tmp_cumulative_degrees.begin(), tmp_cumulative_degrees.end(), degree_sum_threshold - degree_sum); if (last != tmp_cumulative_degrees.end()) { ++last; } auto tmp_num_new_roots = std::min(static_cast<vertex_t>(thrust::distance(tmp_cumulative_degrees.begin(), last)), max_new_roots - num_new_roots); thrust::copy(rmm::exec_policy(handle.get_stream_view()), 
tmp_new_roots.begin(), tmp_new_roots.begin() + tmp_num_new_roots, new_roots.begin() + num_new_roots); num_new_roots += tmp_num_new_roots; vertex_t tmp_num_scanned{0}; edge_t tmp_degree_sum{0}; if (tmp_num_new_roots == static_cast<vertex_t>(tmp_new_roots.size())) { tmp_num_scanned = scan_size; } else { raft::update_host( &tmp_num_scanned, tmp_indices.data() + tmp_num_new_roots, size_t{1}, handle.get_stream()); } raft::update_host(&tmp_degree_sum, tmp_cumulative_degrees.data() + (tmp_num_new_roots - 1), size_t{1}, handle.get_stream()); handle.get_stream_view().synchronize(); num_scanned += tmp_num_scanned; degree_sum += tmp_degree_sum; } else { num_scanned += scan_size; } } new_roots.resize(num_new_roots, handle.get_stream_view()); new_roots.shrink_to_fit(handle.get_stream_view()); return std::make_tuple(std::move(new_roots), num_scanned, degree_sum); } // FIXME: to silence the spurious warning (missing return statement ...) due to the nvcc bug // (https://stackoverflow.com/questions/64523302/cuda-missing-return-statement-at-end-of-non-void- // function-in-constexpr-if-fun) template <typename GraphViewType> struct v_op_t { using vertex_type = typename GraphViewType::vertex_type; vertex_partition_device_t<GraphViewType> vertex_partition{}; vertex_type *level_components{}; decltype(thrust::make_zip_iterator(thrust::make_tuple( static_cast<vertex_type *>(nullptr), static_cast<vertex_type *>(nullptr)))) edge_buffer_first{}; // FIXME: we can use cuda::atomic instead but currently on a system with x86 + GPU, this requires // placing the atomic barrier on managed memory and this adds additional complication. size_t *num_edge_inserts{}; size_t next_bucket_idx{}; size_t conflict_bucket_idx{}; // relevant only if GraphViewType::is_multi_gpu is true template <bool multi_gpu = GraphViewType::is_multi_gpu> __device__ std::enable_if_t<multi_gpu, thrust::optional<thrust::tuple<size_t, std::byte>>> operator()(thrust::tuple<vertex_type, vertex_type> tagged_v, int v_val /* dummy */) const { auto tag = thrust::get<1>(tagged_v); auto v_offset = vertex_partition.get_local_vertex_offset_from_vertex_nocheck(thrust::get<0>(tagged_v)); // FIXME: better switch to atomic_ref after // https://github.com/nvidia/libcudacxx/milestone/2 auto old = atomicCAS(level_components + v_offset, invalid_component_id<vertex_type>::value, tag); if (old != invalid_component_id<vertex_type>::value && old != tag) { // conflict return thrust::optional<thrust::tuple<size_t, std::byte>>{ thrust::make_tuple(conflict_bucket_idx, std::byte{0} /* dummy */)}; } else { return (old == invalid_component_id<vertex_type>::value) ? 
thrust::optional<thrust::tuple<size_t, std::byte>>{thrust::make_tuple( next_bucket_idx, std::byte{0} /* dummy */)} : thrust::nullopt; } } template <bool multi_gpu = GraphViewType::is_multi_gpu> __device__ std::enable_if_t<!multi_gpu, thrust::optional<thrust::tuple<size_t, std::byte>>> operator()(thrust::tuple<vertex_type, vertex_type> tagged_v, int v_val /* dummy */) const { return thrust::optional<thrust::tuple<size_t, std::byte>>{ thrust::make_tuple(next_bucket_idx, std::byte{0} /* dummy */)}; } }; template <typename GraphViewType> void weakly_connected_components_impl(raft::handle_t const &handle, GraphViewType const &push_graph_view, typename GraphViewType::vertex_type *components, bool do_expensive_check) { using vertex_t = typename GraphViewType::vertex_type; using edge_t = typename GraphViewType::edge_type; using weight_t = typename GraphViewType::weight_type; static_assert(std::is_integral<vertex_t>::value, "GraphViewType::vertex_type should be integral."); static_assert(!GraphViewType::is_adj_matrix_transposed, "GraphViewType should support the push model."); auto const num_vertices = push_graph_view.get_number_of_vertices(); if (num_vertices == 0) { return; } // 1. check input arguments CUGRAPH_EXPECTS( push_graph_view.is_symmetric(), "Invalid input argument: input graph should be symmetric for weakly connected components."); if (do_expensive_check) { // nothing to do } // 2. recursively run multi-root frontier expansion enum class Bucket { cur, next, conflict /* relevant only if GraphViewType::is_multi_gpu is true */, num_buckets }; // tuning parameter to balance work per iteration (should be large enough to be throughput // bounded) vs # conflicts between frontiers with different roots (# conflicts == # edges for the // next level) auto degree_sum_threshold = static_cast<edge_t>(handle.get_device_properties().multiProcessorCount) * edge_t{1024}; size_t num_levels{0}; graph_t<vertex_t, edge_t, typename GraphViewType::weight_type, GraphViewType::is_adj_matrix_transposed, GraphViewType::is_multi_gpu> level_graph(handle); rmm::device_uvector<vertex_t> level_renumber_map(0, handle.get_stream_view()); std::vector<rmm::device_uvector<vertex_t>> level_component_vectors{}; // vertex ID in this level to the component ID in the previous level std::vector<rmm::device_uvector<vertex_t>> level_renumber_map_vectors{}; std::vector<vertex_t> level_local_vertex_first_vectors{}; while (true) { auto level_graph_view = num_levels == 0 ? push_graph_view : level_graph.view(); vertex_partition_device_t<GraphViewType> vertex_partition(level_graph_view); level_component_vectors.push_back(rmm::device_uvector<vertex_t>( num_levels == 0 ? vertex_t{0} : level_graph_view.get_number_of_local_vertices(), handle.get_stream_view())); level_renumber_map_vectors.push_back(std::move(level_renumber_map)); level_local_vertex_first_vectors.push_back(level_graph_view.get_local_vertex_first()); auto level_components = num_levels == 0 ? components : level_component_vectors[num_levels].data(); ++num_levels; auto degrees = level_graph_view.compute_out_degrees(handle); // 2-1. filter out isolated vertices auto pair_first = thrust::make_zip_iterator(thrust::make_tuple( thrust::make_counting_iterator(level_graph_view.get_local_vertex_first()), degrees.begin())); thrust::transform(rmm::exec_policy(handle.get_stream_view()), pair_first, pair_first + level_graph_view.get_number_of_local_vertices(), level_components, [] __device__(auto pair) { auto v = thrust::get<0>(pair); auto degree = thrust::get<1>(pair); return degree > 0 ? 
invalid_component_id<vertex_t>::value : v; }); // 2-2. initialize new root candidates // Vertices are first partitioned to high-degree vertices and low-degree vertices, we can reach // degree_sum_threshold with fewer high-degree vertices leading to a higher compression ratio. // The degree threshold is set to ceil(sqrt(degree_sum_threshold * 2)); this guarantees the // compression ratio of at least 50% (ignoring rounding errors) even if all the selected roots // fall into a single connected component as there will be at least as many non-root vertices in // the connected component (assuming there are no multi-edges, if there are multi-edges, we may // not get 50% compression in # vertices but still get compression in # edges). the remaining // low-degree vertices will be randomly shuffled so comparable ratios of vertices will be // selected as roots in the remaining connected components. rmm::device_uvector<vertex_t> new_root_candidates( level_graph_view.get_number_of_local_vertices(), handle.get_stream_view()); new_root_candidates.resize( thrust::distance( new_root_candidates.begin(), thrust::copy_if( rmm::exec_policy(handle.get_stream_view()), thrust::make_counting_iterator(level_graph_view.get_local_vertex_first()), thrust::make_counting_iterator(level_graph_view.get_local_vertex_last()), new_root_candidates.begin(), [vertex_partition, level_components] __device__(auto v) { return level_components[vertex_partition.get_local_vertex_offset_from_vertex_nocheck( v)] == invalid_component_id<vertex_t>::value; })), handle.get_stream_view()); auto high_degree_partition_last = thrust::stable_partition( rmm::exec_policy(handle.get_stream_view()), new_root_candidates.begin(), new_root_candidates.end(), [vertex_partition, degrees = degrees.data(), threshold = static_cast<edge_t>( ceil(sqrt(static_cast<double>(degree_sum_threshold) * 2.0)))] __device__(auto v) { return degrees[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)] >= threshold; }); thrust::shuffle(rmm::exec_policy(handle.get_stream_view()), high_degree_partition_last, new_root_candidates.end(), thrust::default_random_engine()); double constexpr max_new_roots_ratio = 0.05; // to avoid selecting all the vertices as roots leading to zero compression static_assert(max_new_roots_ratio > 0.0); auto max_new_roots = std::max( static_cast<vertex_t>(new_root_candidates.size() * max_new_roots_ratio), vertex_t{1}); auto init_max_new_roots = max_new_roots; // to avoid selecting too many (possibly all) vertices as initial roots leading to no // compression in the worst case. if (GraphViewType::is_multi_gpu && (level_graph_view.get_number_of_vertices() <= static_cast<vertex_t>(handle.get_comms().get_size() * ceil(1.0 / max_new_roots_ratio)))) { auto &comm = handle.get_comms(); auto const comm_rank = comm.get_rank(); auto const comm_size = comm.get_size(); // FIXME: a temporary workaround for a NCCL(2.9.6) bug that causes a hang on DGX1 (due to // remote memory allocation), host_scalar_gather is sufficient otherwise. 
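    // Editor's note (illustrative numbers, not in the original code): this branch only runs when
    // the level graph is tiny relative to the number of ranks, e.g. with comm_size = 8 and
    // max_new_roots_ratio = 0.05 it triggers for graphs of at most 8 * ceil(1 / 0.05) = 160
    // vertices. Rank 0 then draws roughly max_new_roots_ratio of the candidate pool aggregated
    // over all GPUs, so the total number of initial roots is capped at
    // max(0.05 * total #candidates, 1) instead of up to max_new_roots per rank, preserving
    // compression even for very small levels.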
#if 1 auto new_root_candidate_counts = host_scalar_allgather(comm, new_root_candidates.size(), handle.get_stream()); #else auto new_root_candidate_counts = host_scalar_gather(comm, new_root_candidates.size(), int{0}, handle.get_stream()); #endif if (comm_rank == 0) { std::vector<int> gpuids{}; gpuids.reserve( std::reduce(new_root_candidate_counts.begin(), new_root_candidate_counts.end())); for (size_t i = 0; i < new_root_candidate_counts.size(); ++i) { gpuids.insert(gpuids.end(), new_root_candidate_counts[i], static_cast<int>(i)); } std::random_device rd{}; std::shuffle(gpuids.begin(), gpuids.end(), std::mt19937(rd())); gpuids.resize( std::max(static_cast<vertex_t>(gpuids.size() * max_new_roots_ratio), vertex_t{1})); std::vector<vertex_t> init_max_new_root_counts(comm_size, vertex_t{0}); for (size_t i = 0; i < gpuids.size(); ++i) { ++init_max_new_root_counts[gpuids[i]]; } // FIXME: we need to add host_scalar_scatter #if 1 rmm::device_uvector<vertex_t> d_counts(comm_size, handle.get_stream_view()); raft::update_device(d_counts.data(), init_max_new_root_counts.data(), init_max_new_root_counts.size(), handle.get_stream()); device_bcast( comm, d_counts.data(), d_counts.data(), d_counts.size(), int{0}, handle.get_stream()); raft::update_host( &init_max_new_roots, d_counts.data() + comm_rank, size_t{1}, handle.get_stream()); #else iinit_max_new_roots = host_scalar_scatter(comm, init_max_new_root_counts.data(), int{0}, handle.get_stream()); #endif } else { // FIXME: we need to add host_scalar_scatter #if 1 rmm::device_uvector<vertex_t> d_counts(comm_size, handle.get_stream_view()); device_bcast( comm, d_counts.data(), d_counts.data(), d_counts.size(), int{0}, handle.get_stream()); raft::update_host( &init_max_new_roots, d_counts.data() + comm_rank, size_t{1}, handle.get_stream()); #else iinit_max_new_roots = host_scalar_scatter(comm, init_max_new_root_counts.data(), int{0}, handle.get_stream()); #endif } handle.get_stream_view().synchronize(); } // 2-3. initialize vertex frontier, edge_buffer, and col_components (if multi-gpu) VertexFrontier<vertex_t, vertex_t, GraphViewType::is_multi_gpu, static_cast<size_t>(Bucket::num_buckets)> vertex_frontier(handle); vertex_t next_candidate_offset{0}; edge_t edge_count{0}; auto edge_buffer = allocate_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>(0, handle.get_stream()); // FIXME: we can use cuda::atomic instead but currently on a system with x86 + GPU, this // requires placing the atomic variable on managed memory and this make it less attractive. rmm::device_scalar<size_t> num_edge_inserts(size_t{0}, handle.get_stream_view()); rmm::device_uvector<vertex_t> col_components( GraphViewType::is_multi_gpu ? level_graph_view.get_number_of_local_adj_matrix_partition_cols() : vertex_t{0}, handle.get_stream_view()); if (GraphViewType::is_multi_gpu) { thrust::fill(rmm::exec_policy(handle.get_stream_view()), col_components.begin(), col_components.end(), invalid_component_id<vertex_t>::value); } // 2.4 iterate till every vertex gets visited size_t iter{0}; while (true) { if ((edge_count < degree_sum_threshold) && (next_candidate_offset < static_cast<vertex_t>(new_root_candidates.size()))) { auto [new_roots, num_scanned, degree_sum] = accumulate_new_roots(handle, vertex_partition, level_components, degrees.data(), new_root_candidates.data() + next_candidate_offset, new_root_candidates.data() + new_root_candidates.size(), iter == 0 ? 
init_max_new_roots : max_new_roots, degree_sum_threshold - edge_count); next_candidate_offset += num_scanned; edge_count += degree_sum; thrust::sort( rmm::exec_policy(handle.get_stream_view()), new_roots.begin(), new_roots.end()); thrust::for_each( rmm::exec_policy(handle.get_stream_view()), new_roots.begin(), new_roots.end(), [vertex_partition, components = level_components] __device__(auto c) { components[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(c)] = c; }); auto pair_first = thrust::make_zip_iterator(thrust::make_tuple(new_roots.begin(), new_roots.begin())); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .insert(pair_first, pair_first + new_roots.size()); } if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).aggregate_size() == 0) { break; } if (GraphViewType::is_multi_gpu) { copy_to_adj_matrix_col( handle, level_graph_view, thrust::get<0>(vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .begin() .get_iterator_tuple()), thrust::get<0>(vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .end() .get_iterator_tuple()), level_components, col_components.begin()); } auto max_pushes = GraphViewType::is_multi_gpu ? compute_num_out_nbrs_from_frontier( handle, level_graph_view, vertex_frontier, static_cast<size_t>(Bucket::cur)) : edge_count; // FIXME: if we use cuco::static_map (no duplicates, ideally we need static_set), edge_buffer // size cannot exceed (# roots)^2 and we can avoid additional sort & unique (but resizing the // buffer may be more expensive). auto old_num_edge_inserts = num_edge_inserts.value(handle.get_stream_view()); resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, old_num_edge_inserts + max_pushes, handle.get_stream()); update_frontier_v_push_if_out_nbr( handle, level_graph_view, vertex_frontier, static_cast<size_t>(Bucket::cur), std::vector<size_t>{static_cast<size_t>(Bucket::next)}, thrust::make_counting_iterator(0) /* dummy */, thrust::make_counting_iterator(0) /* dummy */, [col_components = GraphViewType::is_multi_gpu ? col_components.data() : level_components, col_first = level_graph_view.get_local_adj_matrix_partition_col_first(), edge_buffer_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), num_edge_inserts = num_edge_inserts.data()] __device__(auto tagged_src, vertex_t dst, auto src_val, auto dst_val) { auto tag = thrust::get<1>(tagged_src); auto col_offset = dst - col_first; // FIXME: better switch to atomic_ref after // https://github.com/nvidia/libcudacxx/milestone/2 auto old = atomicCAS(col_components + col_offset, invalid_component_id<vertex_t>::value, tag); if (old != invalid_component_id<vertex_t>::value && old != tag) { // conflict static_assert(sizeof(unsigned long long int) == sizeof(size_t)); auto edge_idx = atomicAdd(reinterpret_cast<unsigned long long int *>(num_edge_inserts), static_cast<unsigned long long int>(1)); // keep only the edges in the lower triangular part *(edge_buffer_first + edge_idx) = tag >= old ? thrust::make_tuple(tag, old) : thrust::make_tuple(old, tag); } return (old == invalid_component_id<vertex_t>::value) ? 
thrust::optional<vertex_t>{tag} : thrust::nullopt; }, reduce_op::null(), thrust::make_constant_iterator(0) /* dummy */, thrust::make_discard_iterator() /* dummy */, v_op_t<GraphViewType>{ vertex_partition, level_components, get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), num_edge_inserts.data(), static_cast<size_t>(Bucket::next), static_cast<size_t>(Bucket::conflict)}); if (GraphViewType::is_multi_gpu) { auto cur_num_edge_inserts = num_edge_inserts.value(handle.get_stream_view()); auto &conflict_bucket = vertex_frontier.get_bucket(static_cast<size_t>(Bucket::conflict)); resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, cur_num_edge_inserts + conflict_bucket.size(), handle.get_stream()); thrust::for_each( rmm::exec_policy(handle.get_stream_view()), conflict_bucket.begin(), conflict_bucket.end(), [vertex_partition, level_components, edge_buffer_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), num_edge_inserts = num_edge_inserts.data()] __device__(auto tagged_v) { auto v_offset = vertex_partition.get_local_vertex_offset_from_vertex_nocheck( thrust::get<0>(tagged_v)); auto old = *(level_components + v_offset); auto tag = thrust::get<1>(tagged_v); static_assert(sizeof(unsigned long long int) == sizeof(size_t)); auto edge_idx = atomicAdd(reinterpret_cast<unsigned long long int *>(num_edge_inserts), static_cast<unsigned long long int>(1)); // keep only the edges in the lower triangular part *(edge_buffer_first + edge_idx) = tag >= old ? thrust::make_tuple(tag, old) : thrust::make_tuple(old, tag); }); conflict_bucket.clear(); } // maintain the list of sorted unique edges (we can avoid this if we use cuco::static_map(no // duplicates, ideally we need static_set)). auto new_num_edge_inserts = num_edge_inserts.value(handle.get_stream_view()); if (new_num_edge_inserts > old_num_edge_inserts) { auto edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); thrust::sort(rmm::exec_policy(handle.get_stream_view()), edge_first + old_num_edge_inserts, edge_first + new_num_edge_inserts); if (old_num_edge_inserts > 0) { auto tmp_edge_buffer = allocate_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( new_num_edge_inserts, handle.get_stream()); auto tmp_edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(tmp_edge_buffer); thrust::merge(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_first + old_num_edge_inserts, edge_first + old_num_edge_inserts, edge_first + new_num_edge_inserts, tmp_edge_first); edge_buffer = std::move(tmp_edge_buffer); } edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); auto unique_edge_last = thrust::unique(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_first + new_num_edge_inserts); auto num_unique_edges = static_cast<size_t>(thrust::distance(edge_first, unique_edge_last)); num_edge_inserts.set_value(num_unique_edges, handle.get_stream_view()); } vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).clear(); vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).shrink_to_fit(); vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur), static_cast<size_t>(Bucket::next)); edge_count = thrust::transform_reduce( rmm::exec_policy(handle.get_stream_view()), thrust::get<0>(vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)) .begin() .get_iterator_tuple()), thrust::get<0>( vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur)).end().get_iterator_tuple()), 
[vertex_partition, degrees = degrees.data()] __device__(auto v) { return degrees[vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v)]; }, edge_t{0}, thrust::plus<edge_t>()); ++iter; } // 2-5. construct the next level graph from the edges emitted on conflicts auto num_inserts = num_edge_inserts.value(handle.get_stream_view()); auto aggregate_num_inserts = num_inserts; if (GraphViewType::is_multi_gpu) { auto &comm = handle.get_comms(); aggregate_num_inserts = host_scalar_allreduce(comm, num_inserts, handle.get_stream()); } if (aggregate_num_inserts > 0) { resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, static_cast<size_t>(num_inserts * 2), handle.get_stream()); auto input_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); auto output_first = thrust::make_zip_iterator( thrust::make_tuple(thrust::get<1>(input_first.get_iterator_tuple()), thrust::get<0>(input_first.get_iterator_tuple()))) + num_inserts; thrust::copy(rmm::exec_policy(handle.get_stream_view()), input_first, input_first + num_inserts, output_first); if (GraphViewType::is_multi_gpu) { auto &comm = handle.get_comms(); auto const comm_size = comm.get_size(); auto &row_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().row_name()); auto const row_comm_size = row_comm.get_size(); auto &col_comm = handle.get_subcomm(cugraph::partition_2d::key_naming_t().col_name()); auto const col_comm_size = col_comm.get_size(); std::tie(edge_buffer, std::ignore) = cugraph::experimental::groupby_gpuid_and_shuffle_values( comm, get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), get_dataframe_buffer_end<thrust::tuple<vertex_t, vertex_t>>(edge_buffer), [key_func = cugraph::experimental::detail::compute_gpu_id_from_edge_t<vertex_t>{ comm_size, row_comm_size, col_comm_size}] __device__(auto val) { return key_func(thrust::get<0>(val), thrust::get<1>(val)); }, handle.get_stream()); auto edge_first = get_dataframe_buffer_begin<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); auto edge_last = get_dataframe_buffer_end<thrust::tuple<vertex_t, vertex_t>>(edge_buffer); thrust::sort(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_last); auto unique_edge_last = thrust::unique(rmm::exec_policy(handle.get_stream_view()), edge_first, edge_last); resize_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>( edge_buffer, static_cast<size_t>(thrust::distance(edge_first, unique_edge_last)), handle.get_stream()); shrink_to_fit_dataframe_buffer<thrust::tuple<vertex_t, vertex_t>>(edge_buffer, handle.get_stream()); } std::tie(level_graph, level_renumber_map) = create_graph_from_edgelist<vertex_t, edge_t, weight_t, GraphViewType::is_adj_matrix_transposed, GraphViewType::is_multi_gpu>( handle, std::nullopt, std::move(std::get<0>(edge_buffer)), std::move(std::get<1>(edge_buffer)), rmm::device_uvector<weight_t>(size_t{0}, handle.get_stream_view()), graph_properties_t{true, false, false}, true); } else { break; } } // 3. 
recursive update the current level component IDs from the next level component IDs for (size_t i = 0; i < num_levels - 1; ++i) { size_t next_level = num_levels - 1 - i; size_t current_level = next_level - 1; rmm::device_uvector<vertex_t> next_local_vertices(level_renumber_map_vectors[next_level].size(), handle.get_stream_view()); thrust::sequence(rmm::exec_policy(handle.get_stream_view()), next_local_vertices.begin(), next_local_vertices.end(), level_local_vertex_first_vectors[next_level]); relabel<vertex_t, GraphViewType::is_multi_gpu>( handle, std::make_tuple(next_local_vertices.data(), level_renumber_map_vectors[next_level].data()), next_local_vertices.size(), level_component_vectors[next_level].data(), level_component_vectors[next_level].size(), false); relabel<vertex_t, GraphViewType::is_multi_gpu>( handle, std::make_tuple(level_renumber_map_vectors[next_level].data(), level_component_vectors[next_level].data()), level_renumber_map_vectors[next_level].size(), current_level == 0 ? components : level_component_vectors[current_level].data(), current_level == 0 ? push_graph_view.get_number_of_local_vertices() : level_component_vectors[current_level].size(), true); } } } // namespace template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu> void weakly_connected_components( raft::handle_t const &handle, graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const &graph_view, vertex_t *components, bool do_expensive_check) { weakly_connected_components_impl(handle, graph_view, components, do_expensive_check); } // explicit instantiation template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, float, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int32_t, double, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, float, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, false> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int32_t, int64_t, double, false, true> const &graph_view, int32_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, false> const &graph_view, int64_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int64_t, int64_t, float, false, true> const &graph_view, int64_t *components, bool do_expensive_check); template void weakly_connected_components( 
raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, false> const &graph_view, int64_t *components, bool do_expensive_check); template void weakly_connected_components( raft::handle_t const &handle, graph_view_t<int64_t, int64_t, double, false, true> const &graph_view, int64_t *components, bool do_expensive_check); } // namespace experimental } // namespace cugraph
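// ---------------------------------------------------------------------------
// Editor's illustration (not part of the original file): how conflict edges drive the next
// level, with made-up roots {3, 7} expanded in the same iteration. Suppose vertex v is first
// reached from root 7 (the atomicCAS above stores 7) and later from root 3. The second push
// detects the conflict and records the pair as (7, 3) -- larger ID first, so only the
// lower-triangular representative is kept and the same conflict discovered from either side
// deduplicates to a single edge after sort + unique. When the next-level graph is built, the
// swapped copy (3, 7) is appended as well, making the edge list symmetric; running the same
// procedure on that two-vertex graph merges components 3 and 7, and the relabel pass in
// step 3 propagates the merged ID back down to the original vertices.
// ---------------------------------------------------------------------------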
a471ac2756a014899693afd55e1c0fd9056f02b6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #define dimSize 64 #define MAXLINE 200 #define MAXCAD 200 /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ void cleanString(char *cadena, char *out) { int i,j; for( i = j = 0; cadena[i] != 0;++i) { if(isalnum(cadena[i])||cadena[i]=='{'||cadena[i]=='.'||cadena[i]==',') { out[j]=cadena[i]; j++; } } for( i = j; out[i] != 0;++i) out[j]=0; } /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ int readHeader1(char* filename, int *lines, int *samples, int *bands, int *dataType, char* interleave, int *byteOrder, char* waveUnit) { FILE *fp; char line[MAXLINE] = ""; char value [MAXLINE] = ""; if ((fp=fopen(filename,"rt"))!=NULL) { fseek(fp,0L,SEEK_SET); while(fgets(line, MAXLINE, fp)!='\0') { //samples if(strstr(line, "samples")!=NULL && samples !=NULL) { cleanString(strstr(line, "="),value); *samples = atoi(value); } //lines if(strstr(line, "lines")!=NULL && lines !=NULL) { cleanString(strstr(line, "="),value); *lines = atoi(value); } //Bands if(strstr(line, "bands")!=NULL && bands !=NULL) { cleanString(strstr(line, "="),value); *bands = atoi(value); } //Interleave if(strstr(line, "interleave")!=NULL && interleave !=NULL) { cleanString(strstr(line, "="),value); strcpy(interleave,value); } //Data Type if(strstr(line, "data type")!=NULL && dataType !=NULL) { cleanString(strstr(line, "="),value); *dataType = atoi(value); } //Byte Order if(strstr(line, "byte order")!=NULL && byteOrder !=NULL) { cleanString(strstr(line, "="),value); *byteOrder = atoi(value); } //Wavelength Unit if(strstr(line, "wavelength unit")!=NULL && waveUnit !=NULL) { cleanString(strstr(line, "="),value); strcpy(waveUnit,value); } } fclose(fp); return 0; } else return -2; //No file found } /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ int readHeader2(char* filename, double* wavelength) { FILE *fp; char line[MAXLINE] = ""; char value [MAXLINE] = ""; if ((fp=fopen(filename,"rt"))!=NULL) { fseek(fp,0L,SEEK_SET); while(fgets(line, MAXLINE, fp)!='\0') { //Wavelength if(strstr(line, "wavelength =")!=NULL && wavelength !=NULL) { char strAll[100000]=" "; char *pch; int cont = 0; do { fgets(line, 200, fp); cleanString(line,value); strcat(strAll,value); } while(strstr(line, "}")==NULL); pch = strtok(strAll,","); while (pch != NULL) { wavelength[cont]= atof(pch); pch = strtok (NULL, ","); cont++; } } } fclose(fp); return 0; } else return -2; //No file found } /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ int loadImage(char* filename, double* image, int lines, int samples, int bands, int dataType, char* interleave) { FILE *fp; short int *tipo_short_int; float *tipo_float; double * tipo_double; unsigned int *tipo_uint; int i, j, k, op; long int lines_samples = lines*samples; if ((fp=fopen(filename,"rb"))!=NULL) { fseek(fp,0L,SEEK_SET); tipo_float = (float*)malloc(lines_samples*bands*sizeof(float)); switch(dataType) { case 2: tipo_short_int = (short int*)malloc(lines_samples*bands*sizeof(short int)); fread(tipo_short_int,1,(sizeof(short int)*lines_samples*bands),fp); for(i=0; i<lines_samples * bands; i++) tipo_float[i]=(float)tipo_short_int[i]; free(tipo_short_int); break; case 4: fread(tipo_float,1,(sizeof(float)*lines_samples*bands),fp); break; case 5: tipo_double = (double*)malloc(lines_samples*bands*sizeof(double)); 
fread(tipo_double,1,(sizeof(double)*lines_samples*bands),fp); for(i=0; i<lines_samples * bands; i++) tipo_float[i]=(float)tipo_double[i]; free(tipo_double); break; case 12: tipo_uint = (unsigned int*)malloc(lines_samples*bands*sizeof(unsigned int)); fread(tipo_uint,1,(sizeof(unsigned int)*lines_samples*bands),fp); for(i=0; i<lines_samples * bands; i++) tipo_float[i]=(float)tipo_uint[i]; free(tipo_uint); break; } fclose(fp); if(interleave == NULL) op = 0; else { if(strcmp(interleave, "bsq") == 0) op = 0; if(strcmp(interleave, "bip") == 0) op = 1; if(strcmp(interleave, "bil") == 0) op = 2; } switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) image[i] = tipo_float[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) image[i*lines*samples + j] = tipo_float[j*bands + i]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) image[j*lines*samples + (i*samples+k)] = tipo_float[k+samples*(i*bands+j)]; break; } free(tipo_float); return 0; } return -2; } /* * Author: Luis Ignacio Jimenez * Centre: Universidad de Extremadura * */ int writeResult(double *image, const char* filename, int lines, int samples, int bands, int dataType, char* interleave) { short int *imageSI; float *imageF; double *imageD; int i,j,k,op; if(interleave == NULL) op = 0; else { if(strcmp(interleave, "bsq") == 0) op = 0; if(strcmp(interleave, "bip") == 0) op = 1; if(strcmp(interleave, "bil") == 0) op = 2; } if(dataType == 2) { imageSI = (short int*)malloc(lines*samples*bands*sizeof(short int)); switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) imageSI[i] = (short int)image[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) imageSI[j*bands + i] = (short int)image[i*lines*samples + j]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) imageSI[i*bands*samples + (j*samples + k)] = (short int)image[j*lines*samples + (i*samples + k)]; break; } } if(dataType == 4) { imageF = (float*)malloc(lines*samples*bands*sizeof(float)); switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) imageF[i] = (float)image[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) imageF[j*bands + i] = (float)image[i*lines*samples + j]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) imageF[i*bands*samples + (j*samples + k)] = (float)image[j*lines*samples + (i*samples + k)]; break; } } if(dataType == 5) { imageD = (double*)malloc(lines*samples*bands*sizeof(double)); switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) imageD[i] = image[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) imageD[j*bands + i] = image[i*lines*samples + j]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) imageD[i*bands*samples + (j*samples + k)] = image[j*lines*samples + (i*samples + k)]; break; } } FILE *fp; if ((fp=fopen(filename,"wb"))!=NULL) { fseek(fp,0L,SEEK_SET); switch(dataType) { case 2: fwrite(imageSI,1,(lines*samples*bands * sizeof(short int)),fp); free(imageSI); break; case 4: fwrite(imageF,1,(lines*samples*bands * sizeof(float)),fp); free(imageF); break; case 5: fwrite(imageD,1,(lines*samples*bands * sizeof(double)),fp); free(imageD); break; } fclose(fp); return 0; } return -3; } /* * Author: Luis Ignacio Jimenez * Centre: Universidad de Extremadura * */ int writeHeader(char* filename, int lines, int samples, int bands, int dataType, char* interleave, int byteOrder, char* waveUnit, double* wavelength) { FILE *fp; if 
((fp=fopen(filename,"wt"))!=NULL) { fseek(fp,0L,SEEK_SET); fprintf(fp,"ENVI\ndescription = {\nExported from MATLAB}\n"); if(samples != 0) fprintf(fp,"samples = %d", samples); if(lines != 0) fprintf(fp,"\nlines = %d", lines); if(bands != 0) fprintf(fp,"\nbands = %d", bands); if(dataType != 0) fprintf(fp,"\ndata type = %d", dataType); if(interleave != NULL) fprintf(fp,"\ninterleave = %s", interleave); if(byteOrder != 0) fprintf(fp,"\nbyte order = %d", byteOrder); if(waveUnit != NULL) fprintf(fp,"\nwavelength units = %s", waveUnit); if(waveUnit != NULL) { fprintf(fp,"\nwavelength = {\n"); for(int i=0; i<bands; i++) { if(i==0) fprintf(fp, "%f", wavelength[i]); else if(i%3 == 0) fprintf(fp, ", %f\n", wavelength[i]); else fprintf(fp, ", %f", wavelength[i]); } fprintf(fp,"}"); } fclose(fp); return 0; } return -3; } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ double calculateC(int d){ double C=0.0; // Constant = sum of the square distance between central pixel and the rest of the pixel in the window int i,j; double aux; for (i = 0; i <= d; i++){ // recorremos el cuadrante for ( j = 1; j <= d; j++){ aux=(double)(j*j + i*i); C = C + (1/aux); // sumamos la distancia al cuadrado es decir i^2 + j^2 } } C = C * 4; // Se muliplica por 4 porque hay 4 cuadrantes simetricos. return C; } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ void calculateI(double *image, double *I, int samples, int lines, int bands){ int i,j,z; int LS=samples*lines; for (z=0; z< bands; z++){ I[z] = 0.0; for(i=0; i<samples; i++){ for(j=0; j<lines; j++){ I[z] = I[z] + image[i*samples + j + samples*lines*z]; } } I[z]=(I[z]/(LS)); } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ void calculateB(double *B, int d, double C, int ws){ int r,s; for (r = -d ; r <= d; r++){ // recorremos la matriz B for (s = -d ; s <= d; s++){ if( (s == 0) && (r == 0)) { // si estamos en el pixel central le ponemos 0 B[d*ws+d]=0; } else{ // sino calculamos B[r,s] (B[r+d,s+d] porque c++ no permite indices negativos en las matrices. 
double aux1=(r*r + s*s); aux1=1/aux1; double aux2=1/C; B[(r+d)*(ws)+s+d] = (aux2) * (aux1) ; } } } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ __global__ void Get_EucNorm(double *image,double * imageA, int samples, int lines, int bands, int ws, double C, int d, double *B){ int x= threadIdx.x + blockIdx.x * blockDim.x; extern __shared__ double sdata[]; double EucNormA=0; long int vectA=0; unsigned int i; if ((x >= 0) && (x < (samples*lines))){ // si caen dentro de la imagen // si caen dentro de la imagen for (i=0; i< bands; i++){ vectA = image[x + (samples)*(lines)*i];//central EucNormA = EucNormA + (vectA*vectA); } imageA[x] = EucNormA; } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ __global__ void Get_P(double *image,double * imageP,double * imageA, int samples, int lines, int bands, int ws, double C, int d, double *B, int blockX, int blockY, int valor){ int x=gridDim.x*blockX + blockIdx.x + threadIdx.x -d; int y=gridDim.y*blockY + blockIdx.y + threadIdx.y -d ; int z=gridDim.x*blockX + blockIdx.x; int zz=gridDim.y*blockY + blockIdx.y; int relativeX=threadIdx.x; int relativeY=threadIdx.y; int pos=relativeY*(ws) + relativeX; extern __shared__ double sdata[]; double G=0.0; long int vectA=0; double vectB=0.0; double dotPro=0.0; unsigned int i; double cose; double aux=0; if ((z >= 0) && (z < (samples)) && (zz >= 0) && (zz < (lines))){ // si caen dentro de la imagen sdata[pos]= 0; if((pos + (valor/2) ) < valor){ int vari=(valor/2); sdata[(pos+vari)]=0; } //if ((x >= 0) && (x < (samples)) && (y >= 0) && (y < (lines))){ // si caen dentro de la imagen if(x < 0) x = x + d +1; if(y < 0) y = y + d +1; if(x >= samples) x = x - d -1; if(y >= lines) y = y - d -1; for (i=0; i< bands; i++){ vectA = image[zz*(samples) + z + (samples)*(lines)*i];//central vectB = image[y*(samples) + x + (samples)*(lines)*i]; dotPro = dotPro + (vectA*vectB); __syncthreads(); } aux=aux + (imageA[zz*(samples) + z] * imageA[y*(samples) + x]); cose = (dotPro/aux); if (cose >= 1) G=0.0; else{ G = acos(cose ); if (G<0) G=G*(-1); } sdata[pos] =B[pos]*G; __syncthreads(); for (i=((valor)/2); i>0; i=i/2) { if((pos < i) && ( (pos + i) < ws*ws) ) { sdata[pos] += sdata[pos + i]; } __syncthreads(); } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)){ imageP[zz*(samples) + z] = sdata[0]; }else imageP[zz*(samples) + z] = 0; //} } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ __global__ void Get_Preprocessing(double *image, double *imageP, double *imageOut, int samples, int lines, int bands, double *I){ int x= threadIdx.x + blockIdx.x * blockDim.x; int n= 0; extern __shared__ double fdata[]; if ( threadIdx.x < bands){ // si caen dentro de la imagen fdata[threadIdx.x]=I[threadIdx.x]; __syncthreads(); } if ( x < (samples)*(lines)){ // si caen dentro de la imagen while(n<(bands)){ if((1/imageP[x]) >= 0) imageOut[(x + (samples)*(lines)*n)] = ((1/imageP[x]) * ( image[x + ((samples)*(lines)*n)] - fdata[n]) + fdata[n]); else imageOut[(x + (samples)*(lines)*n)] = image[x + ((samples)*(lines)*n)]; n++; } } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ double dot_product(double* a, double* b, int n){ double result=0.0; int i; for(i=0;i<n;i++) result=result + a[i]*b[i]; return result; } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ int main (int argc, char* argv[]){ /* * ARGUMENTS * * argv[1]: Input image filename * argv[2]: Window size * argv[3]: Output image filename * */ 
if(argc > 4 || argc < 4) { printf("EXECUTION ERROR SPP Parallel: Parameters are not correct."); printf("./SPP [Image Filename] [Window size] [Output Result File]"); fflush(stdout); exit(-1); } int ws = atoi(argv[2]); // window size if((ws%2)==0) ws=ws-1; char header_filename[MAXLINE]; double* image; double* imageOut; strcpy(header_filename, argv[1]); strcat(header_filename, ".hdr"); int lines = 0, samples= 0, bands= 0, dataType= 0, byteOrder = 0; char *interleave, *waveUnit; interleave = (char*)malloc(MAXCAD*sizeof(char)); waveUnit = (char*)malloc(MAXCAD*sizeof(char)); // Load image int error = readHeader1(header_filename, &lines, &samples, &bands, &dataType, interleave, &byteOrder, waveUnit); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error reading header file: %s.", header_filename); fflush(stdout); exit(-1); } double* wavelength = (double*)malloc(bands*sizeof(double)); strcpy(header_filename,argv[1]); // Second parameter: Header file: strcat(header_filename,".hdr"); error = readHeader2(header_filename, wavelength); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error reading header file: %s.", header_filename); fflush(stdout); exit(-1); } image = (double*)malloc ( samples*lines * bands*sizeof(double) ); imageOut = (double*)malloc ( samples*lines * bands*sizeof(double) ); error = loadImage(argv[1], image, samples, lines, bands, dataType, interleave); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error image header file: %s.", argv[1]); fflush(stdout); return error; } //START CLOCK*************************************** clock_t start, end; start = clock(); //************************************************** //2 STEP: PREPROCESSING double *I = (double*)malloc(bands*sizeof(double)); int d = (ws-1) / 2; double *B = (double*)malloc(ws*ws*sizeof(double)); double *imageA = (double*)malloc ( samples*lines * bands*sizeof(double) ); double *imageP = (double*)malloc ( samples*lines * bands*sizeof(double) ); float C=0.0; C=calculateC(d); calculateI(image, I, samples, lines, bands); calculateB(B, d, C, ws); dim3 Blocks((dimSize),(dimSize),1); dim3 Threads((ws),(ws),1); // GETP KERNELL double *dev_image;// device copies of image, samples, lines, ws, C, double *dev_B; double *dev_imageP; double *dev_imageA; // GETP KERNELL: allocate device copies hipMalloc( (void**)&dev_image, (samples)*(lines)*(bands)*sizeof(double)); hipMalloc( (void**)&dev_B, (ws)*(ws)*sizeof(double)); hipMalloc( (void**)&dev_imageP, (samples)*(lines)*sizeof(double) ); hipMalloc( (void**)&dev_imageA, (samples)*(lines)*sizeof(double) ); // GETP KERNELL: copy inputs to device hipMemcpy( dev_image, image, (samples)*(lines)*(bands)*sizeof(double), hipMemcpyHostToDevice ); hipMemcpy( dev_B, B, (ws)*(ws)*sizeof(double), hipMemcpyHostToDevice ); // GETP KERNELL // int x,y; int valor=1; while(valor<(ws*ws)){ valor=valor*2; } int blocks= (samples*lines)/1024 +1; dim3 Blocks12(blocks); dim3 Threads12(1024); hipLaunchKernelGGL(( Get_EucNorm), dim3(Blocks12),dim3(1024), 0, 0, dev_image,dev_imageA,samples,lines,bands,ws,C,d,dev_B); hipMemcpy( (void*)imageA, dev_imageA, (samples)*(lines)*sizeof( double ) , hipMemcpyDeviceToHost ); int s,o; for(s=0;s<lines;s++){ for(o=0;o<samples;o++){ imageA[s*samples + o]=sqrtf(imageA[s*samples + o]); } } hipMemcpy( dev_imageA, imageA, (samples)*(lines)*sizeof(double), hipMemcpyHostToDevice ); int x,y; for(y=0;y<(floor(lines/dimSize) + 1 );y++){ for(x=0;x<(floor(samples/dimSize) + 1);x++){ hipLaunchKernelGGL(( Get_P), dim3(Blocks),dim3(Threads), valor*sizeof(double), 0, 
dev_image,dev_imageP, dev_imageA,samples,lines,bands,ws,C,d,dev_B,x,y,valor); } } hipMemcpy( (void*)imageP, dev_imageP, (samples)*(lines)*sizeof( double ) , hipMemcpyDeviceToHost ); for(s=0;s<lines;s++){ for(o=0;o<samples;o++){ double calP=1 + sqrtf(imageP[s*samples + o]); imageP[s*samples + o]=calP*calP; } } hipMemcpy( dev_imageP, imageP, (samples)*(lines)*sizeof(double), hipMemcpyHostToDevice ); dim3 Blocks1((ceil((samples)*(lines))/bands) + 1); dim3 Threads1(bands); // GETPREPROCESSING KERNELL double *dev_imageOut; double *dev_I; // GETPREPROCESSING KERNELL: allocate device copies hipMalloc( (void**)&dev_imageOut, (samples)*(lines)*(bands)*sizeof(double) ); hipMalloc( (void**)&dev_I, (bands)*sizeof(double) ); // GETPREPROCESSING KERNELL: copy inputs to device hipMemcpy( dev_I, I, (bands)*sizeof(double), hipMemcpyHostToDevice ); // operate over the image dev_image,dev_imageP,dev_s,dev_l,dev_bands,dev_ws,dev_c,dev_d,dev_B hipLaunchKernelGGL(( Get_Preprocessing), dim3(Blocks1),dim3(Threads1),(bands)*sizeof(double), 0, dev_image, dev_imageP, dev_imageOut, samples, lines, bands, dev_I); // GETP KERNELL: copy device result back to host copy of c hipMemcpy( imageOut, dev_imageOut, (samples)*(lines)*(bands)*sizeof( double ) , hipMemcpyDeviceToHost ); //END CLOCK***************************************** end = clock(); printf("Parallel SPP: %f segundos", (double)(end - start) / CLOCKS_PER_SEC); fflush(stdout); //************************************************** char headerOut[MAXLINE]; strcpy(headerOut, argv[3]); strcat(headerOut, ".hdr"); error = writeHeader(headerOut, lines, samples, bands, dataType, interleave, byteOrder, waveUnit, wavelength); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error writing header file: %s.", headerOut); fflush(stdout); return error; } error = writeResult(imageOut, argv[3], lines, samples, bands, dataType, interleave); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error writing image file: %s.", argv[3]); fflush(stdout); return error; } hipFree( dev_image ); hipFree( dev_imageP ); hipFree( dev_I); hipFree( dev_B); hipFree( dev_imageOut); hipFree( dev_imageA); free(B); free(imageP); free(I); free(image); free(imageOut); free(imageA); hipDeviceReset(); }
a471ac2756a014899693afd55e1c0fd9056f02b6.cu
#include <stdio.h> #include <stdlib.h> #include <ctype.h> #include <math.h> #include <time.h> #include <cuda.h> #define dimSize 64 #define MAXLINE 200 #define MAXCAD 200 /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ void cleanString(char *cadena, char *out) { int i,j; for( i = j = 0; cadena[i] != 0;++i) { if(isalnum(cadena[i])||cadena[i]=='{'||cadena[i]=='.'||cadena[i]==',') { out[j]=cadena[i]; j++; } } for( i = j; out[i] != 0;++i) out[j]=0; } /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ int readHeader1(char* filename, int *lines, int *samples, int *bands, int *dataType, char* interleave, int *byteOrder, char* waveUnit) { FILE *fp; char line[MAXLINE] = ""; char value [MAXLINE] = ""; if ((fp=fopen(filename,"rt"))!=NULL) { fseek(fp,0L,SEEK_SET); while(fgets(line, MAXLINE, fp)!='\0') { //samples if(strstr(line, "samples")!=NULL && samples !=NULL) { cleanString(strstr(line, "="),value); *samples = atoi(value); } //lines if(strstr(line, "lines")!=NULL && lines !=NULL) { cleanString(strstr(line, "="),value); *lines = atoi(value); } //Bands if(strstr(line, "bands")!=NULL && bands !=NULL) { cleanString(strstr(line, "="),value); *bands = atoi(value); } //Interleave if(strstr(line, "interleave")!=NULL && interleave !=NULL) { cleanString(strstr(line, "="),value); strcpy(interleave,value); } //Data Type if(strstr(line, "data type")!=NULL && dataType !=NULL) { cleanString(strstr(line, "="),value); *dataType = atoi(value); } //Byte Order if(strstr(line, "byte order")!=NULL && byteOrder !=NULL) { cleanString(strstr(line, "="),value); *byteOrder = atoi(value); } //Wavelength Unit if(strstr(line, "wavelength unit")!=NULL && waveUnit !=NULL) { cleanString(strstr(line, "="),value); strcpy(waveUnit,value); } } fclose(fp); return 0; } else return -2; //No file found } /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ int readHeader2(char* filename, double* wavelength) { FILE *fp; char line[MAXLINE] = ""; char value [MAXLINE] = ""; if ((fp=fopen(filename,"rt"))!=NULL) { fseek(fp,0L,SEEK_SET); while(fgets(line, MAXLINE, fp)!='\0') { //Wavelength if(strstr(line, "wavelength =")!=NULL && wavelength !=NULL) { char strAll[100000]=" "; char *pch; int cont = 0; do { fgets(line, 200, fp); cleanString(line,value); strcat(strAll,value); } while(strstr(line, "}")==NULL); pch = strtok(strAll,","); while (pch != NULL) { wavelength[cont]= atof(pch); pch = strtok (NULL, ","); cont++; } } } fclose(fp); return 0; } else return -2; //No file found } /* * Author: Jorge Sevilla Cedillo * Centre: Universidad de Extremadura * */ int loadImage(char* filename, double* image, int lines, int samples, int bands, int dataType, char* interleave) { FILE *fp; short int *tipo_short_int; float *tipo_float; double * tipo_double; unsigned int *tipo_uint; int i, j, k, op; long int lines_samples = lines*samples; if ((fp=fopen(filename,"rb"))!=NULL) { fseek(fp,0L,SEEK_SET); tipo_float = (float*)malloc(lines_samples*bands*sizeof(float)); switch(dataType) { case 2: tipo_short_int = (short int*)malloc(lines_samples*bands*sizeof(short int)); fread(tipo_short_int,1,(sizeof(short int)*lines_samples*bands),fp); for(i=0; i<lines_samples * bands; i++) tipo_float[i]=(float)tipo_short_int[i]; free(tipo_short_int); break; case 4: fread(tipo_float,1,(sizeof(float)*lines_samples*bands),fp); break; case 5: tipo_double = (double*)malloc(lines_samples*bands*sizeof(double)); fread(tipo_double,1,(sizeof(double)*lines_samples*bands),fp); for(i=0; i<lines_samples * bands; i++) 
tipo_float[i]=(float)tipo_double[i]; free(tipo_double); break; case 12: tipo_uint = (unsigned int*)malloc(lines_samples*bands*sizeof(unsigned int)); fread(tipo_uint,1,(sizeof(unsigned int)*lines_samples*bands),fp); for(i=0; i<lines_samples * bands; i++) tipo_float[i]=(float)tipo_uint[i]; free(tipo_uint); break; } fclose(fp); if(interleave == NULL) op = 0; else { if(strcmp(interleave, "bsq") == 0) op = 0; if(strcmp(interleave, "bip") == 0) op = 1; if(strcmp(interleave, "bil") == 0) op = 2; } switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) image[i] = tipo_float[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) image[i*lines*samples + j] = tipo_float[j*bands + i]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) image[j*lines*samples + (i*samples+k)] = tipo_float[k+samples*(i*bands+j)]; break; } free(tipo_float); return 0; } return -2; } /* * Author: Luis Ignacio Jimenez * Centre: Universidad de Extremadura * */ int writeResult(double *image, const char* filename, int lines, int samples, int bands, int dataType, char* interleave) { short int *imageSI; float *imageF; double *imageD; int i,j,k,op; if(interleave == NULL) op = 0; else { if(strcmp(interleave, "bsq") == 0) op = 0; if(strcmp(interleave, "bip") == 0) op = 1; if(strcmp(interleave, "bil") == 0) op = 2; } if(dataType == 2) { imageSI = (short int*)malloc(lines*samples*bands*sizeof(short int)); switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) imageSI[i] = (short int)image[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) imageSI[j*bands + i] = (short int)image[i*lines*samples + j]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) imageSI[i*bands*samples + (j*samples + k)] = (short int)image[j*lines*samples + (i*samples + k)]; break; } } if(dataType == 4) { imageF = (float*)malloc(lines*samples*bands*sizeof(float)); switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) imageF[i] = (float)image[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) imageF[j*bands + i] = (float)image[i*lines*samples + j]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) imageF[i*bands*samples + (j*samples + k)] = (float)image[j*lines*samples + (i*samples + k)]; break; } } if(dataType == 5) { imageD = (double*)malloc(lines*samples*bands*sizeof(double)); switch(op) { case 0: for(i=0; i<lines*samples*bands; i++) imageD[i] = image[i]; break; case 1: for(i=0; i<bands; i++) for(j=0; j<lines*samples; j++) imageD[j*bands + i] = image[i*lines*samples + j]; break; case 2: for(i=0; i<lines; i++) for(j=0; j<bands; j++) for(k=0; k<samples; k++) imageD[i*bands*samples + (j*samples + k)] = image[j*lines*samples + (i*samples + k)]; break; } } FILE *fp; if ((fp=fopen(filename,"wb"))!=NULL) { fseek(fp,0L,SEEK_SET); switch(dataType) { case 2: fwrite(imageSI,1,(lines*samples*bands * sizeof(short int)),fp); free(imageSI); break; case 4: fwrite(imageF,1,(lines*samples*bands * sizeof(float)),fp); free(imageF); break; case 5: fwrite(imageD,1,(lines*samples*bands * sizeof(double)),fp); free(imageD); break; } fclose(fp); return 0; } return -3; } /* * Author: Luis Ignacio Jimenez * Centre: Universidad de Extremadura * */ int writeHeader(char* filename, int lines, int samples, int bands, int dataType, char* interleave, int byteOrder, char* waveUnit, double* wavelength) { FILE *fp; if ((fp=fopen(filename,"wt"))!=NULL) { fseek(fp,0L,SEEK_SET); fprintf(fp,"ENVI\ndescription = {\nExported from 
MATLAB}\n"); if(samples != 0) fprintf(fp,"samples = %d", samples); if(lines != 0) fprintf(fp,"\nlines = %d", lines); if(bands != 0) fprintf(fp,"\nbands = %d", bands); if(dataType != 0) fprintf(fp,"\ndata type = %d", dataType); if(interleave != NULL) fprintf(fp,"\ninterleave = %s", interleave); if(byteOrder != 0) fprintf(fp,"\nbyte order = %d", byteOrder); if(waveUnit != NULL) fprintf(fp,"\nwavelength units = %s", waveUnit); if(waveUnit != NULL) { fprintf(fp,"\nwavelength = {\n"); for(int i=0; i<bands; i++) { if(i==0) fprintf(fp, "%f", wavelength[i]); else if(i%3 == 0) fprintf(fp, ", %f\n", wavelength[i]); else fprintf(fp, ", %f", wavelength[i]); } fprintf(fp,"}"); } fclose(fp); return 0; } return -3; } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ double calculateC(int d){ double C=0.0; // Constant = sum of the square distance between central pixel and the rest of the pixel in the window int i,j; double aux; for (i = 0; i <= d; i++){ // recorremos el cuadrante for ( j = 1; j <= d; j++){ aux=(double)(j*j + i*i); C = C + (1/aux); // sumamos la distancia al cuadrado es decir i^2 + j^2 } } C = C * 4; // Se muliplica por 4 porque hay 4 cuadrantes simetricos. return C; } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ void calculateI(double *image, double *I, int samples, int lines, int bands){ int i,j,z; int LS=samples*lines; for (z=0; z< bands; z++){ I[z] = 0.0; for(i=0; i<samples; i++){ for(j=0; j<lines; j++){ I[z] = I[z] + image[i*samples + j + samples*lines*z]; } } I[z]=(I[z]/(LS)); } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ void calculateB(double *B, int d, double C, int ws){ int r,s; for (r = -d ; r <= d; r++){ // recorremos la matriz B for (s = -d ; s <= d; s++){ if( (s == 0) && (r == 0)) { // si estamos en el pixel central le ponemos 0 B[d*ws+d]=0; } else{ // sino calculamos B[r,s] (B[r+d,s+d] porque c++ no permite indices negativos en las matrices. 
double aux1=(r*r + s*s); aux1=1/aux1; double aux2=1/C; B[(r+d)*(ws)+s+d] = (aux2) * (aux1) ; } } } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ __global__ void Get_EucNorm(double *image,double * imageA, int samples, int lines, int bands, int ws, double C, int d, double *B){ int x= threadIdx.x + blockIdx.x * blockDim.x; extern __shared__ double sdata[]; double EucNormA=0; long int vectA=0; unsigned int i; if ((x >= 0) && (x < (samples*lines))){ // si caen dentro de la imagen // si caen dentro de la imagen for (i=0; i< bands; i++){ vectA = image[x + (samples)*(lines)*i];//central EucNormA = EucNormA + (vectA*vectA); } imageA[x] = EucNormA; } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ __global__ void Get_P(double *image,double * imageP,double * imageA, int samples, int lines, int bands, int ws, double C, int d, double *B, int blockX, int blockY, int valor){ int x=gridDim.x*blockX + blockIdx.x + threadIdx.x -d; int y=gridDim.y*blockY + blockIdx.y + threadIdx.y -d ; int z=gridDim.x*blockX + blockIdx.x; int zz=gridDim.y*blockY + blockIdx.y; int relativeX=threadIdx.x; int relativeY=threadIdx.y; int pos=relativeY*(ws) + relativeX; extern __shared__ double sdata[]; double G=0.0; long int vectA=0; double vectB=0.0; double dotPro=0.0; unsigned int i; double cose; double aux=0; if ((z >= 0) && (z < (samples)) && (zz >= 0) && (zz < (lines))){ // si caen dentro de la imagen sdata[pos]= 0; if((pos + (valor/2) ) < valor){ int vari=(valor/2); sdata[(pos+vari)]=0; } //if ((x >= 0) && (x < (samples)) && (y >= 0) && (y < (lines))){ // si caen dentro de la imagen if(x < 0) x = x + d +1; if(y < 0) y = y + d +1; if(x >= samples) x = x - d -1; if(y >= lines) y = y - d -1; for (i=0; i< bands; i++){ vectA = image[zz*(samples) + z + (samples)*(lines)*i];//central vectB = image[y*(samples) + x + (samples)*(lines)*i]; dotPro = dotPro + (vectA*vectB); __syncthreads(); } aux=aux + (imageA[zz*(samples) + z] * imageA[y*(samples) + x]); cose = (dotPro/aux); if (cose >= 1) G=0.0; else{ G = acos(cose ); if (G<0) G=G*(-1); } sdata[pos] =B[pos]*G; __syncthreads(); for (i=((valor)/2); i>0; i=i/2) { if((pos < i) && ( (pos + i) < ws*ws) ) { sdata[pos] += sdata[pos + i]; } __syncthreads(); } __syncthreads(); if ((threadIdx.x==0) && (threadIdx.y==0)){ imageP[zz*(samples) + z] = sdata[0]; }else imageP[zz*(samples) + z] = 0; //} } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ __global__ void Get_Preprocessing(double *image, double *imageP, double *imageOut, int samples, int lines, int bands, double *I){ int x= threadIdx.x + blockIdx.x * blockDim.x; int n= 0; extern __shared__ double fdata[]; if ( threadIdx.x < bands){ // si caen dentro de la imagen fdata[threadIdx.x]=I[threadIdx.x]; __syncthreads(); } if ( x < (samples)*(lines)){ // si caen dentro de la imagen while(n<(bands)){ if((1/imageP[x]) >= 0) imageOut[(x + (samples)*(lines)*n)] = ((1/imageP[x]) * ( image[x + ((samples)*(lines)*n)] - fdata[n]) + fdata[n]); else imageOut[(x + (samples)*(lines)*n)] = image[x + ((samples)*(lines)*n)]; n++; } } } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ double dot_product(double* a, double* b, int n){ double result=0.0; int i; for(i=0;i<n;i++) result=result + a[i]*b[i]; return result; } /* * Author: Jaime Delgado Granados * Centre: Universidad de Extremadura * */ int main (int argc, char* argv[]){ /* * ARGUMENTS * * argv[1]: Input image filename * argv[2]: Window size * argv[3]: Output image filename * */ 
if(argc > 4 || argc < 4) { printf("EXECUTION ERROR SPP Parallel: Parameters are not correct."); printf("./SPP [Image Filename] [Window size] [Output Result File]"); fflush(stdout); exit(-1); } int ws = atoi(argv[2]); // window size if((ws%2)==0) ws=ws-1; char header_filename[MAXLINE]; double* image; double* imageOut; strcpy(header_filename, argv[1]); strcat(header_filename, ".hdr"); int lines = 0, samples= 0, bands= 0, dataType= 0, byteOrder = 0; char *interleave, *waveUnit; interleave = (char*)malloc(MAXCAD*sizeof(char)); waveUnit = (char*)malloc(MAXCAD*sizeof(char)); // Load image int error = readHeader1(header_filename, &lines, &samples, &bands, &dataType, interleave, &byteOrder, waveUnit); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error reading header file: %s.", header_filename); fflush(stdout); exit(-1); } double* wavelength = (double*)malloc(bands*sizeof(double)); strcpy(header_filename,argv[1]); // Second parameter: Header file: strcat(header_filename,".hdr"); error = readHeader2(header_filename, wavelength); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error reading header file: %s.", header_filename); fflush(stdout); exit(-1); } image = (double*)malloc ( samples*lines * bands*sizeof(double) ); imageOut = (double*)malloc ( samples*lines * bands*sizeof(double) ); error = loadImage(argv[1], image, samples, lines, bands, dataType, interleave); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error image header file: %s.", argv[1]); fflush(stdout); return error; } //START CLOCK*************************************** clock_t start, end; start = clock(); //************************************************** //2º STEP: PREPROCESSING double *I = (double*)malloc(bands*sizeof(double)); int d = (ws-1) / 2; double *B = (double*)malloc(ws*ws*sizeof(double)); double *imageA = (double*)malloc ( samples*lines * bands*sizeof(double) ); double *imageP = (double*)malloc ( samples*lines * bands*sizeof(double) ); float C=0.0; C=calculateC(d); calculateI(image, I, samples, lines, bands); calculateB(B, d, C, ws); dim3 Blocks((dimSize),(dimSize),1); dim3 Threads((ws),(ws),1); // GETP KERNELL double *dev_image;// device copies of image, samples, lines, ws, C, double *dev_B; double *dev_imageP; double *dev_imageA; // GETP KERNELL: allocate device copies cudaMalloc( (void**)&dev_image, (samples)*(lines)*(bands)*sizeof(double)); cudaMalloc( (void**)&dev_B, (ws)*(ws)*sizeof(double)); cudaMalloc( (void**)&dev_imageP, (samples)*(lines)*sizeof(double) ); cudaMalloc( (void**)&dev_imageA, (samples)*(lines)*sizeof(double) ); // GETP KERNELL: copy inputs to device cudaMemcpy( dev_image, image, (samples)*(lines)*(bands)*sizeof(double), cudaMemcpyHostToDevice ); cudaMemcpy( dev_B, B, (ws)*(ws)*sizeof(double), cudaMemcpyHostToDevice ); // GETP KERNELL // int x,y; int valor=1; while(valor<(ws*ws)){ valor=valor*2; } int blocks= (samples*lines)/1024 +1; dim3 Blocks12(blocks); dim3 Threads12(1024); Get_EucNorm<<<Blocks12,1024>>>(dev_image,dev_imageA,samples,lines,bands,ws,C,d,dev_B); cudaMemcpy( (void*)imageA, dev_imageA, (samples)*(lines)*sizeof( double ) , cudaMemcpyDeviceToHost ); int s,o; for(s=0;s<lines;s++){ for(o=0;o<samples;o++){ imageA[s*samples + o]=sqrtf(imageA[s*samples + o]); } } cudaMemcpy( dev_imageA, imageA, (samples)*(lines)*sizeof(double), cudaMemcpyHostToDevice ); int x,y; for(y=0;y<(floor(lines/dimSize) + 1 );y++){ for(x=0;x<(floor(samples/dimSize) + 1);x++){ Get_P<<<Blocks,Threads, valor*sizeof(double)>>>(dev_image,dev_imageP, 
dev_imageA,samples,lines,bands,ws,C,d,dev_B,x,y,valor); } } cudaMemcpy( (void*)imageP, dev_imageP, (samples)*(lines)*sizeof( double ) , cudaMemcpyDeviceToHost ); for(s=0;s<lines;s++){ for(o=0;o<samples;o++){ double calP=1 + sqrtf(imageP[s*samples + o]); imageP[s*samples + o]=calP*calP; } } cudaMemcpy( dev_imageP, imageP, (samples)*(lines)*sizeof(double), cudaMemcpyHostToDevice ); dim3 Blocks1((ceil((samples)*(lines))/bands) + 1); dim3 Threads1(bands); // GETPREPROCESSING KERNELL double *dev_imageOut; double *dev_I; // GETPREPROCESSING KERNELL: allocate device copies cudaMalloc( (void**)&dev_imageOut, (samples)*(lines)*(bands)*sizeof(double) ); cudaMalloc( (void**)&dev_I, (bands)*sizeof(double) ); // GETPREPROCESSING KERNELL: copy inputs to device cudaMemcpy( dev_I, I, (bands)*sizeof(double), cudaMemcpyHostToDevice ); // operate over the image dev_image,dev_imageP,dev_s,dev_l,dev_bands,dev_ws,dev_c,dev_d,dev_B Get_Preprocessing<<<Blocks1,Threads1,(bands)*sizeof(double)>>>(dev_image, dev_imageP, dev_imageOut, samples, lines, bands, dev_I); // GETP KERNELL: copy device result back to host copy of c cudaMemcpy( imageOut, dev_imageOut, (samples)*(lines)*(bands)*sizeof( double ) , cudaMemcpyDeviceToHost ); //END CLOCK***************************************** end = clock(); printf("Parallel SPP: %f segundos", (double)(end - start) / CLOCKS_PER_SEC); fflush(stdout); //************************************************** char headerOut[MAXLINE]; strcpy(headerOut, argv[3]); strcat(headerOut, ".hdr"); error = writeHeader(headerOut, lines, samples, bands, dataType, interleave, byteOrder, waveUnit, wavelength); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error writing header file: %s.", headerOut); fflush(stdout); return error; } error = writeResult(imageOut, argv[3], lines, samples, bands, dataType, interleave); if(error != 0) { printf("EXECUTION ERROR SPP Parallel: Error writing image file: %s.", argv[3]); fflush(stdout); return error; } cudaFree( dev_image ); cudaFree( dev_imageP ); cudaFree( dev_I); cudaFree( dev_B); cudaFree( dev_imageOut); cudaFree( dev_imageA); free(B); free(imageP); free(I); free(image); free(imageOut); free(imageA); cudaDeviceReset(); }
7f2e6470317b600598635d8b26873d98831b9483.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===--- omptarget-nvptx.cu - NVPTX OpenMP GPU initialization ---- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the initialization code for the GPU // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" //////////////////////////////////////////////////////////////////////////////// // global data tables //////////////////////////////////////////////////////////////////////////////// extern __device__ omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_State[MAX_SM]; extern __device__ omptarget_nvptx_Queue< omptarget_nvptx_SimpleThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_simpleState[MAX_SM]; extern __device__ __shared__ void *omptarget_nvptx_simpleGlobalData; //////////////////////////////////////////////////////////////////////////////// // init entry points //////////////////////////////////////////////////////////////////////////////// INLINE unsigned nsmid() { unsigned n; asm("mov.u32 %0, %%nsmid;" : "=r"(n)); return n; } INLINE unsigned smid() { unsigned id; asm("mov.u32 %0, %%smid;" : "=r"(id)); ASSERT0(LT_FUSSY, nsmid() <= MAX_SM, "Expected number of SMs is less than reported."); return id; } EXTERN void __kmpc_kernel_init_params(void *Ptr) { PRINT(LD_IO, "call to __kmpc_kernel_init_params with version %f\n", OMPTARGET_NVPTX_VERSION); SetTeamsReductionScratchpadPtr(Ptr); } EXTERN void __kmpc_kernel_init(int ThreadLimit, int16_t RequiresOMPRuntime) { PRINT(LD_IO, "call to __kmpc_kernel_init with version %f\n", OMPTARGET_NVPTX_VERSION); ASSERT0(LT_FUSSY, RequiresOMPRuntime, "Generic always requires initialized runtime."); setExecutionParameters(Generic, RuntimeInitialized); int threadIdInBlock = GetThreadIdInBlock(); ASSERT0(LT_FUSSY, threadIdInBlock == GetMasterThreadID(), "__kmpc_kernel_init() must be called by team master warp only!"); PRINT0(LD_IO, "call to __kmpc_kernel_init for master\n"); // Get a state object from the queue. int slot = smid() % MAX_SM; omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue(); #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 omptarget_nvptx_threadPrivateContext->SetSourceQueue(slot); #endif // init thread private int threadId = GetLogicalThreadIdInBlock(); omptarget_nvptx_threadPrivateContext->InitThreadPrivateContext(threadId); // init team context omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor(); currTeamDescr.InitTeamDescr(); // this thread will start execution... has to update its task ICV // to point to the level zero task ICV. 
That ICV was init in // InitTeamDescr() omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr( threadId, currTeamDescr.LevelZeroTaskDescr()); // set number of threads and thread limit in team to started value omptarget_nvptx_TaskDescr *currTaskDescr = omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(threadId); currTaskDescr->NThreads() = GetNumberOfWorkersInTeam(); currTaskDescr->ThreadLimit() = ThreadLimit; } EXTERN void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized) { ASSERT0(LT_FUSSY, IsOMPRuntimeInitialized, "Generic always requires initialized runtime."); // Enqueue omp state object for use by another team. #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 int slot = omptarget_nvptx_threadPrivateContext->GetSourceQueue(); #else int slot = smid() % MAX_SM; #endif omptarget_nvptx_device_State[slot].Enqueue( omptarget_nvptx_threadPrivateContext); // Done with work. Kill the workers. omptarget_nvptx_workFn = 0; } EXTERN void __kmpc_spmd_kernel_init(int ThreadLimit, int16_t RequiresOMPRuntime, int16_t RequiresDataSharing) { PRINT0(LD_IO, "call to __kmpc_spmd_kernel_init\n"); if (!RequiresOMPRuntime) { // If OMP runtime is not required don't initialize OMP state. setExecutionParameters(Spmd, RuntimeUninitialized); if (GetThreadIdInBlock() == 0) { int slot = smid() % MAX_SM; omptarget_nvptx_simpleThreadPrivateContext = omptarget_nvptx_device_simpleState[slot].Dequeue(); // Reuse the memory allocated for the full runtime as the preallocated // global memory buffer for the lightweight runtime. omptarget_nvptx_simpleGlobalData = omptarget_nvptx_device_State[slot].Dequeue(); } __syncthreads(); omptarget_nvptx_simpleThreadPrivateContext->Init(); return; } setExecutionParameters(Spmd, RuntimeInitialized); // // Team Context Initialization. // // In SPMD mode there is no master thread so use any cuda thread for team // context initialization. int threadId = GetThreadIdInBlock(); if (threadId == 0) { // Get a state object from the queue. int slot = smid() % MAX_SM; omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue(); omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor(); omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor(); // init team context currTeamDescr.InitTeamDescr(); } __syncthreads(); omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor(); omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor(); // // Initialize task descr for each thread. // omptarget_nvptx_TaskDescr *newTaskDescr = omptarget_nvptx_threadPrivateContext->Level1TaskDescr(threadId); ASSERT0(LT_FUSSY, newTaskDescr, "expected a task descr"); newTaskDescr->InitLevelOneTaskDescr(ThreadLimit, currTeamDescr.LevelZeroTaskDescr()); newTaskDescr->ThreadLimit() = ThreadLimit; // install new top descriptor omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId, newTaskDescr); // init thread private from init value PRINT(LD_PAR, "thread will execute parallel region with id %d in a team of " "%d threads\n", newTaskDescr->ThreadId(), newTaskDescr->ThreadsInTeam()); if (RequiresDataSharing && threadId % WARPSIZE == 0) { // Warp master innitializes data sharing environment. 
unsigned WID = threadId / WARPSIZE; __kmpc_data_sharing_slot *RootS = currTeamDescr.RootS( WID, WID == WARPSIZE - 1); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; } } EXTERN void __kmpc_spmd_kernel_deinit() { // We're not going to pop the task descr stack of each thread since // there are no more parallel regions in SPMD mode. __syncthreads(); int threadId = GetThreadIdInBlock(); if (isRuntimeUninitialized()) { if (threadId == 0) { // Enqueue omp state object for use by another team. int slot = smid() % MAX_SM; omptarget_nvptx_device_simpleState[slot].Enqueue( omptarget_nvptx_simpleThreadPrivateContext); // Enqueue global memory back. omptarget_nvptx_device_State[slot].Enqueue( reinterpret_cast<omptarget_nvptx_ThreadPrivateContext *>( omptarget_nvptx_simpleGlobalData)); } return; } if (threadId == 0) { // Enqueue omp state object for use by another team. int slot = smid() % MAX_SM; omptarget_nvptx_device_State[slot].Enqueue( omptarget_nvptx_threadPrivateContext); } } // Return true if the current target region is executed in SPMD mode. EXTERN int8_t __kmpc_is_spmd_exec_mode() { return isSPMDMode(); }
7f2e6470317b600598635d8b26873d98831b9483.cu
//===--- omptarget-nvptx.cu - NVPTX OpenMP GPU initialization ---- CUDA -*-===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.txt for details. // //===----------------------------------------------------------------------===// // // This file contains the initialization code for the GPU // //===----------------------------------------------------------------------===// #include "omptarget-nvptx.h" //////////////////////////////////////////////////////////////////////////////// // global data tables //////////////////////////////////////////////////////////////////////////////// extern __device__ omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_State[MAX_SM]; extern __device__ omptarget_nvptx_Queue< omptarget_nvptx_SimpleThreadPrivateContext, OMP_STATE_COUNT> omptarget_nvptx_device_simpleState[MAX_SM]; extern __device__ __shared__ void *omptarget_nvptx_simpleGlobalData; //////////////////////////////////////////////////////////////////////////////// // init entry points //////////////////////////////////////////////////////////////////////////////// INLINE unsigned nsmid() { unsigned n; asm("mov.u32 %0, %%nsmid;" : "=r"(n)); return n; } INLINE unsigned smid() { unsigned id; asm("mov.u32 %0, %%smid;" : "=r"(id)); ASSERT0(LT_FUSSY, nsmid() <= MAX_SM, "Expected number of SMs is less than reported."); return id; } EXTERN void __kmpc_kernel_init_params(void *Ptr) { PRINT(LD_IO, "call to __kmpc_kernel_init_params with version %f\n", OMPTARGET_NVPTX_VERSION); SetTeamsReductionScratchpadPtr(Ptr); } EXTERN void __kmpc_kernel_init(int ThreadLimit, int16_t RequiresOMPRuntime) { PRINT(LD_IO, "call to __kmpc_kernel_init with version %f\n", OMPTARGET_NVPTX_VERSION); ASSERT0(LT_FUSSY, RequiresOMPRuntime, "Generic always requires initialized runtime."); setExecutionParameters(Generic, RuntimeInitialized); int threadIdInBlock = GetThreadIdInBlock(); ASSERT0(LT_FUSSY, threadIdInBlock == GetMasterThreadID(), "__kmpc_kernel_init() must be called by team master warp only!"); PRINT0(LD_IO, "call to __kmpc_kernel_init for master\n"); // Get a state object from the queue. int slot = smid() % MAX_SM; omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue(); #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 omptarget_nvptx_threadPrivateContext->SetSourceQueue(slot); #endif // init thread private int threadId = GetLogicalThreadIdInBlock(); omptarget_nvptx_threadPrivateContext->InitThreadPrivateContext(threadId); // init team context omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor(); currTeamDescr.InitTeamDescr(); // this thread will start execution... has to update its task ICV // to point to the level zero task ICV. That ICV was init in // InitTeamDescr() omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr( threadId, currTeamDescr.LevelZeroTaskDescr()); // set number of threads and thread limit in team to started value omptarget_nvptx_TaskDescr *currTaskDescr = omptarget_nvptx_threadPrivateContext->GetTopLevelTaskDescr(threadId); currTaskDescr->NThreads() = GetNumberOfWorkersInTeam(); currTaskDescr->ThreadLimit() = ThreadLimit; } EXTERN void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized) { ASSERT0(LT_FUSSY, IsOMPRuntimeInitialized, "Generic always requires initialized runtime."); // Enqueue omp state object for use by another team. 
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 int slot = omptarget_nvptx_threadPrivateContext->GetSourceQueue(); #else int slot = smid() % MAX_SM; #endif omptarget_nvptx_device_State[slot].Enqueue( omptarget_nvptx_threadPrivateContext); // Done with work. Kill the workers. omptarget_nvptx_workFn = 0; } EXTERN void __kmpc_spmd_kernel_init(int ThreadLimit, int16_t RequiresOMPRuntime, int16_t RequiresDataSharing) { PRINT0(LD_IO, "call to __kmpc_spmd_kernel_init\n"); if (!RequiresOMPRuntime) { // If OMP runtime is not required don't initialize OMP state. setExecutionParameters(Spmd, RuntimeUninitialized); if (GetThreadIdInBlock() == 0) { int slot = smid() % MAX_SM; omptarget_nvptx_simpleThreadPrivateContext = omptarget_nvptx_device_simpleState[slot].Dequeue(); // Reuse the memory allocated for the full runtime as the preallocated // global memory buffer for the lightweight runtime. omptarget_nvptx_simpleGlobalData = omptarget_nvptx_device_State[slot].Dequeue(); } __syncthreads(); omptarget_nvptx_simpleThreadPrivateContext->Init(); return; } setExecutionParameters(Spmd, RuntimeInitialized); // // Team Context Initialization. // // In SPMD mode there is no master thread so use any cuda thread for team // context initialization. int threadId = GetThreadIdInBlock(); if (threadId == 0) { // Get a state object from the queue. int slot = smid() % MAX_SM; omptarget_nvptx_threadPrivateContext = omptarget_nvptx_device_State[slot].Dequeue(); omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor(); omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor(); // init team context currTeamDescr.InitTeamDescr(); } __syncthreads(); omptarget_nvptx_TeamDescr &currTeamDescr = getMyTeamDescriptor(); omptarget_nvptx_WorkDescr &workDescr = getMyWorkDescriptor(); // // Initialize task descr for each thread. // omptarget_nvptx_TaskDescr *newTaskDescr = omptarget_nvptx_threadPrivateContext->Level1TaskDescr(threadId); ASSERT0(LT_FUSSY, newTaskDescr, "expected a task descr"); newTaskDescr->InitLevelOneTaskDescr(ThreadLimit, currTeamDescr.LevelZeroTaskDescr()); newTaskDescr->ThreadLimit() = ThreadLimit; // install new top descriptor omptarget_nvptx_threadPrivateContext->SetTopLevelTaskDescr(threadId, newTaskDescr); // init thread private from init value PRINT(LD_PAR, "thread will execute parallel region with id %d in a team of " "%d threads\n", newTaskDescr->ThreadId(), newTaskDescr->ThreadsInTeam()); if (RequiresDataSharing && threadId % WARPSIZE == 0) { // Warp master innitializes data sharing environment. unsigned WID = threadId / WARPSIZE; __kmpc_data_sharing_slot *RootS = currTeamDescr.RootS( WID, WID == WARPSIZE - 1); DataSharingState.SlotPtr[WID] = RootS; DataSharingState.StackPtr[WID] = (void *)&RootS->Data[0]; } } EXTERN void __kmpc_spmd_kernel_deinit() { // We're not going to pop the task descr stack of each thread since // there are no more parallel regions in SPMD mode. __syncthreads(); int threadId = GetThreadIdInBlock(); if (isRuntimeUninitialized()) { if (threadId == 0) { // Enqueue omp state object for use by another team. int slot = smid() % MAX_SM; omptarget_nvptx_device_simpleState[slot].Enqueue( omptarget_nvptx_simpleThreadPrivateContext); // Enqueue global memory back. omptarget_nvptx_device_State[slot].Enqueue( reinterpret_cast<omptarget_nvptx_ThreadPrivateContext *>( omptarget_nvptx_simpleGlobalData)); } return; } if (threadId == 0) { // Enqueue omp state object for use by another team. 
int slot = smid() % MAX_SM; omptarget_nvptx_device_State[slot].Enqueue( omptarget_nvptx_threadPrivateContext); } } // Return true if the current target region is executed in SPMD mode. EXTERN int8_t __kmpc_is_spmd_exec_mode() { return isSPMDMode(); }
658e4091db3590100967fbce6c9427fd527502b9.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <stdio.h> #include <hip/hip_runtime.h> #include "parboil.h" //#include "file.h" #include "../benchmark_common.h" #define CUERR { hipError_t err; \ if ((err = hipGetLastError()) != hipSuccess) { \ printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \ return -1; }} // Block index #define bx blockIdx.x #define by blockIdx.y // Thread index #define tx threadIdx.x // Possible values are 2, 4, 8 and 16 #define R 2 inline __device__ float2 operator*( float2 a, float2 b ) { return make_float2( a.x*b.x-a.y*b.y, a.x*b.y+a.y*b.x ); } inline __device__ float2 operator+( float2 a, float2 b ) { return make_float2( a.x + b.x, a.y + b.y ); } inline __device__ float2 operator-( float2 a, float2 b ) { return make_float2( a.x - b.x, a.y - b.y ); } inline __device__ float2 operator*( float2 a, float b ) { return make_float2( b*a.x , b*a.y); } #define COS_PI_8 0.923879533f #define SIN_PI_8 0.382683432f #define exp_1_16 make_float2( COS_PI_8, -SIN_PI_8 ) #define exp_3_16 make_float2( SIN_PI_8, -COS_PI_8 ) #define exp_5_16 make_float2( -SIN_PI_8, -COS_PI_8 ) #define exp_7_16 make_float2( -COS_PI_8, -SIN_PI_8 ) #define exp_9_16 make_float2( -COS_PI_8, SIN_PI_8 ) #define exp_1_8 make_float2( 1, -1 ) #define exp_1_4 make_float2( 0, -1 ) #define exp_3_8 make_float2( -1, -1 ) __device__ void GPU_FFT2( float2 &v1,float2 &v2 ) { float2 v0 = v1; v1 = v0 + v2; v2 = v0 - v2; } __device__ void GPU_FFT4( float2 &v0,float2 &v1,float2 &v2,float2 &v3) { GPU_FFT2(v0, v2); GPU_FFT2(v1, v3); v3 = v3 * exp_1_4; GPU_FFT2(v0, v1); GPU_FFT2(v2, v3); } inline __device__ void GPU_FFT2(float2* v){ GPU_FFT2(v[0],v[1]); } inline __device__ void GPU_FFT4(float2* v){ GPU_FFT4(v[0],v[1],v[2],v[3] ); } inline __device__ void GPU_FFT8(float2* v){ GPU_FFT2(v[0],v[4]); GPU_FFT2(v[1],v[5]); GPU_FFT2(v[2],v[6]); GPU_FFT2(v[3],v[7]); v[5]=(v[5]*exp_1_8)*M_SQRT1_2; v[6]=v[6]*exp_1_4; v[7]=(v[7]*exp_3_8)*M_SQRT1_2; GPU_FFT4(v[0],v[1],v[2],v[3]); GPU_FFT4(v[4],v[5],v[6],v[7]); } inline __device__ void GPU_FFT16( float2 *v ) { GPU_FFT4( v[0], v[4], v[8], v[12] ); GPU_FFT4( v[1], v[5], v[9], v[13] ); GPU_FFT4( v[2], v[6], v[10], v[14] ); GPU_FFT4( v[3], v[7], v[11], v[15] ); v[5] = (v[5] * exp_1_8 ) * M_SQRT1_2; v[6] = v[6] * exp_1_4; v[7] = (v[7] * exp_3_8 ) * M_SQRT1_2; v[9] = v[9] * exp_1_16; v[10] = (v[10] * exp_1_8 ) * M_SQRT1_2; v[11] = v[11] * exp_3_16; v[13] = v[13] * exp_3_16; v[14] = (v[14] * exp_3_8 ) * M_SQRT1_2; v[15] = v[15] * exp_9_16; GPU_FFT4( v[0], v[1], v[2], v[3] ); GPU_FFT4( v[4], v[5], v[6], v[7] ); GPU_FFT4( v[8], v[9], v[10], v[11] ); GPU_FFT4( v[12], v[13], v[14], v[15] ); } __device__ int GPU_expand(int idxL, int N1, int N2 ){ return (idxL/N1)*N1*N2 + (idxL%N1); } __device__ void GPU_FftIteration(int j, int Ns, float2* data0, float2* data1, int N){ float2 v[R]; int idxS = j; float angle = -2*M_PI*(j%Ns)/(Ns*R); for( int r=0; r<R; r++ ) { v[r] = data0[idxS+r*N/R]; v[r] = v[r]*make_float2(cos(r*angle), sin(r*angle)); } #if R == 2 GPU_FFT2( v ); #endif #if R == 4 GPU_FFT4( v ); #endif #if R == 8 GPU_FFT8( v ); #endif #if R == 16 GPU_FFT16( v ); #endif int idxD = GPU_expand(j,Ns,R); for( int r=0; r<R; r++ ){ data1[idxD+r*Ns] = v[r]; } } __global__ void GPU_FFT_Global(int Ns, float2* 
data0, float2* data1, int N) { data0+=bx*N; data1+=bx*N; GPU_FftIteration( tx, Ns, data0, data1, N); } //int main( int argc, char **argv ) int main_fft(hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { int n_bytes; int N, B; struct pb_TimerSet timers; //struct pb_Parameters *params; /*params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } int err = 0;*/ N= 256; B= 1024; /*if(argc != 3) err |= 1; else { char* numend; N = strtol(argv[1], &numend, 10); if(numend == argv[1]) err |= 2; B = strtol(argv[2], &numend, 10); if(numend == argv[2]) err |= 4; } if(err) { fprintf(stderr, "Expecting two integers for N and B\n"); exit(-1); }*/ n_bytes = N*B*sizeof(float2); pb_InitializeTimerSet(&timers); pb_SwitchToTimer(&timers, pb_TimerID_IO); float2 *source = (float2 *)malloc( n_bytes ); float2 *result = (float2 *)malloc( n_bytes ); char *file = (char *) "FFT/array.bin"; //inputData(file,(float*)source,N*B*2); FILE* fid = fopen(file, "r"); if (fid == NULL) { fprintf(stderr, "Cannot open input file\n"); exit(-1); } fread ((float*)source, sizeof (float), N*B*2, fid); fclose (fid); // allocate device memory pb_SwitchToTimer(&timers, pb_TimerID_COPY); float2 *d_source, *d_work; hipMalloc((void**) &d_source, n_bytes); CUERR; // copy host memory to device hipMemcpyAsync(d_source, source, n_bytes,hipMemcpyHostToDevice, stream_app); CUERR; hipMalloc((void**) &d_work, n_bytes); CUERR; hipMemset(d_work, 0,n_bytes); CUERR; pb_SwitchToTimer(&timers, pb_TimerID_GPU); for( int Ns=1; Ns<N; Ns*=R){ hipLaunchKernelGGL(( GPU_FFT_Global), dim3(dim3(B)), dim3(dim3(N/R)),0,stream_app, Ns, d_source, d_work, N); float2 *tmp = d_source; d_source = d_work; d_work = tmp; } printf("I am out from fft kernel launch\n"); pthread_mutex_unlock (mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); pb_SwitchToTimer(&timers, pb_TimerID_COPY); // copy device memory to host hipMemcpyAsync(result, d_source, n_bytes,hipMemcpyDeviceToHost, stream_app); CUERR; if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); hipFree(d_source); CUERR; hipFree(d_work); CUERR; /*if (params->outFile) { Write result to file pb_SwitchToTimer(&timers, pb_TimerID_IO); outputData(params->outFile, (float*)result, N*B*2); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); }*/ free(source); free(result); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); return 0; }
658e4091db3590100967fbce6c9427fd527502b9.cu
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include <stdio.h> #include <cuda.h> #include "parboil.h" //#include "file.h" #include "../benchmark_common.h" #define CUERR { cudaError_t err; \ if ((err = cudaGetLastError()) != cudaSuccess) { \ printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \ return -1; }} // Block index #define bx blockIdx.x #define by blockIdx.y // Thread index #define tx threadIdx.x // Possible values are 2, 4, 8 and 16 #define R 2 inline __device__ float2 operator*( float2 a, float2 b ) { return make_float2( a.x*b.x-a.y*b.y, a.x*b.y+a.y*b.x ); } inline __device__ float2 operator+( float2 a, float2 b ) { return make_float2( a.x + b.x, a.y + b.y ); } inline __device__ float2 operator-( float2 a, float2 b ) { return make_float2( a.x - b.x, a.y - b.y ); } inline __device__ float2 operator*( float2 a, float b ) { return make_float2( b*a.x , b*a.y); } #define COS_PI_8 0.923879533f #define SIN_PI_8 0.382683432f #define exp_1_16 make_float2( COS_PI_8, -SIN_PI_8 ) #define exp_3_16 make_float2( SIN_PI_8, -COS_PI_8 ) #define exp_5_16 make_float2( -SIN_PI_8, -COS_PI_8 ) #define exp_7_16 make_float2( -COS_PI_8, -SIN_PI_8 ) #define exp_9_16 make_float2( -COS_PI_8, SIN_PI_8 ) #define exp_1_8 make_float2( 1, -1 ) #define exp_1_4 make_float2( 0, -1 ) #define exp_3_8 make_float2( -1, -1 ) __device__ void GPU_FFT2( float2 &v1,float2 &v2 ) { float2 v0 = v1; v1 = v0 + v2; v2 = v0 - v2; } __device__ void GPU_FFT4( float2 &v0,float2 &v1,float2 &v2,float2 &v3) { GPU_FFT2(v0, v2); GPU_FFT2(v1, v3); v3 = v3 * exp_1_4; GPU_FFT2(v0, v1); GPU_FFT2(v2, v3); } inline __device__ void GPU_FFT2(float2* v){ GPU_FFT2(v[0],v[1]); } inline __device__ void GPU_FFT4(float2* v){ GPU_FFT4(v[0],v[1],v[2],v[3] ); } inline __device__ void GPU_FFT8(float2* v){ GPU_FFT2(v[0],v[4]); GPU_FFT2(v[1],v[5]); GPU_FFT2(v[2],v[6]); GPU_FFT2(v[3],v[7]); v[5]=(v[5]*exp_1_8)*M_SQRT1_2; v[6]=v[6]*exp_1_4; v[7]=(v[7]*exp_3_8)*M_SQRT1_2; GPU_FFT4(v[0],v[1],v[2],v[3]); GPU_FFT4(v[4],v[5],v[6],v[7]); } inline __device__ void GPU_FFT16( float2 *v ) { GPU_FFT4( v[0], v[4], v[8], v[12] ); GPU_FFT4( v[1], v[5], v[9], v[13] ); GPU_FFT4( v[2], v[6], v[10], v[14] ); GPU_FFT4( v[3], v[7], v[11], v[15] ); v[5] = (v[5] * exp_1_8 ) * M_SQRT1_2; v[6] = v[6] * exp_1_4; v[7] = (v[7] * exp_3_8 ) * M_SQRT1_2; v[9] = v[9] * exp_1_16; v[10] = (v[10] * exp_1_8 ) * M_SQRT1_2; v[11] = v[11] * exp_3_16; v[13] = v[13] * exp_3_16; v[14] = (v[14] * exp_3_8 ) * M_SQRT1_2; v[15] = v[15] * exp_9_16; GPU_FFT4( v[0], v[1], v[2], v[3] ); GPU_FFT4( v[4], v[5], v[6], v[7] ); GPU_FFT4( v[8], v[9], v[10], v[11] ); GPU_FFT4( v[12], v[13], v[14], v[15] ); } __device__ int GPU_expand(int idxL, int N1, int N2 ){ return (idxL/N1)*N1*N2 + (idxL%N1); } __device__ void GPU_FftIteration(int j, int Ns, float2* data0, float2* data1, int N){ float2 v[R]; int idxS = j; float angle = -2*M_PI*(j%Ns)/(Ns*R); for( int r=0; r<R; r++ ) { v[r] = data0[idxS+r*N/R]; v[r] = v[r]*make_float2(cos(r*angle), sin(r*angle)); } #if R == 2 GPU_FFT2( v ); #endif #if R == 4 GPU_FFT4( v ); #endif #if R == 8 GPU_FFT8( v ); #endif #if R == 16 GPU_FFT16( v ); #endif int idxD = GPU_expand(j,Ns,R); for( int r=0; r<R; r++ ){ data1[idxD+r*Ns] = v[r]; } } __global__ void GPU_FFT_Global(int Ns, float2* data0, float2* data1, int N) { data0+=bx*N; data1+=bx*N; 
GPU_FftIteration( tx, Ns, data0, data1, N); } //int main( int argc, char **argv ) int main_fft(cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { int n_bytes; int N, B; struct pb_TimerSet timers; //struct pb_Parameters *params; /*params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } int err = 0;*/ N= 256; B= 1024; /*if(argc != 3) err |= 1; else { char* numend; N = strtol(argv[1], &numend, 10); if(numend == argv[1]) err |= 2; B = strtol(argv[2], &numend, 10); if(numend == argv[2]) err |= 4; } if(err) { fprintf(stderr, "Expecting two integers for N and B\n"); exit(-1); }*/ n_bytes = N*B*sizeof(float2); pb_InitializeTimerSet(&timers); pb_SwitchToTimer(&timers, pb_TimerID_IO); float2 *source = (float2 *)malloc( n_bytes ); float2 *result = (float2 *)malloc( n_bytes ); char *file = (char *) "FFT/array.bin"; //inputData(file,(float*)source,N*B*2); FILE* fid = fopen(file, "r"); if (fid == NULL) { fprintf(stderr, "Cannot open input file\n"); exit(-1); } fread ((float*)source, sizeof (float), N*B*2, fid); fclose (fid); // allocate device memory pb_SwitchToTimer(&timers, pb_TimerID_COPY); float2 *d_source, *d_work; cudaMalloc((void**) &d_source, n_bytes); CUERR; // copy host memory to device cudaMemcpyAsync(d_source, source, n_bytes,cudaMemcpyHostToDevice, stream_app); CUERR; cudaMalloc((void**) &d_work, n_bytes); CUERR; cudaMemset(d_work, 0,n_bytes); CUERR; pb_SwitchToTimer(&timers, pb_TimerID_GPU); for( int Ns=1; Ns<N; Ns*=R){ GPU_FFT_Global<<<dim3(B), dim3(N/R),0,stream_app>>>(Ns, d_source, d_work, N); float2 *tmp = d_source; d_source = d_work; d_work = tmp; } printf("I am out from fft kernel launch\n"); pthread_mutex_unlock (mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); pb_SwitchToTimer(&timers, pb_TimerID_COPY); // copy device memory to host cudaMemcpyAsync(result, d_source, n_bytes,cudaMemcpyDeviceToHost, stream_app); CUERR; if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); cudaFree(d_source); CUERR; cudaFree(d_work); CUERR; /*if (params->outFile) { Write result to file pb_SwitchToTimer(&timers, pb_TimerID_IO); outputData(params->outFile, (float*)result, N*B*2); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); }*/ free(source); free(result); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); return 0; }
25da1b281b6d78a99202350e7272d66ecb4724d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2\imgproc\imgproc.hpp> #include <iostream> #include <stdio.h> #include "HPT.h" #include <vector> using namespace cv; using namespace std; Mat image; int trackbarSize; unsigned char* ImageModified; typedef unsigned char uByte; //kernal takes in two arrays and size __global__ void thresholdKernel(unsigned char* data, unsigned char* data2, int size, int thresholdSlider) { int j = (blockIdx.x *blockDim.x) + threadIdx.x; if (j < size) { if (data[j] > thresholdSlider) { data2[j] = 255; } else { data2[j] = 0; } } } //threshold change in cpu void threshold(int threshold, int width, int height, unsigned char* data); //threshold change in gpu bool initializeImageGPU(int width, int height, Mat image); //creates trackbar for image void on_trackbar(int thresholdSlider, void*); void BoxFilter(uByte* s, uByte* d, int w, int h, uByte* k, int kW, int kH, uByte* Temp); int main(int argc, char** argv) { if (argc != 2) { cout << "Usage: display_image ImageToLoadAndDisplay" << endl; return -1; } image = imread(argv[1], CV_LOAD_IMAGE_COLOR); if (!image.data) { cout << "Could not open or find image" << endl; return -1; } cvtColor(image, image, COLOR_RGB2GRAY); threshold(128, image.rows, image.cols, image.data); if (initializeImageGPU(image.rows, image.cols, image)) { cout << "We worked with the GPU" << endl; } else { cout << "It failed." << endl; } namedWindow("Display Window", WINDOW_NORMAL); //createTrackbar("Threshold", "Display Window", &threshold_slider, THRESHOLD_SLIDER_MAX, on_tracker(int, void *, Image, unsigned char* data2, size,threshold_slider)); imshow("Display Window", image); waitKey(0); return 0; } void threshold(int threshold, int width, int height, unsigned char * data) { HighPrecisionTime timeTheModification; double currentTime; timeTheModification.TimeSinceLastCall(); for (int i = 0; i < height *width; i++) { if (data[i] > threshold) { data[i] = 255; } else { data[i] = 0; } } currentTime = timeTheModification.TimeSinceLastCall(); cout << "CPU Threshold: " << currentTime << endl; } bool initializeImageGPU(int width, int height, Mat image) { HighPrecisionTime timeTheModification; double currentTime; bool temp = true; unsigned char* ImageOriginal = nullptr; ImageModified = nullptr; int size = width*height * sizeof(char); trackbarSize = size; hipError_t cudaTest; cudaTest = hipSetDevice(0); if (cudaTest != hipSuccess) { cout << "Error with device" << endl; } else { //cout << "suscsess" << endl; } cudaTest = hipMalloc(&ImageOriginal, size); if (cudaTest != hipSuccess) { cout << "hipMalloc failed!" << endl; temp = false; } else { //cout << "suscsess" << endl; } cudaTest = hipMalloc(&ImageModified, size); if (cudaTest != hipSuccess) { cout << "cudaMalloc2 failed!" << endl; temp = false; } else { //cout << "suscsess" << endl; } cudaTest = hipDeviceSynchronize(); if (cudaTest != hipSuccess) { cout << "cudaSync failed!" << endl; temp = false; } else { //cout << "suscsess" << endl; } cudaTest = hipMemcpy(ImageOriginal, image.data, size, hipMemcpyHostToDevice); if (cudaTest != hipSuccess) { cout << "cudacpy failed!" 
<< endl; temp = false; } else { //cout << "suscsess" << endl; } int blocksNeeded = (size + 1023) / 1024; timeTheModification.TimeSinceLastCall(); thresholdKernel << <blocksNeeded, 1024 >> > (ImageOriginal, ImageModified, size, 128); currentTime = timeTheModification.TimeSinceLastCall(); cout << "GPU Threshold: " << currentTime << endl; cudaTest = hipMemcpy(image.data, ImageModified, size, hipMemcpyDeviceToHost); if (cudaTest != hipSuccess) { cout << "cudacpy2 failed!" << endl; temp = false; } int thresholdSlider = 50; namedWindow("Blurred Image", WINDOW_NORMAL); //createTrackbar("Threshold", "BlurredImage", &thresholdSlider, 255, on_trackbar, &image); on_trackbar(thresholdSlider, 0); imshow("BlurredImage", image); waitKey(0); return temp; } void on_trackbar(int thresholdSlider, void*) { uByte*k; uByte* temp; HighPrecisionTime T; double currentTime; trackbarSize = image.cols *image.rows; int blocksNeeded = (trackbarSize + 1023) / 1024; hipDeviceSynchronize(); T.TimeSinceLastCall(); //thresholdKernel << < blocksNeeded, 1024 >> > (image.data, ImageModified, (image.rows*image.cols), thresholdSlider); BoxFilter(image.data, ImageModified, image.rows, image.cols, k, 3, 3, temp); currentTime = T.TimeSinceLastCall(); cout << "CurrentTime: " << currentTime << endl; if (hipMemcpy(image.data, ImageModified, trackbarSize, hipMemcpyDeviceToHost) != hipSuccess) { cout << "Error copying." << endl; } imshow("Display Window", image); } void BoxFilter(uByte * s, uByte * d, int w, int h, uByte * k, int kW, int kH, uByte * Temp) { float currentPixel = 0.0f; for (int i = 1; i < w - 1; i++) { for (int j = 1; j < h - 1; j++) { //set current pixel //c currentPixel = s[i* j]; //w-1,h+1 currentPixel += s[(i - 1)*(j + 1)]; //h+1, w+1 currentPixel += s[(i + 1) * (j + 1)]; //h+1 currentPixel += s[i * (j + 1)]; //w-1 currentPixel += s[(i - 1) * j]; //w+1 currentPixel += s[(i + 1) * j]; //w-1,h-1 currentPixel += s[(i - 1) * (j - 1)]; //h-1 currentPixel += s[i * (j - 1)]; //w+1,h-1 currentPixel += s[(i + 1) * (j - 1)]; d[i * j] = currentPixel / 9.0f; } } } // // // // float denominator = 0.0f; // vector <int> matrix = { (-(w + 1)), (-w), -(w - 1), -1,0,+1,w - 1,w,w + 1 }; // // for (int i = 0; i < kW*kH; i++) { // denominator += k[i]; // } // // if (denominator == 0.0f) { // denominator += 1.0f; // } // // for (int i = 1; i < h - 1; i++) { // for (int j = 1; j < w - 1; j++) { // // } // } // /*for (int j = 0; j<kernel.cols(); j++) { // // double[] m = kernel.get(i, j); // // for (int k = 0; k<m.length; k++) { // m[k] = m[k] / (kernelSize * kernelSize); // } // kernel.put(i, j, m); // }*/ //} // ////(s.data, d.data,s.cols, s.rows, k,3,3,temp.data) // ////int widthMinus1 = w - 1; // //int size = w*h; // //size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; // //size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; // // //int xOffset = w / 2; // //int yOffset = h / 2; // // //float outputValue; // // //// Checking to see if the indexs are within the bounds of the image and not on edges // //if (xIndex < (w - 1) && xIndex >0 && yIndex < h - 1 && yIndex >0) // //{ // // int xPixel = (xIndex - w / 2 + xIndex + w) % w; // // int yPixel = (yIndex - h / 2 + yIndex + h) % h; // // // for (int i = -xOffset; i <= xOffset; i++) { // // for (int j = -yOffset; j <= yOffset; j++) { // // outputValue+= // // } // // // } // // //}
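Editor's note on initializeImageGPU above: the host timer is stopped immediately after the kernel launch, which is asynchronous, so "GPU Threshold" mostly measures enqueue overhead, and the launch itself is never checked for errors. A self-contained sketch of a device-side measurement follows; the kernel body repeats the thresholding logic locally so the sketch compiles on its own, and the helper name timeThreshold is illustrative rather than part of the original file.

#include <cstdio>
#include <cuda_runtime.h>

// Same per-pixel thresholding as the file above, kept local to this sketch.
__global__ void thresholdSketch(const unsigned char* in, unsigned char* out, int n, int t)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = (in[i] > t) ? 255 : 0;
}

// Times the kernel to completion with CUDA events and surfaces launch errors.
float timeThreshold(unsigned char* d_in, unsigned char* d_out, int n, int t)
{
    int blocks = (n + 1023) / 1024;
    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);

    cudaEventRecord(t0);
    thresholdSketch<<<blocks, 1024>>>(d_in, d_out, n, t);
    cudaError_t launchErr = cudaGetLastError();   // catches bad launch configurations
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);                     // wait until the kernel has actually finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1);
    if (launchErr != cudaSuccess)
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(launchErr));

    cudaEventDestroy(t0);
    cudaEventDestroy(t1);
    return ms;
}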
25da1b281b6d78a99202350e7272d66ecb4724d8.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2\imgproc\imgproc.hpp> #include <iostream> #include <stdio.h> #include "HPT.h" #include <vector> using namespace cv; using namespace std; Mat image; int trackbarSize; unsigned char* ImageModified; typedef unsigned char uByte; //kernal takes in two arrays and size __global__ void thresholdKernel(unsigned char* data, unsigned char* data2, int size, int thresholdSlider) { int j = (blockIdx.x *blockDim.x) + threadIdx.x; if (j < size) { if (data[j] > thresholdSlider) { data2[j] = 255; } else { data2[j] = 0; } } } //threshold change in cpu void threshold(int threshold, int width, int height, unsigned char* data); //threshold change in gpu bool initializeImageGPU(int width, int height, Mat image); //creates trackbar for image void on_trackbar(int thresholdSlider, void*); void BoxFilter(uByte* s, uByte* d, int w, int h, uByte* k, int kW, int kH, uByte* Temp); int main(int argc, char** argv) { if (argc != 2) { cout << "Usage: display_image ImageToLoadAndDisplay" << endl; return -1; } image = imread(argv[1], CV_LOAD_IMAGE_COLOR); if (!image.data) { cout << "Could not open or find image" << endl; return -1; } cvtColor(image, image, COLOR_RGB2GRAY); threshold(128, image.rows, image.cols, image.data); if (initializeImageGPU(image.rows, image.cols, image)) { cout << "We worked with the GPU" << endl; } else { cout << "It failed." << endl; } namedWindow("Display Window", WINDOW_NORMAL); //createTrackbar("Threshold", "Display Window", &threshold_slider, THRESHOLD_SLIDER_MAX, on_tracker(int, void *, Image, unsigned char* data2, size,threshold_slider)); imshow("Display Window", image); waitKey(0); return 0; } void threshold(int threshold, int width, int height, unsigned char * data) { HighPrecisionTime timeTheModification; double currentTime; timeTheModification.TimeSinceLastCall(); for (int i = 0; i < height *width; i++) { if (data[i] > threshold) { data[i] = 255; } else { data[i] = 0; } } currentTime = timeTheModification.TimeSinceLastCall(); cout << "CPU Threshold: " << currentTime << endl; } bool initializeImageGPU(int width, int height, Mat image) { HighPrecisionTime timeTheModification; double currentTime; bool temp = true; unsigned char* ImageOriginal = nullptr; ImageModified = nullptr; int size = width*height * sizeof(char); trackbarSize = size; cudaError_t cudaTest; cudaTest = cudaSetDevice(0); if (cudaTest != cudaSuccess) { cout << "Error with device" << endl; } else { //cout << "suscsess" << endl; } cudaTest = cudaMalloc(&ImageOriginal, size); if (cudaTest != cudaSuccess) { cout << "cudaMalloc failed!" << endl; temp = false; } else { //cout << "suscsess" << endl; } cudaTest = cudaMalloc(&ImageModified, size); if (cudaTest != cudaSuccess) { cout << "cudaMalloc2 failed!" << endl; temp = false; } else { //cout << "suscsess" << endl; } cudaTest = cudaDeviceSynchronize(); if (cudaTest != cudaSuccess) { cout << "cudaSync failed!" << endl; temp = false; } else { //cout << "suscsess" << endl; } cudaTest = cudaMemcpy(ImageOriginal, image.data, size, cudaMemcpyHostToDevice); if (cudaTest != cudaSuccess) { cout << "cudacpy failed!" 
<< endl; temp = false; } else { //cout << "suscsess" << endl; } int blocksNeeded = (size + 1023) / 1024; timeTheModification.TimeSinceLastCall(); thresholdKernel << <blocksNeeded, 1024 >> > (ImageOriginal, ImageModified, size, 128); currentTime = timeTheModification.TimeSinceLastCall(); cout << "GPU Threshold: " << currentTime << endl; cudaTest = cudaMemcpy(image.data, ImageModified, size, cudaMemcpyDeviceToHost); if (cudaTest != cudaSuccess) { cout << "cudacpy2 failed!" << endl; temp = false; } int thresholdSlider = 50; namedWindow("Blurred Image", WINDOW_NORMAL); //createTrackbar("Threshold", "BlurredImage", &thresholdSlider, 255, on_trackbar, &image); on_trackbar(thresholdSlider, 0); imshow("BlurredImage", image); waitKey(0); return temp; } void on_trackbar(int thresholdSlider, void*) { uByte*k; uByte* temp; HighPrecisionTime T; double currentTime; trackbarSize = image.cols *image.rows; int blocksNeeded = (trackbarSize + 1023) / 1024; cudaDeviceSynchronize(); T.TimeSinceLastCall(); //thresholdKernel << < blocksNeeded, 1024 >> > (image.data, ImageModified, (image.rows*image.cols), thresholdSlider); BoxFilter(image.data, ImageModified, image.rows, image.cols, k, 3, 3, temp); currentTime = T.TimeSinceLastCall(); cout << "CurrentTime: " << currentTime << endl; if (cudaMemcpy(image.data, ImageModified, trackbarSize, cudaMemcpyDeviceToHost) != cudaSuccess) { cout << "Error copying." << endl; } imshow("Display Window", image); } void BoxFilter(uByte * s, uByte * d, int w, int h, uByte * k, int kW, int kH, uByte * Temp) { float currentPixel = 0.0f; for (int i = 1; i < w - 1; i++) { for (int j = 1; j < h - 1; j++) { //set current pixel //c currentPixel = s[i* j]; //w-1,h+1 currentPixel += s[(i - 1)*(j + 1)]; //h+1, w+1 currentPixel += s[(i + 1) * (j + 1)]; //h+1 currentPixel += s[i * (j + 1)]; //w-1 currentPixel += s[(i - 1) * j]; //w+1 currentPixel += s[(i + 1) * j]; //w-1,h-1 currentPixel += s[(i - 1) * (j - 1)]; //h-1 currentPixel += s[i * (j - 1)]; //w+1,h-1 currentPixel += s[(i + 1) * (j - 1)]; d[i * j] = currentPixel / 9.0f; } } } // // // // float denominator = 0.0f; // vector <int> matrix = { (-(w + 1)), (-w), -(w - 1), -1,0,+1,w - 1,w,w + 1 }; // // for (int i = 0; i < kW*kH; i++) { // denominator += k[i]; // } // // if (denominator == 0.0f) { // denominator += 1.0f; // } // // for (int i = 1; i < h - 1; i++) { // for (int j = 1; j < w - 1; j++) { // // } // } // /*for (int j = 0; j<kernel.cols(); j++) { // // double[] m = kernel.get(i, j); // // for (int k = 0; k<m.length; k++) { // m[k] = m[k] / (kernelSize * kernelSize); // } // kernel.put(i, j, m); // }*/ //} // ////(s.data, d.data,s.cols, s.rows, k,3,3,temp.data) // ////int widthMinus1 = w - 1; // //int size = w*h; // //size_t xIndex = blockIdx.x * blockDim.x + threadIdx.x; // //size_t yIndex = blockIdx.y * blockDim.y + threadIdx.y; // // //int xOffset = w / 2; // //int yOffset = h / 2; // // //float outputValue; // // //// Checking to see if the indexs are within the bounds of the image and not on edges // //if (xIndex < (w - 1) && xIndex >0 && yIndex < h - 1 && yIndex >0) // //{ // // int xPixel = (xIndex - w / 2 + xIndex + w) % w; // // int yPixel = (yIndex - h / 2 + yIndex + h) % h; // // // for (int i = -xOffset; i <= xOffset; i++) { // // for (int j = -yOffset; j <= yOffset; j++) { // // outputValue+= // // } // // // } // // //}
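Editor's note on the BoxFilter host function above (present in both the .hip and .cu copies): it indexes the source as s[i * j], multiplying the two loop counters instead of computing a row-major offset, so it reads and writes the wrong pixels; separately, on_trackbar calls it with ImageModified, which is a device pointer, and then copies that pointer back with cudaMemcpy as if the host function had filled it. Below is a corrected CPU 3x3 box filter sketch with row-major indexing; dropping the unused kernel/Temp parameters of the original signature is this editor's simplification, not a change taken from the source.

typedef unsigned char uByte;

// Corrected 3x3 box filter over a width x height single-channel image, row-major layout.
void BoxFilter3x3(const uByte* src, uByte* dst, int width, int height)
{
    for (int y = 1; y < height - 1; ++y) {
        for (int x = 1; x < width - 1; ++x) {
            int sum = 0;
            // Sum the 3x3 neighborhood around (x, y) using offsets into the same row layout.
            for (int dy = -1; dy <= 1; ++dy)
                for (int dx = -1; dx <= 1; ++dx)
                    sum += src[(y + dy) * width + (x + dx)];
            dst[y * width + x] = (uByte)(sum / 9);
        }
    }
    // Border pixels are left untouched; copy them through if the caller expects a full image.
}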
30702f5cee8f07ccd0a86df2a13304c123784295.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zgesellcmmv.cu, normal z -> s, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_s //#define TEXTURE // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, float alpha, const float * __restrict__ dval, const magma_index_t * __restrict__ dcolind, const magma_index_t * __restrict__ drowptr, const float *__restrict__ dx, float beta, float * __restrict__ dy) { // threads assigned to rows //int Idx = blockDim.x * blockIdx.x + threadIdx.x; //int offset = drowptr[ blockIdx.x ]; //int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; // T threads assigned to each row int idx = threadIdx.x; // local row int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * 256 + idx; // global row index // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) ); int lrow = threadIdx.x%blocksize; // local row; if( row < num_rows ) { int offset = drowptr[ row/blocksize ]; int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize; float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++) { int col = dcolind [ offset+ blocksize * n + lrow ]; float val = dval[ offset+ blocksize * n + lrow ]; dot = dot + val * dx [ col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ float read_from_tex( hipTextureObject_t texdx, const int& i) { int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2float(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. 
HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, hipTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = min( int( sqrt( float( slices ))), 65535 ); int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535); int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 ); int num_tx = blocksize; int Ms = num_threads * sizeof( float ); // special case: alignment 1: if( alignment == 1 ){ Ms = 0; num_tx = 256; int num_blocks = magma_ceildiv( n, 256 ); dimgrid1 = num_blocks; //min( int( sqrt( float( num_blocks ))), 65535 ); dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 ); dimgrid3 = 1; //blocksize = 256; } dim3 block( num_tx, alignment, 1); if( dimgrid3 > 65535 ){ printf("error: too many GPU thread blocks requested.\n"); } dim3 grid( dimgrid1, dimgrid2, 1); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. hipChannelFormatDesc channel_desc; channel_desc = hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned); // Create resource descriptor. struct hipResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = hipResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(float); // Specify texture object parameters. struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.filterMode = hipFilterModePoint; texDesc.readMode = hipReadModeElementType; // Create texture object. 
hipTextureObject_t texdx = 0; hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); if ( alignment == 1) { if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid2), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } hipDestroyTextureObject(texdx); #else if ( alignment == 1) { if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, 
beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_S_ZERO) { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() , m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } #endif return MAGMA_SUCCESS; }
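Editor's note on the .hip file above versus its CUDA twin (next entry): the two differ mainly in how kernels are launched, since hipify rewrites the triple-chevron syntax into the hipLaunchKernelGGL macro. Note also that the texture path launches zgesellptmv2d_kernel_1 with an undeclared grid2 in both copies; because PRECISION_s is defined and TEXTURE is commented out, that branch is never compiled, but it would not build if enabled. The side-by-side sketch below uses an illustrative kernel named demoKernel; it is not part of either file.

#include <cuda_runtime.h>

__global__ void demoKernel(float* out, float v) { out[threadIdx.x] = v; }

void launchDemo(float* d_out, cudaStream_t stream)
{
    dim3 grid(1), block(256);
    size_t shmem = 0;

    // CUDA source form, as written in the .cu twin of this file:
    demoKernel<<<grid, block, shmem, stream>>>(d_out, 1.0f);

    // hipify rewrites the line above into the macro form used throughout the .hip file:
    //   hipLaunchKernelGGL(demoKernel, grid, block, shmem, stream, d_out, 1.0f);
    // Grid, block, dynamic shared memory and stream become ordinary macro arguments,
    // followed by the kernel arguments; the kernel body itself is unchanged.
}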
30702f5cee8f07ccd0a86df2a13304c123784295.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zgesellcmmv.cu, normal z -> s, Wed Jan 2 14:18:53 2019 */ #include "magmasparse_internal.h" #define PRECISION_s //#define TEXTURE // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning one thread to each row - 1D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_1( int num_rows, int num_cols, int blocksize, int T, float alpha, const float * __restrict__ dval, const magma_index_t * __restrict__ dcolind, const magma_index_t * __restrict__ drowptr, const float *__restrict__ dx, float beta, float * __restrict__ dy) { // threads assigned to rows //int Idx = blockDim.x * blockIdx.x + threadIdx.x; //int offset = drowptr[ blockIdx.x ]; //int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize; // T threads assigned to each row int idx = threadIdx.x; // local row int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * 256 + idx; // global row index // int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) ); int lrow = threadIdx.x%blocksize; // local row; if( row < num_rows ) { int offset = drowptr[ row/blocksize ]; int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize; float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < border; n++) { int col = dcolind [ offset+ blocksize * n + lrow ]; float val = dval[ offset+ blocksize * n + lrow ]; dot = dot + val * dx [ col ]; } if (betazero) { dy[ row ] = dot * alpha; } else { dy[ row ] = dot * alpha + beta * dy [ row ]; } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. 
HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = dx[ i1 ]; x2 = dx[ i2 ]; v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = dx[ dcolind[ block*kk] ]; v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, float * dx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * dx[ col ]; } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } /************************* same but using texture mem *************************/ #if defined(PRECISION_d) && defined(TEXTURE) __inline__ __device__ float read_from_tex( cudaTextureObject_t texdx, const int& i) { int2 temp = tex1Dfetch<int2>( texdx, i ); return __hiloint2float(temp.y,temp.x); } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. 
BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_4_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 2 ) { shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_8_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles int kk, i1, i2; float x1, x2, v1, v2; dcolind += offset + ldx; dval += offset + ldx; for ( kk = 0; kk < max_-1; kk+=2 ) { i1 = dcolind[ block*kk]; i2 = dcolind[ block*kk + block]; x1 = read_from_tex( texdx, i1 ); x2 = read_from_tex( texdx, i2 ); v1 = dval[ block*kk ]; v2 = dval[ block*kk + block]; dot += v1 * x1; dot += v2 * x2; } if (kk<max_) { x1 = read_from_tex( texdx, dcolind[ block*kk] ); v1 = dval[ block*kk ]; dot += v1 * x1; } shared[ldx] = dot; __syncthreads(); if( idx < 4 ) { shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. 
HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_16_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 8 ) { shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } // SELLP SpMV kernel // see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP // A UNIFIED SPARSE MATRIX DATA FORMAT // FOR MODERN PROCESSORS WITH WIDE SIMD UNITS // SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel template<bool betazero> __global__ void zgesellptmv2d_kernel_32_tex( int num_rows, int num_cols, int blocksize, int T, float alpha, float * dval, magma_index_t * dcolind, magma_index_t * drowptr, cudaTextureObject_t texdx, float beta, float * dy) { // T threads assigned to each row int idx = threadIdx.y; // thread in row int idy = threadIdx.x; // local row int ldx = idx * blocksize + idy; int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index int row = bdx * blocksize + idy; // global row index extern __shared__ float shared[]; if(row < num_rows ) { float dot = MAGMA_S_MAKE(0.0, 0.0); int offset = drowptr[ bdx ]; int block = blocksize * T; // total number of threads int max_ = (drowptr[ bdx+1 ]-offset)/block; // number of elements each thread handles for ( int k = 0; k < max_; k++ ) { float val = dval[ offset + ldx + block*k ]; int col = dcolind[ offset + ldx + block*k ]; dot += val * read_from_tex( texdx, col ); } shared[ldx] = dot; __syncthreads(); if( idx < 16 ) { shared[ldx]+=shared[ldx+blocksize*16]; __syncthreads(); if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8]; __syncthreads(); if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4]; __syncthreads(); if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2]; __syncthreads(); if( idx == 0 ) { if (betazero) { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha; } else { dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row]; } } } } } #endif /********************* end of texture versions **************************/ /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is SELLP. 
Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] blocksize magma_int_t number of rows in one ELL-slice @param[in] slices magma_int_t number of slices in matrix @param[in] alignment magma_int_t number of threads assigned to one row @param[in] alpha float scalar multiplier @param[in] dval magmaFloat_ptr array containing values of A in SELLP @param[in] dcolind magmaIndex_ptr columnindices of A in SELLP @param[in] drowptr magmaIndex_ptr rowpointer of SELLP @param[in] dx magmaFloat_ptr input vector x @param[in] beta float scalar multiplier @param[out] dy magmaFloat_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgesellpmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t blocksize, magma_int_t slices, magma_int_t alignment, float alpha, magmaFloat_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr, magmaFloat_ptr dx, float beta, magmaFloat_ptr dy, magma_queue_t queue ) { // using a 2D thread grid int num_threads = blocksize*alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = min( int( sqrt( float( slices ))), 65535 ); int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535); int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 ); int num_tx = blocksize; int Ms = num_threads * sizeof( float ); // special case: alignment 1: if( alignment == 1 ){ Ms = 0; num_tx = 256; int num_blocks = magma_ceildiv( n, 256 ); dimgrid1 = num_blocks; //min( int( sqrt( float( num_blocks ))), 65535 ); dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 ); dimgrid3 = 1; //blocksize = 256; } dim3 block( num_tx, alignment, 1); if( dimgrid3 > 65535 ){ printf("error: too many GPU thread blocks requested.\n"); } dim3 grid( dimgrid1, dimgrid2, 1); #if defined(PRECISION_d) && defined(TEXTURE) // Create channel. cudaChannelFormatDesc channel_desc; channel_desc = cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned); // Create resource descriptor. struct cudaResourceDesc resDescdx; memset(&resDescdx, 0, sizeof(resDescdx)); resDescdx.resType = cudaResourceTypeLinear; resDescdx.res.linear.devPtr = (void*)dx; resDescdx.res.linear.desc = channel_desc; resDescdx.res.linear.sizeInBytes = m*sizeof(float); // Specify texture object parameters. struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModePoint; texDesc.readMode = cudaReadModeElementType; // Create texture object. 
cudaTextureObject_t texdx = 0; cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); if ( alignment == 1) { if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_1<true><<< grid2, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_4_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_4_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_8_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_8_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_16_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_16_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_32_tex<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } else { zgesellptmv2d_kernel_32_tex<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, texdx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } cudaDestroyTextureObject(texdx); #else if ( alignment == 1) { if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_1<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 4){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_4<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_4<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 8){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_8<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_8<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 16){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_16<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { 
zgesellptmv2d_kernel_16<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else if ( alignment == 32){ if (beta == MAGMA_S_ZERO) { zgesellptmv2d_kernel_32<true><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } else { zgesellptmv2d_kernel_32<false><<< grid, block, Ms, queue->cuda_stream() >>> ( m, n, blocksize, alignment, alpha, dval, dcolind, drowptr, dx, beta, dy ); } } else { printf("error: alignment %d not supported.\n", int(alignment) ); return MAGMA_ERR_NOT_SUPPORTED; } #endif return MAGMA_SUCCESS; }
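Editor's note on the _4/_8/_16/_32 kernels above: each one finishes with a partial-sum reduction across the T threads that share a row, staged through a blocksize x T region of shared memory. The stripped-down sketch below shows that pattern for T = 8; unlike the kernels above it keeps __syncthreads() outside the divergent branches, which is the portable form. The input layout, block shape and names here are illustrative assumptions, not taken from the MAGMA source.

#include <cuda_runtime.h>

// blockDim.x = blocksize (rows per slice), blockDim.y = 8 threads cooperating per row.
// Launch as: rowReduceT8<<<numSlices, dim3(blocksize, 8), blocksize * 8 * sizeof(float)>>>(...)
__global__ void rowReduceT8(const float* partial, float* out, int num_rows)
{
    extern __shared__ float sh[];                 // blocksize * 8 floats
    int idy = threadIdx.x;                        // local row inside the slice
    int idx = threadIdx.y;                        // lane within the row (0..7)
    int blocksize = blockDim.x;
    int ldx = idx * blocksize + idy;
    int row = blockIdx.x * blocksize + idy;

    // Assumed layout: one partial sum per (slice, lane, local row), contiguous per slice.
    sh[ldx] = (row < num_rows) ? partial[blockIdx.x * blocksize * 8 + ldx] : 0.0f;
    __syncthreads();

    // Halve the number of active lanes each step: 8 -> 4 -> 2 -> 1.
    for (int stride = 4; stride >= 1; stride >>= 1) {
        if (idx < stride)
            sh[ldx] += sh[ldx + blocksize * stride];
        __syncthreads();                          // every thread reaches the barrier
    }

    if (idx == 0 && row < num_rows)
        out[row] = sh[idy];                       // lane 0 of each row holds the total
}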
78202e48770297921a7c67304d1422c3d59e0778.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------- // INCLUDES //---------------------- #include "dct8x8.cuh" #include "math.h" #include "device_launch_parameters.h" //---------------------- // IMPLEMENTATION //---------------------- //********************************* // Alpha for computing forward 2Dct //********************************* __device__ float alpha(int i) { if (i == 0) return sqrt(0.125); else return sqrt(0.25); } //*********************************************************** // Compute DCT coef of value (u,v) in matrix // @param u : row index // @param v : col index // @param aMatrix : source matrix // @param offset : offset of first element // @return : dct coeff for //*********************************************************** __device__ float computeDCTCoef(int u, int v, float* aMatrix, int offset) { float res = alpha(u)*alpha(v); float tmp = 0.0f; for (int i = 0; i < ROW_NUMBER; ++i) { for (int j = 0; j < COL_NUMBER; ++j) { tmp += cosf(ROW_COEF*u*(2 * i + 1))*cosf(COL_COEF*v*(2 * j + 1))*aMatrix[i * ROW_NUMBER + j]; } } return res*tmp; } //************************************************************* // Compute complete DCT of a 8x8 matrix // See header file for details //************************************************************* __global__ void computeDCT2(float* srcMatrixes, float* dstMatrixes, int numberOfMatrixes) { // each thread compute dct for a row int threadX = threadIdx.x; if (threadX < numberOfMatrixes*ROW_NUMBER) { int offset = (threadX/ROW_NUMBER)*DCT_MATRIX_SIZE; int startIndex = threadX*ROW_NUMBER; int u = (startIndex - offset)/ROW_NUMBER; for (int v = 0; v < COL_NUMBER; ++v) { dstMatrixes[startIndex] = computeDCTCoef(u, v, &srcMatrixes[offset], 0); ++startIndex; } } } //******************************************************************* // Wrapper for calling kernel from Cpp source file // See header file for details //******************************************************************* hipError_t wrapperDCT2(float* srcMatrixes, float* dstMatrixes, int numberOfMatrixes) { hipError_t ret = hipSuccess; // allocate data on device float* devSrcMatrixes; float* devDstMatrixes; size_t matrixesSize = numberOfMatrixes*DCT_MATRIX_SIZE*sizeof(float); ret = hipMalloc(&devSrcMatrixes, matrixesSize); if (ret != hipSuccess) return ret; ret = hipMalloc(&devDstMatrixes, matrixesSize); if (ret != hipSuccess) return ret; // copy source matrixes on device ret = hipMemcpy(devSrcMatrixes, srcMatrixes, matrixesSize, hipMemcpyHostToDevice); if (ret != hipSuccess) return ret; // run kernel computeDCT2 << <1, DCT_KERNEL_THREADS >> >(devSrcMatrixes, devDstMatrixes, numberOfMatrixes); // copy destination matrixes back on host ret = hipMemcpy(dstMatrixes, devDstMatrixes, matrixesSize, hipMemcpyDeviceToHost); if (ret != hipSuccess) return ret; hipFree(devSrcMatrixes); hipFree(devDstMatrixes); return ret; }
78202e48770297921a7c67304d1422c3d59e0778.cu
//---------------------- // INCLUDES //---------------------- #include "dct8x8.cuh" #include "math.h" #include "device_launch_parameters.h" //---------------------- // IMPLEMENTATION //---------------------- //********************************* // Alpha for computing forward 2Dct //********************************* __device__ float alpha(int i) { if (i == 0) return sqrt(0.125); else return sqrt(0.25); } //*********************************************************** // Compute DCT coef of value (u,v) in matrix // @param u : row index // @param v : col index // @param aMatrix : source matrix // @param offset : offset of first element // @return : dct coeff for //*********************************************************** __device__ float computeDCTCoef(int u, int v, float* aMatrix, int offset) { float res = alpha(u)*alpha(v); float tmp = 0.0f; for (int i = 0; i < ROW_NUMBER; ++i) { for (int j = 0; j < COL_NUMBER; ++j) { tmp += cosf(ROW_COEF*u*(2 * i + 1))*cosf(COL_COEF*v*(2 * j + 1))*aMatrix[i * ROW_NUMBER + j]; } } return res*tmp; } //************************************************************* // Compute complete DCT of a 8x8 matrix // See header file for details //************************************************************* __global__ void computeDCT2(float* srcMatrixes, float* dstMatrixes, int numberOfMatrixes) { // each thread compute dct for a row int threadX = threadIdx.x; if (threadX < numberOfMatrixes*ROW_NUMBER) { int offset = (threadX/ROW_NUMBER)*DCT_MATRIX_SIZE; int startIndex = threadX*ROW_NUMBER; int u = (startIndex - offset)/ROW_NUMBER; for (int v = 0; v < COL_NUMBER; ++v) { dstMatrixes[startIndex] = computeDCTCoef(u, v, &srcMatrixes[offset], 0); ++startIndex; } } } //******************************************************************* // Wrapper for calling kernel from Cpp source file // See header file for details //******************************************************************* cudaError_t wrapperDCT2(float* srcMatrixes, float* dstMatrixes, int numberOfMatrixes) { cudaError_t ret = cudaSuccess; // allocate data on device float* devSrcMatrixes; float* devDstMatrixes; size_t matrixesSize = numberOfMatrixes*DCT_MATRIX_SIZE*sizeof(float); ret = cudaMalloc(&devSrcMatrixes, matrixesSize); if (ret != cudaSuccess) return ret; ret = cudaMalloc(&devDstMatrixes, matrixesSize); if (ret != cudaSuccess) return ret; // copy source matrixes on device ret = cudaMemcpy(devSrcMatrixes, srcMatrixes, matrixesSize, cudaMemcpyHostToDevice); if (ret != cudaSuccess) return ret; // run kernel computeDCT2 << <1, DCT_KERNEL_THREADS >> >(devSrcMatrixes, devDstMatrixes, numberOfMatrixes); // copy destination matrixes back on host ret = cudaMemcpy(dstMatrixes, devDstMatrixes, matrixesSize, cudaMemcpyDeviceToHost); if (ret != cudaSuccess) return ret; cudaFree(devSrcMatrixes); cudaFree(devDstMatrixes); return ret; }
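As a rough illustration of how the DCT wrapper above might be driven from host code (this driver is not part of the original file pair), the sketch below assumes dct8x8.cuh declares wrapperDCT2 and that DCT_MATRIX_SIZE is 64, i.e. one flattened 8x8 block per matrix; the constant-valued test input is arbitrary.

// Hypothetical host driver for wrapperDCT2; sizes assume DCT_MATRIX_SIZE == 64.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include "dct8x8.cuh"   // assumed to declare wrapperDCT2 and the size constants

int main()
{
    const int numberOfMatrixes = 2;              // arbitrary number of 8x8 blocks
    const int elems = numberOfMatrixes * 64;     // assumes DCT_MATRIX_SIZE == 64
    std::vector<float> src(elems, 1.0f);         // constant test blocks, row-major
    std::vector<float> dst(elems, 0.0f);

    cudaError_t err = wrapperDCT2(src.data(), dst.data(), numberOfMatrixes);
    if (err != cudaSuccess) {
        std::printf("wrapperDCT2 failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    std::printf("first coefficient of block 0: %f\n", dst[0]);
    return 0;
}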
2d8426d17c9e067659b6c250f5ffa286ccaf9664.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "decryptKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *deviceDataIn = NULL; hipMalloc(&deviceDataIn, XSIZE*YSIZE); char *deviceDataOut = NULL; hipMalloc(&deviceDataOut, XSIZE*YSIZE); int n = XSIZE*YSIZE; char *key = NULL; hipMalloc(&key, XSIZE*YSIZE); int keySize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( decryptKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceDataIn,deviceDataOut,n,key,keySize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( decryptKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceDataIn,deviceDataOut,n,key,keySize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( decryptKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, deviceDataIn,deviceDataOut,n,key,keySize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
2d8426d17c9e067659b6c250f5ffa286ccaf9664.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "decryptKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; char *deviceDataIn = NULL; cudaMalloc(&deviceDataIn, XSIZE*YSIZE); char *deviceDataOut = NULL; cudaMalloc(&deviceDataOut, XSIZE*YSIZE); int n = XSIZE*YSIZE; char *key = NULL; cudaMalloc(&key, XSIZE*YSIZE); int keySize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); decryptKernel<<<gridBlock,threadBlock>>>(deviceDataIn,deviceDataOut,n,key,keySize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { decryptKernel<<<gridBlock,threadBlock>>>(deviceDataIn,deviceDataOut,n,key,keySize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { decryptKernel<<<gridBlock,threadBlock>>>(deviceDataIn,deviceDataOut,n,key,keySize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3fbeae0ab893cdf3a536739fc0ee7fd27630312d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrixAdd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrixAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrixAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrixAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3fbeae0ab893cdf3a536739fc0ee7fd27630312d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrixAdd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrixAdd<<<gridBlock,threadBlock>>>(A,B,C,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrixAdd<<<gridBlock,threadBlock>>>(A,B,C,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrixAdd<<<gridBlock,threadBlock>>>(A,B,C,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
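The benchmark above includes "matrixAdd.cu", which is not part of this file pair, so the kernel body is unknown; judging from the launch arguments (A, B, C, n) and the 2D grid/block configuration, one plausible shape is the element-wise addition sketched below. This is an assumption, not the original kernel.

// Hypothetical matrixAdd kernel matching the launch signature used by the harness above.
// The 2D thread index is flattened into a linear element index bounded by n.
__global__ void matrixAdd(float *A, float *B, float *C, int n)
{
    int idx = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y)
            + threadIdx.y * blockDim.x + threadIdx.x;
    if (idx < n)
        C[idx] = A[idx] + B[idx];
}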
879cb5efefa2b92b3c918ce6dde62e0819e9aa45.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>

#include "dropout_layer.hpp"
#include "math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
    const unsigned int* mask, const unsigned int threshold, const float scale,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] * (mask[index] > threshold) * scale;
  }
}

template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  caffe_copy(count, bottom_data, top_data);
}

INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);

}  // namespace caffe
879cb5efefa2b92b3c918ce6dde62e0819e9aa45.cu
#include <vector>

#include "dropout_layer.hpp"
#include "math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
    const unsigned int* mask, const unsigned int threshold, const float scale,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] * (mask[index] > threshold) * scale;
  }
}

template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  caffe_copy(count, bottom_data, top_data);
}

INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer);

}  // namespace caffe
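Note that Forward_gpu in this stripped-down layer only copies bottom to top and never launches the DropoutForward kernel it defines (the full Caffe layer uses it during training with a random mask). Purely as an illustration of how that kernel could be driven, the fragment below uses made-up buffer sizes and a 50% dropout configuration; it assumes it is placed in the same translation unit, inside namespace caffe.

// Illustrative only: launches the DropoutForward kernel defined above with made-up buffers.
const int count = 1 << 20;
const float dropout_ratio = 0.5f;
float *d_in = nullptr, *d_out = nullptr;
unsigned int *d_mask = nullptr;
cudaMalloc(&d_in, count * sizeof(float));
cudaMalloc(&d_out, count * sizeof(float));
cudaMalloc(&d_mask, count * sizeof(unsigned int));
// d_mask would normally hold uniform random unsigned ints (e.g. generated with cuRAND);
// elements whose mask value exceeds the threshold are kept and rescaled by `scale`.
const unsigned int threshold =
    static_cast<unsigned int>(dropout_ratio * 4294967295u);
const float scale = 1.0f / (1.0f - dropout_ratio);
const int threads = 256;
DropoutForward<float><<<(count + threads - 1) / threads, threads>>>(
    count, d_in, d_mask, threshold, scale, d_out);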
a25eb5f7adaec7ba1653a9fd43da6d86c1af459c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "TestKernel.cuh"
#include<stdio.h>

__global__ void test_mykernel2(int x)
{
    printf("x=%d\n", x);
}

/*
// will cause error
__global__ void test_mykernel1(int& x)
{
    x++;
    printf("x=%d\n", x);
}
*/

template<class T>
__global__ void test_mykernel(const T& func, int x)
{
    int y = func(x);
    printf("y=%d\n", y);
}
a25eb5f7adaec7ba1653a9fd43da6d86c1af459c.cu
#include "TestKernel.cuh" #include<stdio.h> __global__ void test_mykernel2(int x) { printf("x=%d\n", x); } /* // will cause error __global__ void test_mykernel1(int& x) { x++; printf("x=%d\n", x); } */ template<class T> __global__ void test_mykernel(const T& func, int x) { int y = func(x); printf("y=%d\n", y); }
a2f31e6b3d388962a66020d3df0c4875fd236949.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ hipComplex helvp(hipComplex z) { hipComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ hipComplex lanna(hipComplex z) { hipComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ hipComplex harva(hipComplex z) { hipComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ hipComplex herve(hipComplex z) { hipComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ hipComplex alver(hipComplex z) { hipComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex alvir(hipComplex z) { hipComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ hipComplex hexva(int m, hipComplex z) { hipComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex halvi(hipComplex z) { hipComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ hipComplex ahilv(hipComplex z) { hipComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ hipComplex halva(hipComplex z) { hipComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ hipComplex aciwa(hipComplex z) { hipComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex aliva(hipComplex z) { hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ hipComplex ariva(hipComplex z) { hipComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); 
hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thy(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex origo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale =20; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex tin(1/8.0,1/1024.0); hipComplex aon = expc(tin*ai*moux); hipComplex uon= expc(tin*mouy); hipComplex flurn(0.0,0.0); hipComplex accume(0.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex vue = cue; hipComplex rhuva(3.0,0.0); hipComplex rarva(3.0,0.0); hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex 
D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = (powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<5;v++) { cue = cue - aon*alvir(uon*hilva(cue))-uon*hilva(aon*alvir(cue)); } /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
a2f31e6b3d388962a66020d3df0c4875fd236949.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } /* derivative of helva, from Mathematica */ __device__ cuComplex helvp(cuComplex z) { cuComplex out(jnf(2,z.r),jnf(1,z.i)); return out; } __device__ cuComplex lanna(cuComplex z) { cuComplex out(j1f(z.r/j0f(z.i)),j1f(z.i/j1f(z.r))); return out; } __device__ cuComplex harva(cuComplex z) { cuComplex out(jnf(floor(z.i),z.r),jnf(ceil(z.r),z.i)); return out; } __device__ cuComplex herve(cuComplex z) { cuComplex out(jnf(floor(z.r-z.i),z.i),jnf(ceil(z.r+z.i),z.r)); return out; } __device__ cuComplex alver(cuComplex z) { cuComplex out(1.0/j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex alvir(cuComplex z) { cuComplex out(j0f(z.r),1.0/j1f(z.i)); return out; } __device__ cuComplex hexva(int m, cuComplex z) { cuComplex out(jnf(m,z.r),jnf(m,z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex halvi(cuComplex z) { cuComplex out(j1f(z.r),-j0f(z.i)); return out; } __device__ cuComplex ahilv(cuComplex z) { cuComplex out(1.0/j1f(z.r),1.0/j0f(z.i)); return out; } __device__ cuComplex halva(cuComplex z) { cuComplex out(j0f(z.r),j0f(z.i)); return out; } __device__ cuComplex aciwa(cuComplex z) { cuComplex out(j0f(j1f(z.r)),j1f(j0f(z.i))); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex aliva(cuComplex z) { cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i)); return out; } __device__ cuComplex ariva(cuComplex z) { cuComplex out(sinf(z.i),cbrtf(z.r)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * harva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * helva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thy(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * 
conj(qoo/q * tw*hinva(z)) +hilva( qoo*qoo/(q*q))); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex origo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * powc(alvir(q*z),alvir(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale =20; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex tin(1/8.0,1/1024.0); cuComplex aon = expc(tin*ai*moux); cuComplex uon= expc(tin*mouy); cuComplex flurn(0.0,0.0); cuComplex accume(0.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex vue = cue; cuComplex rhuva(3.0,0.0); cuComplex rarva(3.0,0.0); cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ // almost Klein's j-invariant //cue = 
(powc(powc(arago(flurn,q*aon),rarva)+ powc(the2(flurn,q),rarva) + powc(the4(flurn,q),rarva),rhuva))/powc(the4(flurn,q)*the3(flurn,q)*the2(flurn,q),rarva); for(v=0;v<5;v++) { cue = cue - aon*alvir(uon*hilva(cue))-uon*hilva(aon*alvir(cue)); } /*cue =cue - powc(conj(cue),conj(cue-aon*conj(cue)))-powc(conj(cue),conj(cue-uon*conj(cue)));*/ double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q)); rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q)); cue = rhus+cue; cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/ /*for(v=0;v<60;v++){ cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon)); accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue)); } cue = accume;*/ /* One for (x+d)/cos(d) -cos(x)/d Tungilipa D = cos(D) cos(sqrt(x*D))/D -1 = 0.0 The other for cos(x)-x Eripgrunna */
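For completeness, a bare-bones host driver for the kernelLauncher above might look like the following. This is a guess at usage (the original project presumably maps d_out to an on-screen pixel buffer); the 2100x2100 size mirrors the DIM constant in the file, the pos value is arbitrary, and kernel.h is assumed to declare kernelLauncher.

#include <cuda_runtime.h>
#include "kernel.h"   // assumed to declare kernelLauncher(uchar4*, int, int, int2)

int main()
{
    const int w = 2100, h = 2100;              // matches DIM in the file above
    uchar4 *d_out = nullptr;
    cudaMalloc(&d_out, size_t(w) * h * sizeof(uchar4));
    int2 pos = {w / 2, h / 2};                 // arbitrary "mouse" position
    kernelLauncher(d_out, w, h, pos);          // fills d_out with RGBA pixel values
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}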
bc441cea770192a9de92366620b3b0460f831d15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<cuda.h> #include<cuda_runtime.h> #include "sparse_matrix.h" #include "helper.h" #define ll long long int #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? (x) : (y)) ll *MAJOR_SORTING_INDEX, *MINOR_SORTING_INDEX; int cmp(const void *l, const void *r); void permuteMatrix(sparse_matrix_t* A, ll permute[]); __global__ void copy_matrix_kernel(ll *A_rows, ll *A_cols, float *A_vals, ll *s_rows, ll *s_cols, float *s_vals, ll *stInd, ll *cum, ll colL, int base){ ll totalCols, index; totalCols = (blockIdx.x > 0) ? cum[blockIdx.x] - cum[blockIdx.x - 1] : cum[blockIdx.x]; index = (blockIdx.x > 0) ? cum[blockIdx.x - 1] : 0; if(threadIdx.x == 0) s_rows[blockIdx.x] = index; if(threadIdx.x < totalCols){ s_cols[index + threadIdx.x] = A_cols[stInd[blockIdx.x] + threadIdx.x] - colL + base; s_vals[index + threadIdx.x] = A_vals[stInd[blockIdx.x] + threadIdx.x]; } } void getSubMatrix(sparse_matrix_t* A, sparse_matrix_t *sub, sparse_matrix_t *d_A, sparse_matrix_t *d_sub, ll rowL, ll rowR, ll colL, ll colR, int cpy2cpu){ ll *stInd, *cum, *d_stInd, *d_cum; ll l, r, maxCols = 0; d_sub->n = rowR - rowL + 1; d_sub->m = colR - colL + 1; d_sub->memtype = d_A->memtype; d_sub->descr = A->descr; d_sub->order = A->order; d_sub->index = A->index; stInd = (ll *)malloc(d_sub->n * sizeof(ll)); cum = (ll *)malloc(d_sub->n * sizeof(ll)); for(int i=rowL;i<=rowR;i++){ l = lowerBound(A->cols, A->rows[i - A->index], A->rows[i - A->index + 1] - 1, colL); r = upperBound(A->cols, A->rows[i - A->index], A->rows[i - A->index + 1] - 1, colR); stInd[i-rowL] = l; cum[i-rowL] = (i > rowL) ? cum[i-rowL-1] + MAX(r-l, 0) : MAX(r-l, 0); maxCols = MAX(r-l, maxCols); } d_sub->nnz = cum[rowR - rowL]; sparseMatrixMalloc(d_sub); hipMalloc((void **)&(d_stInd), d_sub->n * sizeof(ll)); hipMalloc((void **)&(d_cum), d_sub->n * sizeof(ll)); hipMemcpy(d_stInd, stInd, d_sub->n * sizeof(ll), hipMemcpyHostToDevice); hipMemcpy(d_cum, cum, d_sub->n * sizeof(ll), hipMemcpyHostToDevice); hipLaunchKernelGGL(( copy_matrix_kernel), dim3(d_sub->n), dim3(maxCols), 0, 0, d_A->rows, d_A->cols, d_A->vals, d_sub->rows, d_sub->cols, d_sub->vals, d_stInd, d_cum, colL, A->index); if(cpy2cpu) sparseMatrixCopy(d_sub, sub, CPU); } void sparseMatrixCopy(sparse_matrix_t* A, sparse_matrix_t* B, enum memory_type mem){ B->n = A->n; B->m = A->m; B->nnz = A->nnz; B->descr = A->descr; B->order = A->order; B->index = A->index; B->memtype = mem; sparseMatrixMalloc(B); if(A->memtype == CPU && B->memtype == GPU){ if(A->descr == CSR) hipMemcpy(B->rows, A->rows, (A->n+1) * sizeof(ll), hipMemcpyHostToDevice); else hipMemcpy(B->rows, A->rows, A->nnz * sizeof(ll), hipMemcpyHostToDevice); hipMemcpy(B->cols, A->cols, A->nnz * sizeof(ll), hipMemcpyHostToDevice); hipMemcpy(B->vals, A->vals, A->nnz * sizeof(float), hipMemcpyHostToDevice); } else if(A->memtype == GPU && B->memtype == CPU){ if(A->descr == CSR) hipMemcpy(B->rows, A->rows, (A->n+1) * sizeof(ll), hipMemcpyDeviceToHost); else hipMemcpy(B->rows, A->rows, A->nnz * sizeof(ll), hipMemcpyDeviceToHost); hipMemcpy(B->cols, A->cols, A->nnz * sizeof(ll), hipMemcpyDeviceToHost); hipMemcpy(B->vals, A->vals, A->nnz * sizeof(float), hipMemcpyDeviceToHost); } } void sparseMatrixMalloc(sparse_matrix_t* A){ if(A->memtype == CPU){ if(A->descr == COO) A->rows = (ll *)malloc(A->nnz * sizeof(ll)); else A->rows = (ll *)malloc((A->n+1) * sizeof(ll)); A->cols = (ll *)malloc(A->nnz * sizeof(ll)); A->vals 
= (float *)malloc(A->nnz * sizeof(float)); } else{ if(A->descr == COO) hipMalloc((void **)&(A->rows), A->nnz * sizeof(ll)); else hipMalloc((void **)&(A->rows), (A->n+1) * sizeof(ll)); hipMalloc((void **)&(A->cols), A->nnz * sizeof(ll)); hipMalloc((void **)&(A->vals), A->nnz * sizeof(float)); } } void sparseMatrixFree(sparse_matrix_t* A){ if(A->memtype == CPU){ free(A->rows); free(A->cols); free(A->vals); } else{ hipFree(A->rows); hipFree(A->cols); hipFree(A->vals); } } void coo2csr(sparse_matrix_t* A){ ll *permute, *rowIndex, row; if(A->order == COLUMN_MAJOR){ MAJOR_SORTING_INDEX = A->rows; MINOR_SORTING_INDEX = A->cols; permute = (ll *)malloc(A->nnz * sizeof(ll)); for(ll i=0;i<A->nnz;i++) permute[i] = i; qsort(permute, A->nnz, sizeof(ll), cmp); permuteMatrix(A, permute); } A->descr = CSR; A->order = ROW_MAJOR; rowIndex = (ll *)malloc((A->n + 1) * sizeof(ll)); rowIndex[0] = 0; row = (ll)A->index; for(ll i=0;i<A->nnz;i++){ if(A->rows[i] != row){ while(row != A->rows[i]) rowIndex[(++row) - A->index] = i; } } rowIndex[A->n] = A->nnz; free(A->rows); A->rows = rowIndex; } void printMatrix(sparse_matrix_t* A){ if(A->descr == COO) for(ll i=0;i<A->nnz;i++) printf("%lld %lld %f\n", A->rows[i], A->cols[i], A->vals[i]); else{ ll row = A->index, ind = 1; for(ll i=0;i<A->nnz;i++){ while(i == A->rows[ind]){ ind++; row++; } printf("%lld %lld %f\n", row, A->cols[i], A->vals[i]); } } } int cmp(const void *l, const void *r){ ll lind, rind; lind = *(ll *)l; rind = *(ll *)r; if(MAJOR_SORTING_INDEX[lind] < MAJOR_SORTING_INDEX[rind]) return -1; else if(MAJOR_SORTING_INDEX[lind] == MAJOR_SORTING_INDEX[rind]) return (MINOR_SORTING_INDEX[lind] < MINOR_SORTING_INDEX[rind]) ? -1 : 1; return 1; } void permuteMatrix(sparse_matrix_t* A, ll *permute){ ll *permutedRows, *permutedCols; float *permutedVals; permutedVals = (float *)malloc(A->nnz * sizeof(float)); permutedRows = (ll *)malloc(A->nnz * sizeof(ll)); permutedCols = (ll *)malloc(A->nnz * sizeof(ll)); for(ll i=0;i<A->nnz;i++){ permutedRows[i] = A->rows[permute[i]]; permutedCols[i] = A->cols[permute[i]]; permutedVals[i] = A->vals[permute[i]]; } for(ll i=0;i<A->nnz;i++){ A->rows[i] = permutedRows[i]; A->cols[i] = permutedCols[i]; A->vals[i] = permutedVals[i]; } free(permutedCols); free(permutedRows); free(permutedVals); }
bc441cea770192a9de92366620b3b0460f831d15.cu
#include<stdio.h> #include<stdlib.h> #include<cuda.h> #include<cuda_runtime.h> #include "sparse_matrix.h" #include "helper.h" #define ll long long int #define MAX(x, y) (((x) > (y)) ? (x) : (y)) #define MIN(x, y) (((x) < (y)) ? (x) : (y)) ll *MAJOR_SORTING_INDEX, *MINOR_SORTING_INDEX; int cmp(const void *l, const void *r); void permuteMatrix(sparse_matrix_t* A, ll permute[]); __global__ void copy_matrix_kernel(ll *A_rows, ll *A_cols, float *A_vals, ll *s_rows, ll *s_cols, float *s_vals, ll *stInd, ll *cum, ll colL, int base){ ll totalCols, index; totalCols = (blockIdx.x > 0) ? cum[blockIdx.x] - cum[blockIdx.x - 1] : cum[blockIdx.x]; index = (blockIdx.x > 0) ? cum[blockIdx.x - 1] : 0; if(threadIdx.x == 0) s_rows[blockIdx.x] = index; if(threadIdx.x < totalCols){ s_cols[index + threadIdx.x] = A_cols[stInd[blockIdx.x] + threadIdx.x] - colL + base; s_vals[index + threadIdx.x] = A_vals[stInd[blockIdx.x] + threadIdx.x]; } } void getSubMatrix(sparse_matrix_t* A, sparse_matrix_t *sub, sparse_matrix_t *d_A, sparse_matrix_t *d_sub, ll rowL, ll rowR, ll colL, ll colR, int cpy2cpu){ ll *stInd, *cum, *d_stInd, *d_cum; ll l, r, maxCols = 0; d_sub->n = rowR - rowL + 1; d_sub->m = colR - colL + 1; d_sub->memtype = d_A->memtype; d_sub->descr = A->descr; d_sub->order = A->order; d_sub->index = A->index; stInd = (ll *)malloc(d_sub->n * sizeof(ll)); cum = (ll *)malloc(d_sub->n * sizeof(ll)); for(int i=rowL;i<=rowR;i++){ l = lowerBound(A->cols, A->rows[i - A->index], A->rows[i - A->index + 1] - 1, colL); r = upperBound(A->cols, A->rows[i - A->index], A->rows[i - A->index + 1] - 1, colR); stInd[i-rowL] = l; cum[i-rowL] = (i > rowL) ? cum[i-rowL-1] + MAX(r-l, 0) : MAX(r-l, 0); maxCols = MAX(r-l, maxCols); } d_sub->nnz = cum[rowR - rowL]; sparseMatrixMalloc(d_sub); cudaMalloc((void **)&(d_stInd), d_sub->n * sizeof(ll)); cudaMalloc((void **)&(d_cum), d_sub->n * sizeof(ll)); cudaMemcpy(d_stInd, stInd, d_sub->n * sizeof(ll), cudaMemcpyHostToDevice); cudaMemcpy(d_cum, cum, d_sub->n * sizeof(ll), cudaMemcpyHostToDevice); copy_matrix_kernel<<<d_sub->n, maxCols>>>(d_A->rows, d_A->cols, d_A->vals, d_sub->rows, d_sub->cols, d_sub->vals, d_stInd, d_cum, colL, A->index); if(cpy2cpu) sparseMatrixCopy(d_sub, sub, CPU); } void sparseMatrixCopy(sparse_matrix_t* A, sparse_matrix_t* B, enum memory_type mem){ B->n = A->n; B->m = A->m; B->nnz = A->nnz; B->descr = A->descr; B->order = A->order; B->index = A->index; B->memtype = mem; sparseMatrixMalloc(B); if(A->memtype == CPU && B->memtype == GPU){ if(A->descr == CSR) cudaMemcpy(B->rows, A->rows, (A->n+1) * sizeof(ll), cudaMemcpyHostToDevice); else cudaMemcpy(B->rows, A->rows, A->nnz * sizeof(ll), cudaMemcpyHostToDevice); cudaMemcpy(B->cols, A->cols, A->nnz * sizeof(ll), cudaMemcpyHostToDevice); cudaMemcpy(B->vals, A->vals, A->nnz * sizeof(float), cudaMemcpyHostToDevice); } else if(A->memtype == GPU && B->memtype == CPU){ if(A->descr == CSR) cudaMemcpy(B->rows, A->rows, (A->n+1) * sizeof(ll), cudaMemcpyDeviceToHost); else cudaMemcpy(B->rows, A->rows, A->nnz * sizeof(ll), cudaMemcpyDeviceToHost); cudaMemcpy(B->cols, A->cols, A->nnz * sizeof(ll), cudaMemcpyDeviceToHost); cudaMemcpy(B->vals, A->vals, A->nnz * sizeof(float), cudaMemcpyDeviceToHost); } } void sparseMatrixMalloc(sparse_matrix_t* A){ if(A->memtype == CPU){ if(A->descr == COO) A->rows = (ll *)malloc(A->nnz * sizeof(ll)); else A->rows = (ll *)malloc((A->n+1) * sizeof(ll)); A->cols = (ll *)malloc(A->nnz * sizeof(ll)); A->vals = (float *)malloc(A->nnz * sizeof(float)); } else{ if(A->descr == COO) cudaMalloc((void **)&(A->rows), 
A->nnz * sizeof(ll)); else cudaMalloc((void **)&(A->rows), (A->n+1) * sizeof(ll)); cudaMalloc((void **)&(A->cols), A->nnz * sizeof(ll)); cudaMalloc((void **)&(A->vals), A->nnz * sizeof(float)); } } void sparseMatrixFree(sparse_matrix_t* A){ if(A->memtype == CPU){ free(A->rows); free(A->cols); free(A->vals); } else{ cudaFree(A->rows); cudaFree(A->cols); cudaFree(A->vals); } } void coo2csr(sparse_matrix_t* A){ ll *permute, *rowIndex, row; if(A->order == COLUMN_MAJOR){ MAJOR_SORTING_INDEX = A->rows; MINOR_SORTING_INDEX = A->cols; permute = (ll *)malloc(A->nnz * sizeof(ll)); for(ll i=0;i<A->nnz;i++) permute[i] = i; qsort(permute, A->nnz, sizeof(ll), cmp); permuteMatrix(A, permute); } A->descr = CSR; A->order = ROW_MAJOR; rowIndex = (ll *)malloc((A->n + 1) * sizeof(ll)); rowIndex[0] = 0; row = (ll)A->index; for(ll i=0;i<A->nnz;i++){ if(A->rows[i] != row){ while(row != A->rows[i]) rowIndex[(++row) - A->index] = i; } } rowIndex[A->n] = A->nnz; free(A->rows); A->rows = rowIndex; } void printMatrix(sparse_matrix_t* A){ if(A->descr == COO) for(ll i=0;i<A->nnz;i++) printf("%lld %lld %f\n", A->rows[i], A->cols[i], A->vals[i]); else{ ll row = A->index, ind = 1; for(ll i=0;i<A->nnz;i++){ while(i == A->rows[ind]){ ind++; row++; } printf("%lld %lld %f\n", row, A->cols[i], A->vals[i]); } } } int cmp(const void *l, const void *r){ ll lind, rind; lind = *(ll *)l; rind = *(ll *)r; if(MAJOR_SORTING_INDEX[lind] < MAJOR_SORTING_INDEX[rind]) return -1; else if(MAJOR_SORTING_INDEX[lind] == MAJOR_SORTING_INDEX[rind]) return (MINOR_SORTING_INDEX[lind] < MINOR_SORTING_INDEX[rind]) ? -1 : 1; return 1; } void permuteMatrix(sparse_matrix_t* A, ll *permute){ ll *permutedRows, *permutedCols; float *permutedVals; permutedVals = (float *)malloc(A->nnz * sizeof(float)); permutedRows = (ll *)malloc(A->nnz * sizeof(ll)); permutedCols = (ll *)malloc(A->nnz * sizeof(ll)); for(ll i=0;i<A->nnz;i++){ permutedRows[i] = A->rows[permute[i]]; permutedCols[i] = A->cols[permute[i]]; permutedVals[i] = A->vals[permute[i]]; } for(ll i=0;i<A->nnz;i++){ A->rows[i] = permutedRows[i]; A->cols[i] = permutedCols[i]; A->vals[i] = permutedVals[i]; } free(permutedCols); free(permutedRows); free(permutedVals); }
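The pair above records the same sparse-matrix utility before and after hipify: cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree, and the triple-chevron launch copy_matrix_kernel<<<d_sub->n, maxCols>>>(...) becomes hipLaunchKernelGGL(copy_matrix_kernel, dim3(d_sub->n), dim3(maxCols), 0, 0, ...). Note that the HIP half of this record still carries #include<cuda.h> and #include<cuda_runtime.h>, which a ROCm-only build would normally drop in favor of the HIP header alone. The stand-alone sketch below is not part of the dataset; the kernel name scale_kernel and the size n are illustrative only. It shows the same conversion pattern on a minimal kernel, keeping the original CUDA forms as comments.

#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void scale_kernel(float *data, float factor, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n) data[i] *= factor;   // kernel body is unchanged by hipify
}

int main(){
    const int n = 256;
    float *d_data;
    hipMalloc((void **)&d_data, n * sizeof(float));   // was: cudaMalloc
    hipMemset(d_data, 0, n * sizeof(float));          // was: cudaMemset
    // was: scale_kernel<<<1, n>>>(d_data, 2.0f, n);
    hipLaunchKernelGGL(scale_kernel, dim3(1), dim3(n), 0, 0, d_data, 2.0f, n);
    hipDeviceSynchronize();                           // was: cudaDeviceSynchronize
    hipFree(d_data);                                  // was: cudaFree
    printf("done\n");
    return 0;
}

A file like this builds with hipcc on a ROCm toolchain; the only source-level differences from the CUDA original are the header and the launch syntax, which is exactly the transformation visible in the record above.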
7892c80d00b532e437c1abfe8cee352600831cfe.hip
// !!! This is a file automatically generated by hipify!!! #include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <hip/hip_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/creation.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct CopyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType a, CudaType& out) { out = a; } }; class CudaCopyOp : public CopyOp { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CopyImpl<T>{}, a, out); }); } }; CHAINERX_REGISTER_OP_CUDA(CopyOp, CudaCopyOp); template <typename InT, typename OutT> struct AsTypeImpl { using InCudaType = cuda_internal::DataType<InT>; using OutCudaType = cuda_internal::DataType<OutT>; __device__ void operator()(int64_t /*i*/, InCudaType a, OutCudaType& out) { out = static_cast<OutCudaType>(a); } }; } // namespace void CudaDevice::AsType(const Array& a, const Array& out) { CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{index()}; auto do_astype = [&](auto in_pt, auto out_pt) { using InT = typename decltype(in_pt)::type; using OutT = typename decltype(out_pt)::type; Elementwise<const InT, OutT>(AsTypeImpl<InT, OutT>{}, a, out); }; VisitDtype(out.dtype(), [&](auto out_pt) { VisitDtype(a.dtype(), do_astype, out_pt); }); } } // namespace cuda } // namespace chainerx
7892c80d00b532e437c1abfe8cee352600831cfe.cu
#include "chainerx/cuda/cuda_device.h" #include <cstdint> #include <cuda_runtime.h> #include "chainerx/array.h" #include "chainerx/cuda/cuda_runtime.h" #include "chainerx/cuda/cuda_set_device_scope.h" #include "chainerx/cuda/elementwise.cuh" #include "chainerx/cuda/op_regist.h" #include "chainerx/device.h" #include "chainerx/dtype.h" #include "chainerx/routines/creation.h" namespace chainerx { namespace cuda { namespace { template <typename T> struct CopyImpl { using CudaType = cuda_internal::DataType<T>; __device__ void operator()(int64_t /*i*/, CudaType a, CudaType& out) { out = a; } }; class CudaCopyOp : public CopyOp { public: void Call(const Array& a, const Array& out) override { Device& device = a.device(); device.CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{device.index()}; VisitDtype(out.dtype(), [&](auto pt) { using T = typename decltype(pt)::type; Elementwise<const T, T>(CopyImpl<T>{}, a, out); }); } }; CHAINERX_REGISTER_OP_CUDA(CopyOp, CudaCopyOp); template <typename InT, typename OutT> struct AsTypeImpl { using InCudaType = cuda_internal::DataType<InT>; using OutCudaType = cuda_internal::DataType<OutT>; __device__ void operator()(int64_t /*i*/, InCudaType a, OutCudaType& out) { out = static_cast<OutCudaType>(a); } }; } // namespace void CudaDevice::AsType(const Array& a, const Array& out) { CheckDevicesCompatible(a, out); CudaSetDeviceScope scope{index()}; auto do_astype = [&](auto in_pt, auto out_pt) { using InT = typename decltype(in_pt)::type; using OutT = typename decltype(out_pt)::type; Elementwise<const InT, OutT>(AsTypeImpl<InT, OutT>{}, a, out); }; VisitDtype(out.dtype(), [&](auto out_pt) { VisitDtype(a.dtype(), do_astype, out_pt); }); } } // namespace cuda } // namespace chainerx
ffae5d6874186d1c791add6af72a2713e7a016f4.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief * ragged_test * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Yiming Wang * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <gmock/gmock.h> #include <gtest/gtest.h> #include <algorithm> #include <numeric> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/tensor.h" #include "k2/csrc/test_utils.h" namespace k2 { class RaggedShapeOpsSuiteTest : public ::testing::Test { protected: RaggedShapeOpsSuiteTest() { ContextPtr context = GetCpuContext(); const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; std::vector<RaggedShapeLayer> axes; axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); simple_shape_ = RaggedShape(axes, true); // random_shape_ is on CPU random_shape_ = RandomRaggedShape(true, // set_row_ids 3, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements } RaggedShape simple_shape_; RaggedShape random_shape_; }; TEST(RaggedShapeTest, TestConstructFromString) { RaggedShape rs(" [ [ x x ] [x] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); RaggedShape rs2(" [ [ [ x x ] ] [[x]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; K2_CHECK_EQ(RaggedShape("[ ]").Dim0(), 0); ASSERT_DEATH(RaggedShape(" [ [ x x ] [x] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x x ] [[x]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x [] x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x | x ] "), ""); for (int i = 0; i < 5; i++) { RaggedShape rs = RandomRaggedShape(true, 2, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements std::ostringstream os; os << rs; RaggedShape rs2; std::istringstream is(os.str()); K2_LOG(INFO) << "Shape is: " << os.str(); is >> rs2; K2_CHECK(is.good()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. 
K2_CHECK(Equal(rs, rs2) || rs.NumElements() == 0); } } TEST(RaggedTest, TestRaggedFromString) { Ragged<int32_t> rs(" [ [ 1 2 ] [3] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); K2_CHECK_EQ(rs.values.Back(), 3); K2_CHECK_EQ(rs.values[0], 1); Ragged<int32_t> rs2(" [ [ [ 0 5 ] ] [[10]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [0] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [[0]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 [] 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 | 0 ] "), ""); for (int32_t i = 0; i < 5; i++) { Ragged<int32_t> r = RandomRagged<int32_t>(); std::ostringstream os; os << r; Ragged<int32_t> r2(os.str()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. K2_CHECK(Equal(r, r2) || r.values.Dim() == 0); } } template <typename T> void TestMaxPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case const std::vector<int32_t> row_splits = {0}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = 0; std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> max_values(context, num_rows); // just run to check if there's any error MaxPerSublist(ragged, 1, &max_values); EXPECT_EQ(max_values.Dim(), 0); } { const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 2, 8, 0, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> max_values(context, num_rows); T default_value = 2; MaxPerSublist(ragged, default_value, &max_values); // copy memory from GPU/CPU to CPU std::vector<T> cpu_data(max_values.Dim()); max_values.Context()->CopyDataTo( max_values.Dim() * max_values.ElementSize(), max_values.Data(), cpu, cpu_data.data()); std::vector<T> expected_data = {3, default_value, 8, default_value}; EXPECT_EQ(cpu_data, expected_data); } { // test with random large size const int32_t min_num_elements = 2000; // not random shape is on CPU RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_elements, 5000); ASSERT_EQ(shape.NumAxes(), 2); RaggedShape gpu_shape; if (context->GetDeviceType() == kCuda) { // copy shape to GPU const Array1<T> &row_splits = shape.RowSplits(1); RaggedShapeLayer shape_dim; shape_dim.row_splits = row_splits.To(GetCudaContext()); shape_dim.cached_tot_size = shape.NumElements(); std::vector<RaggedShapeLayer> axes = {shape_dim}; gpu_shape = RaggedShape(axes, true); } int32_t num_elems = shape.NumElements(); std::vector<T> data(num_elems); for (int32_t i = 0; i != 10; ++i) { std::iota(data.begin(), data.end(), 0); // randomly set data[pos] = num_elems which is // greater than any element in data int32_t pos = RandInt(0, num_elems - 1); data[pos] = num_elems; // find the corresponding row int32_t num_rows = shape.Dim0(); const int32_t *row_splits_data 
= shape.RowSplits(1).Data(); int32_t row = 0; for (int32_t i = 0; i < num_rows; ++i) { if (pos >= row_splits_data[i] && pos < row_splits_data[i + 1]) { row = i; break; } } Array1<T> values(context, data); Ragged<T> ragged(context->GetDeviceType() == kCuda ? gpu_shape : shape, values); Array1<T> max_values(context, num_rows); T default_value = 0; MaxPerSublist(ragged, default_value, &max_values); EXPECT_EQ(max_values[row], num_elems); } } } } TEST(RaggedShapeOpsTest, MaxPerSubListTest) { TestMaxPerSubListTest<int32_t>(); } template <typename T> void TestMinPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case std::vector<int32_t> row_splits_vec = {0}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> min_values(context, num_rows); // just run to check if there's any error MinPerSublist(ragged, 1, &min_values); EXPECT_EQ(min_values.Dim(), 0); } { std::vector<int32_t> row_splits_vec = {0, 2, 2, 5, 6}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); const std::vector<T> values_vec = {1, 3, 3, 8, 4, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> min_values(context, num_rows); T default_value = 2; MinPerSublist(ragged, default_value, &min_values); // copy memory from GPU/CPU to CPU min_values = min_values.To(cpu); std::vector<T> cpu_data(min_values.Data(), min_values.Data() + min_values.Dim()); std::vector<T> expected_data = {1, default_value, default_value, -1}; EXPECT_EQ(cpu_data, expected_data); } // May add tests for random large size? 
(but maybe it's fine to not add as // we have tested large cases in MaxPerSubList) } } TEST(RaggedShapeOpsTest, MinPerSubListTest) { TestMinPerSubListTest<int32_t>(); } template <typename T> void TestAndOrPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // And const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 6, 11, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = -1; AndPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {1, -1, 2, 0}; EXPECT_EQ(cpu_data, expected_data); } { // Or const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 4, 6, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = 0; OrPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {3, 0, 7, 0}; EXPECT_EQ(cpu_data, expected_data); } } } TEST(RaggedShapeOpsTest, AndOrPerSubListTest) { TestAndOrPerSubListTest<int32_t>(); } void TestUnsqueeze(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); src_shape.Populate(); // set row_ids { // axis = 0. 
RaggedShape shape = Unsqueeze(src_shape, 0); int32_t dim0 = src_shape.Dim0(); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, dim0}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data(dim0, 0); CheckArrayData(row_ids0, data); } { for (size_t i = 0; i != src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = Unsqueeze(src_shape, axis); int32_t tot_size = shape.TotSize(axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { for (int32_t i = 0; i < axis; ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i].row_ids); } } { const Array1<int32_t> &row_splits = dest_axes[axis].row_splits; std::vector<int32_t> data(tot_size + 1); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_splits, data); } { const Array1<int32_t> &row_ids = dest_axes[axis].row_ids; std::vector<int32_t> data(tot_size); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_ids, data); } { for (std::size_t i = axis; i < src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestUnsqueeze) { TestUnsqueeze(simple_shape_); TestUnsqueeze(random_shape_); } TEST(RaggedShapeOpsTest, TestUnsqueezeParallel) { for (int32_t i = 0; i < 10; i++) { ContextPtr c = (i % 2 == 0 ? GetCpuContext() : GetCudaContext()); int32_t num_shapes = RandInt(0, 10); std::vector<RaggedShape *> orig_shapes; for (int32_t i = 0; i < num_shapes; i++) orig_shapes.push_back( new RaggedShape(RandomRaggedShape(false, 2, 5, 0, 1000).To(c))); int32_t axis = 0; // only one supported for now. std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(num_shapes, orig_shapes.data(), axis); for (int32_t i = 0; i < num_shapes; i++) { ASSERT_EQ(unsqueezed[i].Validate(), true); RaggedShape temp = RemoveAxis(unsqueezed[i], axis); ASSERT_EQ(Equal(temp, *(orig_shapes[i])), true); delete orig_shapes[i]; } } } void TestRemoveAxis(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); ASSERT_EQ(src_shape.NumAxes(), 4); { // axis = 0. 
int32_t axis = 0; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, 3, 7, 10}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data = {0, 0, 0, 1, 1, 1, 1, 2, 2, 2}; CheckArrayData(row_ids0, data); } { for (std::size_t i = 1; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 3 int32_t axis = 3; // the last axis RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRemoveAxis) { TestRemoveAxis(simple_shape_); } TEST(RaggedShapeOpsTest, TestGetOffsets) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { for (int32_t i = 0; i != 2; ++i) { int32_t num_shape = RandInt(10, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } RaggedShape **shapes_ptr = shapes.data(); Array2<int32_t> offsets = GetOffsets(num_shape, shapes_ptr); ASSERT_EQ(offsets.Dim0(), num_axes + 1); ASSERT_EQ(offsets.Dim1(), num_shape + 1); auto acc = offsets.Accessor(); for (int32_t axis = 0; axis <= num_axes; ++axis) { int32_t sum = 0; for (int32_t j = 0; j <= num_shape; ++j) { EXPECT_EQ(acc(axis, j), sum); if (j < num_shape) { sum += (axis == 0 ? 1 : shape_vec[j].TotSize(axis - 1)); } } } } } } // returns a random ragged shape where the dims on axis 1 are all the same // (so: can be transposed). 
RaggedShape RandomRaggedShapeToTranspose(ContextPtr c) { ContextPtr c_cpu = GetCpuContext(); RaggedShape random = RandomRaggedShape(false, 2, 4, 0, 5000).To(c); int32_t input_dim0 = random.Dim0(), divisor = 1; for (int32_t i = 1; i * i <= input_dim0; i++) { if (input_dim0 % i == 0 && i > divisor) divisor = i; } int32_t output_dim0 = divisor, output_dim1 = input_dim0 / divisor; Array1<int32_t> row_splits = Range<int32_t>(c, output_dim0 + 1, 0, output_dim1); int32_t cached_tot_size = input_dim0; RaggedShape top_level_shape = RaggedShape2(&row_splits, nullptr, cached_tot_size); return ComposeRaggedShapes(top_level_shape, random); } TEST(RaggedShapeOpsTest, TestTranspose) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); RaggedShape shape = Transpose(src_shape); EXPECT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); RaggedShape transposed = Transpose(to_transpose); if (context->GetDeviceType() != kCpu) { to_transpose = to_transpose.To(cpu); transposed = transposed.To(cpu); } for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t i = transposed[index]; // Just make sure this doesn't crash, // don't need the value. std::swap(index[0], index[1]); i = to_transpose[index]; // don't need the value, just need to make // sure it's an allowable index. ++i; // this line just suppresses the warning `variable i set but not // used` } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); std::swap(index[0], index[1]); int32_t i = transposed[index]; // don't need the value, just need to // make sure it's an allowable index. 
} } } } } template <typename T> void TestTransposeRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); std::vector<T> values = {0, 1, 2, 3, 4, 5, 8, 7, 6, 9, 10, 15}; ASSERT_EQ(values.size(), src_shape.NumElements()); Array1<T> values_array(context, values); Ragged<T> ragged(src_shape, values_array); Ragged<T> ans = Transpose(ragged); RaggedShape shape = ans.shape; // Check shape ASSERT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); // Check values CheckArrayData(ans.values, {0, 1, 2, 4, 5, 8, 6, 9, 3, 7, 10, 15}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); int32_t num_elems = to_transpose.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); Ragged<T> src(to_transpose, src_values); Ragged<T> ans = Transpose(src); if (context->GetDeviceType() == kCuda) { src = src.To(cpu); ans = ans.To(cpu); to_transpose = to_transpose.To(cpu); } RaggedShape transposed = ans.shape; for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = ans[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, src[index]); } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = src[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, ans[index]); } } } } } TEST(RaggedTest, TestTransposeRagged) { TestTransposeRagged<int32_t>(); TestTransposeRagged<double>(); } TEST(RaggedShapeOpsTest, TestRowSplitsPtr) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape = RandomRaggedShape().To(context); ASSERT_GE(shape.NumAxes(), 2); Array1<int32_t *> ptrs = GetRowSplitsPtr(shape); ASSERT_EQ(ptrs.Dim(), shape.NumAxes() - 1); // as num_axes is not so big, access (may copy memory) it in a loop is fine. 
for (int32_t i = 0; i != ptrs.Dim(); ++i) { EXPECT_EQ(ptrs[i], shape.RowSplits(i + 1).Data()); } } } void TestRaggedShape2(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 2); Array1<int32_t> row_splits = src_shape.RowSplits(1); Array1<int32_t> row_ids = src_shape.RowIds(1); int32_t cached_tot_size = src_shape.TotSize(1); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape2(&row_splits, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // both row_splits and row_ids are non-null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null RaggedShape result = RaggedShape2(&row_splits, nullptr, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } // note if row_splits == null, then we suppose there's no empty rows after // the last row-id in row_ids if (row_splits.Dim() == (row_ids.Dim() == 0 ? 1 : row_ids.Back() + 2)) { { // row_splits is null RaggedShape result = RaggedShape2(nullptr, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_splits is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(nullptr, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape2) { TestRaggedShape2(simple_shape_); TestRaggedShape2(random_shape_); } void TestRaggedShape3(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); int32_t cached_tot_size1 = src_shape.TotSize(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); int32_t cached_tot_size2 = src_shape.TotSize(2); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape3(&row_splits1, &row_ids1, cached_tot_size1, &row_splits2, &row_ids2, cached_tot_size2); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } { // row_ids is non-null, cached_tot_size = -1 RaggedShape result = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits1); 
CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // note if row_splits == null, then we suppose there's no empty rows after // the last row-id in row_ids bool valid1 = (row_splits1.Dim() == (row_ids1.Dim() == 0 ? 1 : row_ids1.Back() + 2)); bool valid2 = (row_splits2.Dim() == (row_ids2.Dim() == 0 ? 1 : row_ids2.Back() + 2)); if (valid1 && valid2) { RaggedShape result = RaggedShape3(nullptr, &row_ids1, -1, nullptr, &row_ids2, -1); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // TODO(haowen): add more cases for other branches } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape3) { TestRaggedShape3(simple_shape_); TestRaggedShape3(random_shape_); } void TestComposeShape(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); RaggedShape shape1 = RaggedShape2(&row_splits1, nullptr, -1); RaggedShape shape2 = RaggedShape2(&row_splits2, nullptr, -1); RaggedShape result = ComposeRaggedShapes(shape1, shape2); ASSERT_EQ(result.NumAxes(), 3); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); } } TEST_F(RaggedShapeOpsSuiteTest, TestComposeShape) { TestComposeShape(simple_shape_); TestComposeShape(random_shape_); } void TestShapeFromTotSize(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 2); int32_t num_axes = src_shape.NumAxes(); std::vector<int32_t> tot_sizes(num_axes); for (int32_t i = 0; i != num_axes; ++i) { tot_sizes[i] = src_shape.TotSize(i); } RaggedShape result = RaggedShapeFromTotSizes(context, num_axes, tot_sizes.data()); ASSERT_EQ(result.NumAxes(), num_axes); for (int32_t i = 0; i < num_axes; ++i) { EXPECT_EQ(result.TotSize(i), src_shape.TotSize(i)); if (i > 0) { EXPECT_EQ(result.RowSplits(i).Dim(), src_shape.RowSplits(i).Dim()); EXPECT_EQ(result.RowIds(i).Dim(), src_shape.RowIds(i).Dim()); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestShapeFromTotSize) { TestShapeFromTotSize(simple_shape_); TestShapeFromTotSize(random_shape_); } template <typename T> void TestRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // constructed with row_splits and row_ids // RaggedTensor4 t = [ // [ [[ 1, 2], [4]], [[3, 0]] ], // [ [[7, 8, 9]], [[6], [3, 5, 7]], [[2]] ], // [ [[3, 4], [], [8]] ] // ] const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 
4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; const std::vector<T> values_vec = {1, 2, 4, 3, 0, 7, 8, 9, 6, 3, 5, 7, 2, 3, 4, 8}; std::vector<RaggedShapeLayer> axes; axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); RaggedShape shape(axes, true); Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); // test Index(axis, i) { // values: [[[ 1, 2], [4]], [[3, 0]]] Ragged<T> sub_raggged = ragged.Index(0, 0); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 2, 3}, {0, 2, 3, 5}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {1, 2, 4, 3, 0}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[7, 8, 9]], [[6], [3, 5, 7]], [[2]]] Ragged<T> sub_raggged = ragged.Index(0, 1); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 1, 3, 4}, {0, 3, 4, 7, 8}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {7, 8, 9, 6, 3, 5, 7, 2}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[3, 4], [], [8]]] Ragged<T> sub_raggged = ragged.Index(0, 2); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 3}, {0, 2, 2, 3}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {3, 4, 8}; CheckArrayData<T>(sub_values, sub_values_vec); } // test operator[](const std::vector<int32_t> &indexes) if (context->GetDeviceType() == kCpu) { { std::vector<int32_t> indexes = {0, 0, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 0); EXPECT_EQ(ragged[indexes], 1); } { std::vector<int32_t> indexes = {0, 1, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 3); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {1, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 6); EXPECT_EQ(ragged[indexes], 8); } { std::vector<int32_t> indexes = {1, 1, 1, 0}; EXPECT_EQ(ragged.shape[indexes], 9); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {2, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 14); EXPECT_EQ(ragged[indexes], 4); } { std::vector<int32_t> indexes = {2, 0, 2, 0}; EXPECT_EQ(ragged.shape[indexes], 15); EXPECT_EQ(ragged[indexes], 8); } } const std::vector<std::vector<int32_t>> row_splits_vec = { row_splits1, row_splits2, row_splits3}; // test To(ctx) { // to GPU Ragged<T> other = ragged.To(GetCudaContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } { // to CPU Ragged<T> other = ragged.To(GetCpuContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } } } } template <typename T, typename OP = LessThan<T>> static void 
CpuSortSublists(const Array1<int32_t> &row_splits, Array1<T> *src) { K2_CHECK(src->Context()->GetDeviceType() == kCpu); T *p = src->Data(); OP comp = OP(); for (int32_t i = 0; i < row_splits.Dim() - 1; ++i) { int32_t cur = row_splits[i]; int32_t next = row_splits[i + 1]; std::sort(p + cur, p + next, comp); } } template <typename T, typename OP = LessThan<T>> static void TestSortSublists() { auto cpu_context = GetCpuContext(); auto cuda_context = GetCudaContext(); RaggedShape shape = RandomRaggedShape(false, // set_row_ids 2, // min_num_axes 4, // max_num_axes 1, // min_num_elements 2000); // max_num_elements Array1<T> values = RandUniformArray1<T>(shape.Context(), shape.NumElements(), -2000, 2000); Ragged<T> ragged(shape, values); ragged = ragged.To(cuda_context); values = values.To(cpu_context); // to be sorted by cpu Array1<T> unsorted = values.Clone(); Array1<int32_t> order(ragged.Context(), ragged.values.Dim()); SortSublists<T, OP>(&ragged, &order); Array1<int32_t> &segment = ragged.shape.RowSplits(ragged.NumAxes() - 1); CpuSortSublists<T, OP>(segment, &values); int32_t n = order.Dim(); for (int i = 0; i != n; ++i) { EXPECT_EQ(values[i], ragged.values[i]); EXPECT_EQ(ragged.values[i], unsorted[order[i]]); } } TEST(RaggedTest, Ragged) { TestRagged<int32_t>(); TestRagged<double>(); TestSortSublists<int32_t>(); TestSortSublists<double>(); } TEST(RaggedShapeOpsTest, TestAppend) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes[1] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[1] = &shapes[1]; } { // axis == 1 RaggedShape result = Append(1, 2, shapes_ptr.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } } { // axis == 0 RaggedShape result = Append(0, 2, shapes_ptr.data()); // get result 
splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto i = 0; i < 2; ++i) { std::vector<const Array1<int32_t> *> splits_ptr = { &row_splits_vec[i][0], &row_splits_vec[i][1]}; Array1<int32_t> curr_row_splits = SpliceRowSplits(2, splits_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), result_splits[i]); CheckArrayData(result.RowIds(i + 1), result_ids[i]); } } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } // only test case axis == 0, test axis==1 with simple case is good // enough as it just calls Stack RaggedShape result = Append(0, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (int32_t axis = 1; axis < num_axes; ++axis) { std::vector<Array1<int32_t>> splits_vec(num_shape); std::vector<const Array1<int32_t> *> splits_vec_ptr(num_shape); for (int32_t n = 0; n != num_shape; ++n) { splits_vec[n] = shape_vec[n].RowSplits(axis); splits_vec_ptr[n] = &splits_vec[n]; } Array1<int32_t> curr_row_splits = SpliceRowSplits(num_shape, splits_vec_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } // check data for (int32_t axis = 1; axis < num_axes; ++axis) { CheckArrayData(result.RowSplits(axis), result_splits[axis - 1]); CheckArrayData(result.RowIds(axis), result_ids[axis - 1]); } } } } } template <typename T> void TestAppendRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // TODO(haowen): remove duplicate code in TestAppend above. // test with simple case could be good enough, as we have tested // Append(RaggedShape&) already. 
std::vector<Ragged<T>> ragged_vec(2); std::vector<Ragged<T> *> ragged(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<T> values_vec = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[0] = Ragged<T>(shape, values); ragged[0] = &ragged_vec[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; const std::vector<T> values_vec = {20, 21, 23, 28, 30, 32, 35}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[1] = Ragged<T>(shape, values); ragged[1] = &ragged_vec[1]; } { // axis == 0 Ragged<T> result = Append(0, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 1, 1, 1, 2, 3, 4, 4, 5}, {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18, 20, 21, 23, 28, 30, 32, 35}; CheckArrayData(result.values, expected_data); } { // axis == 1 Ragged<T> result = Append(1, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 20, 21, 23, 7, 9, 10, 12, 28, 30, 14, 15, 18, 32, 35}; CheckArrayData(result.values, expected_data); } } } TEST(RaggedTest, TestAppendRagged) { TestAppendRagged<int32_t>(); TestAppendRagged<double>(); } void CheckResultOfIndex(const ContextPtr &context, RaggedShape shape, Array1<int32_t> new2old, RaggedShape result) { K2_CHECK(context->IsCompatible(*shape.Context())); ContextPtr cpu = GetCpuContext(); // will use to copy data int32_t num_axes = shape.NumAxes(); int32_t src_dim0 = shape.Dim0(), result_dim0 = result.Dim0(); if (result_dim0 == 0) { std::vector<int32_t> empty_row_splits = {0}; for (int32_t i = 0; i < num_axes - 1; ++i) { CheckArrayData(result.RowSplits(i + 1), empty_row_splits); EXPECT_EQ(result.RowIds(i + 1).Dim(), 0); } return; } Array2<int32_t> old_offsets(context, num_axes, src_dim0 + 1); auto old_offsets_acc = old_offsets.Accessor(); Array1<int32_t *> row_splits_ptrs 
= GetRowSplitsPtr(shape); int32_t **row_splits_ptrs_data = row_splits_ptrs.Data(); // Set old_offsets K2_EVAL( context, src_dim0 + 1, lambda_get_old_offsets, (int32_t i)->void { // 0 <= i <= dim0 int32_t cur_offset = i; for (int32_t axis = 0; axis < num_axes; axis++) { old_offsets_acc(axis, i) = cur_offset; if (axis + 1 == num_axes) return; cur_offset = row_splits_ptrs_data[axis][cur_offset]; } }); old_offsets = old_offsets.To(cpu); auto cpu_offsets_acc = old_offsets.Accessor(); shape = shape.To(cpu); new2old = new2old.To(cpu); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto axis = 0; axis < num_axes - 1; ++axis) { Array1<int32_t> curr_row_splits = shape.RowSplits(axis + 1); std::vector<Array1<int32_t>> splits_vec(result_dim0); std::vector<const Array1<int32_t> *> splits_vec_ptr(result_dim0); for (int32_t m = 0; m != result_dim0; ++m) { int32_t old_idx = new2old[m]; int32_t start = cpu_offsets_acc(axis, old_idx); int32_t end = cpu_offsets_acc(axis, old_idx + 1); Array1<int32_t> sub_list = curr_row_splits.Range(start, end - start + 1); Array1<int32_t> copy_sub_list(cpu, sub_list.Dim()); copy_sub_list.CopyFrom(sub_list); int32_t *data = copy_sub_list.Data(); int32_t init = data[0]; for (int32_t n = 0; n != copy_sub_list.Dim(); ++n) { data[n] -= init; } splits_vec[m] = copy_sub_list; splits_vec_ptr[m] = &splits_vec[m]; } Array1<int32_t> result_row_splits = SpliceRowSplits(result_dim0, splits_vec_ptr.data()); result_splits.push_back(result_row_splits); Array1<int32_t> result_row_ids(cpu, result_row_splits.Back()); RowSplitsToRowIds(result_row_splits, &result_row_ids); result_ids.push_back(result_row_ids); } for (int32_t i = 0; i < num_axes - 1; ++i) { CheckArrayData(result.RowSplits(i + 1), result_splits[i]); CheckArrayData(result.RowIds(i + 1), result_ids[i]); } } TEST(RaggedShapeOpsTest, TestIndex) { for (int i = 0; i < 5; i++) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); std::vector<int32_t> new2old_vec = {2, 1}; Array1<int32_t> new2old(context, new2old_vec); Array1<int32_t> value_indexes_out; RaggedShape result = Index(shape, new2old, &value_indexes_out); // fsa 2, state_idx01 {5}, arc_idx012 {7, 8, 9} // fsa 1, state_idx01 {2, 3, 4}, arc_idx012 {{3},{4, 5}, {6}} CheckArrayData(value_indexes_out, std::vector<int32_t>{7, 8, 9, 3, 4, 5, 6}); CheckResultOfIndex(context, shape, new2old, result); } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(), result_dim0 = RandInt(0, 10); if (dim0 == 0) result_dim0 = 0; std::vector<int32_t> new2old_vec(result_dim0); for (int i = 0; i < result_dim0; i++) new2old_vec[i] = RandInt(0, dim0 - 1); Array1<int32_t> new2old(context, new2old_vec); 
Array1<int32_t> value_indexes; RaggedShape result = Index(shape, new2old, &value_indexes); CheckResultOfIndex(context, shape, new2old, result); K2_LOG(INFO) << "Value_indexes = " << value_indexes; } } } } } TEST(GetTransposeReordering, NoDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 e4 // row2 f5 // row3 g6 h7 i8 // row4 j9 // row5 k10 l11 std::vector<int32_t> col_indexes{4, 5, 0, 1, 5, 3, 0, 2, 4, 5, 1, 4}; std::vector<int32_t> _row_splits{0, 2, 5, 6, 9, 10, 12}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 6, 3, 10, 7, 5, 0, 8, 11, 1, 4, 9}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, ThreeAxesEmptyCase) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> ragged("[ [ [ ] ] ]"); ragged = ragged.To(context); Array1<int32_t> order = GetTransposeReordering(ragged, 0); } } TEST(GetTransposeReordering, NoDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 // row2 e4 // row3 f5 g6 h7 // row4 i8 // row5 j9 k10 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, std::vector<int32_t>{1, 3, 0, 2, 1, 0, 1, 3, 5, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 2, 4, 5, 8, 9, 11}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 5, 0, 4, 6, 3, 1, 7, 9, 8, 10}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 std::vector<int32_t> col_indexes{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 3, 5}; std::vector<int32_t> _row_splits{0, 5, 8, 9, 13, 16, 18}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 16, 13, 14, 15, 17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, std::vector<int32_t>{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 5, 8, 9, 13, 16, 18}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 13, 14, 15, 16, 
17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, RandomFsaVecTest) { for (int32_t iter = 0; iter != 8; ++iter) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { int n = RandInt(100, 200); int32_t min_num_fsas = n; int32_t max_num_fsas = n * 2; bool acyclic = false; int32_t max_symbol = 100; int32_t min_num_arcs = min_num_fsas * 10; int32_t max_num_arcs = max_num_fsas * 20; FsaVec fsas = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic, max_symbol, min_num_arcs, max_num_arcs); fsas = fsas.To(context); Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_states = fsas.TotSize(1); int32_t num_arcs = fsas.TotSize(2); Array1<int32_t> order = GetTransposeReordering(dest_states_tensor, num_states); Sort(&order); ASSERT_EQ(order.Dim(), num_arcs); Array1<int32_t> expected = Range<int32_t>(context, num_arcs, 0); CheckArrayData(order, expected); } } } TEST(ChangeSublistSize, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9}); size_delta = -2; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 0, 1}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5}); } } TEST(ChangeSublistSizePinned, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9, 9}); size_delta = -3; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 0, 0, 0}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5, 5}); } } } TEST(ChangeSublistSize, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 21}); // it is an error to use -2 here // because the state (state_idx01 == 2) has only 1 entry size_delta = -1; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 2, 3, 3, 4, 6}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 11}); } } TEST(ChangeSublistSizePinned, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, 
nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 16, 21}); size_delta = -2; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 1, 1, 1, 1, 1, 2}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); } } TEST(RaggedShapeOpsTest, TestGetCountsPartitioned) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // Testing with simple case is good enough as we have tested GetCounts() // with random large size and GetCountsPartitioned just calls GetCounts. std::vector<int32_t> src_row_splits_vec = {0, 3, 4, 6, 10}; Array1<int32_t> src_row_splits(context, src_row_splits_vec); RaggedShape src_shape = RaggedShape2(&src_row_splits, nullptr, -1); std::vector<int32_t> src_values_vec = {0, 1, 0, 2, 5, 5, 7, 7, 9, 7}; Array1<int32_t> src_values(context, src_values_vec); Ragged<int32_t> src(src_shape, src_values); std::vector<int32_t> ans_row_splits_vec = {0, 2, 4, 7, 10}; Array1<int32_t> ans_row_splits(context, ans_row_splits_vec); RaggedShape ans_shape = RaggedShape2(&ans_row_splits, nullptr, -1); Ragged<int32_t> result = GetCountsPartitioned(src, ans_shape); ASSERT_EQ(result.NumAxes(), 2); // Check row_splits Array1<int32_t> row_splits = result.shape.RowSplits(1).To(cpu); std::vector<int32_t> result_row_splits( row_splits.Data(), row_splits.Data() + row_splits.Dim()); EXPECT_EQ(result_row_splits, ans_row_splits_vec); // check values std::vector<int32_t> expected_data = {2, 1, 1, 0, 0, 2, 0, 3, 0, 1}; Array1<int32_t> values = result.values.To(cpu); std::vector<int32_t> data(values.Data(), values.Data() + values.Dim()); EXPECT_EQ(data, expected_data); } } TEST(RaggedShapeOpsTest, TestStack) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[1] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[1] = &shapes[1]; } std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6}, {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); RaggedShape transpose = Transpose(result); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(transpose.RowSplits(i + 1), 
expected_row_splits[i]); } } } { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); shape_vec[j] = new_shape; shapes[j] = &shape_vec[j]; } std::vector<RaggedShape> cpu_shapes(num_shape); for (auto i = 0; i != num_shape; ++i) { cpu_shapes[i] = shape_vec[i].To(cpu); } { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. 
} } } } } } template <typename T> void TestStackRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<Ragged<T>> ragged_vec(num_shape); std::vector<Ragged<T> *> ragged(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); int32_t num_elems = new_shape.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); ragged_vec[j] = Ragged<T>(new_shape, src_values); ragged[j] = &ragged_vec[j]; } std::vector<Ragged<T>> cpu_ragged_vec(num_shape); for (auto j = 0; j != num_shape; ++j) { cpu_ragged_vec[j] = ragged_vec[j].To(cpu); } { // axis == 0 int32_t axis = 0; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] EXPECT_EQ(value, cpu_ragged_vec[i][index]); } } { // axis == 1 int32_t axis = 1; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t j = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] EXPECT_EQ(value, cpu_ragged_vec[j][index]); } } } } } TEST(RaggedTest, TestStackRagged) { TestStackRagged<int32_t>(); TestStackRagged<double>(); } TEST(RaggedTest, TestMaxSize) { for (int32_t i = 0; i <= 10; i++) { ContextPtr c = (i % 2 == 0 ? 
GetCpuContext() : GetCudaContext()); int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(c); int32_t axis = RandInt(1, num_axes - 1); int32_t max_size = shape.MaxSize(axis); if (axis == 0) { K2_CHECK(max_size == shape.Dim0()); } else { Array1<int32_t> row_splits = shape.RowSplits(axis).To(GetCpuContext()); int32_t *row_splits_data = row_splits.Data(); int32_t m = 0; for (int32_t i = 0; i + 1 < row_splits.Dim(); i++) { int32_t size = row_splits_data[i + 1] - row_splits_data[i]; if (size > m) m = size; } ASSERT_EQ(m, max_size); } } } TEST(RaggedShapeOpsTest, TestMakeTransposable) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2, 3, 3}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, // 6, 7}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6, 9, 12}, {0, 2, 3, 3, 4, 6, 7, 10, 10, 10, 12, 13, 13}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}, {0, 0, 1, 3, 4, 4, 5, 6, 6, 6, 9, 9, 10}}; RaggedShape result = MakeTransposable(shape); for (int32_t i = 1; i != 3; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); CheckArrayData(result.RowIds(i), expected_row_ids[i - 1]); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t max_size = shape.MaxSize(1); RaggedShape result = MakeTransposable(shape); shape = shape.To(cpu); result = result.To(cpu); EXPECT_EQ(result.Dim0(), dim0); EXPECT_EQ(result.TotSize(1), dim0 * max_size); // check if every sub list in axis 1 has the same size int32_t *row_splits1 = result.RowSplits(1).Data(); for (int32_t j = 0; j != dim0 + 1; ++j) { EXPECT_EQ(row_splits1[j], j * max_size); } if (num_axes > 2) { for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { const std::vector<int32_t> &index = iter.Value(); EXPECT_EQ(shape[index], result[index]); } } } } } } TEST(RaggedShapeOpsTest, PrefixTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 4); EXPECT_EQ(num_axes, 3); { // n == 0 int32_t n = 0; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n > 0 && n < dim0 int32_t n = 2; std::vector<std::vector<int32_t>> 
expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n == dim0 int32_t n = 4; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowSplits(2), row_splits2); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t n = RandInt(0, dim0); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); // just check row_splits1 here would be fine, as we have tested it with // simple case. We just confirm it can run successfully with kinds of // different random shapes. CheckArrayData(result.RowSplits(1), shape.RowSplits(1).Range(0, n + 1)); } } } } TEST(RaggedShapeOpsTest, GetPrefixesTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t ans_num = RandInt(0, 10); std::vector<int32_t> sizes; for (int32_t j = 0; j != ans_num; ++j) sizes.push_back(RandInt(0, dim0)); ASSERT_EQ(sizes.size(), ans_num); std::vector<RaggedShape> ans = GetPrefixes(shape, sizes); ASSERT_EQ(ans.size(), ans_num); for (int32_t j = 0; j != ans_num; ++j) { int32_t n = sizes[j]; RaggedShape ans_j = ans[j]; EXPECT_TRUE(IsCompatible(shape, ans_j)); EXPECT_EQ(ans_j.Dim0(), n); EXPECT_EQ(ans_j.NumAxes(), num_axes); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t m = 1; m != num_axes; ++m) { EXPECT_TRUE(Equal(result.RowSplits(m), ans_j.RowSplits(m))); } } } } } } TEST(RaggedShapeOpsTest, ArangeTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 3, 4, 6, 7, 10}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 2, 3, 3, 3, // 4, 5, 5, 5, 6, 7, 7, 9}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<int32_t> values(shape.NumElements()); std::iota(values.begin(), values.end(), 10); Array1<int32_t> values_array(context, values); Ragged<int32_t> ragged(shape, values_array); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 6); EXPECT_EQ(num_axes, 3); { // axis == 0, begin == end int32_t axis = 0; int32_t begin = 1, end = 1; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); 
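// An empty range (begin == end) should produce a shape with Dim0() == 0 and
// a trivial row-splits vector {0} on every remaining axis; the Ragged
// overload checked below should likewise return no values.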
EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), 0); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {1, 1}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); EXPECT_EQ(ragged_result.values.Dim(), 0); } { // axis == 0, begin < end == Dim0() + 1 int32_t axis = 0; int32_t begin = 3, end = 6; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 3, 6}, {0, 1, 4, 5, 7, 7, 8}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {8, 16}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); std::vector<int32_t> expected_values = {18, 19, 20, 21, 22, 23, 24, 25}; CheckArrayData(ragged_result.values, expected_values); } { // axis == 1 int32_t axis = 1; int32_t begin = 6, end = 8; std::vector<int32_t> expected_row_splits = {0, 1, 3}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.NumAxes(), 2); CheckArrayData(result.RowSplits(1), expected_row_splits); std::pair<int32_t, int32_t> expected_value_range = {12, 15}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); std::vector<int32_t> expected_values = {22, 23, 24}; CheckArrayData(ragged_result.values, expected_values); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t num_axes = shape.NumAxes(); int32_t axis = RandInt(0, num_axes - 2); int32_t tot_size = shape.TotSize(axis); int32_t begin = RandInt(0, tot_size); int32_t end = RandInt(begin, tot_size); std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), ::max(0, end - begin)); EXPECT_EQ(result.NumAxes(), num_axes - axis); // just check row_splits1 here would be fine, as we have tested it with // simple case. We just confirm it can run successfully with kinds of // different random shapes. 
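// For a non-empty range, RowSplits(1) of the result should equal the slice
// shape.RowSplits(axis + 1)[begin..end] shifted to start at zero; an empty
// range should give the trivial row-splits {0}.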
if (begin == end) { CheckArrayData(result.RowSplits(1), std::vector<int32_t>{0}); } else { Array1<int32_t> row_splits1 = shape.RowSplits(axis + 1).Arange(begin, end + 1); row_splits1 = Minus(row_splits1, row_splits1[0]); CheckArrayData(result.RowSplits(1), row_splits1); } EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); } } } } TEST(RaggedShapeOpsTest, AppendMoreAxes) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c), shape2 = RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c), shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c); RaggedShape appended_axis2_ref = RaggedShape("[ [ [[ x x ][ x ][]] [[x ][x][ x ]] ] [[[x ][ x x][]]]]") .To(c); RaggedShape appended_axis3_ref = RaggedShape("[ [ [[ x x x ]] [[x x x ]] ] [[[x x x]]]]").To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map2; Array1<uint32_t> merge_map3; RaggedShape appended_axis2 = Append(2, 3, srcs, &merge_map2); RaggedShape appended_axis3 = Append(3, 3, srcs, &merge_map3); K2_LOG(INFO) << "appended_axis2 = " << appended_axis2; K2_LOG(INFO) << "appended_axis3 = " << appended_axis3; K2_CHECK(Equal(appended_axis2, appended_axis2_ref)); K2_CHECK(Equal(appended_axis2, appended_axis2_ref)); std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10}; CheckArrayData(merge_map2, merge_values); CheckArrayData(merge_map3, merge_values); } } TEST(RaggedShapeOpsTest, StackMoreAxes) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c), shape2 = RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c), shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c); RaggedShape stacked_ref = RaggedShape( "[ [ [[[ x x ]][[ x ]][[]]] [[[x ]][[x]][[ x ]]] ] " "[[[[x ]][[ x x]][[]]]]]") .To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map2; Array1<uint32_t> merge_map3; RaggedShape stacked_axis2 = Stack(2, 3, srcs, &merge_map2); RaggedShape stacked_axis3 = Stack(3, 3, srcs, &merge_map3); K2_LOG(INFO) << "stacked_axis2 = " << stacked_axis2; K2_LOG(INFO) << "stacked_axis3 = " << stacked_axis3; K2_CHECK(Equal(stacked_axis2, stacked_ref)); K2_CHECK(Equal(stacked_axis2, stacked_ref)); std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10}; CheckArrayData(merge_map2, merge_values); CheckArrayData(merge_map3, merge_values); } } TEST(RaggedShapeOpsTest, Merge) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]") .To(c), // m: 0 3 6, m_out: 0 3, 6, shape2 = RaggedShape("[ [ x] [ x x x ] ]") .To(c), // m: 1 4, m_out: 1, 4 7 10 shape3 = RaggedShape("[ [ ] [ x x ] [] ]").To(c); // m: 2 5 8, m_out: ,2 5, RaggedShape ans_ref = RaggedShape("[ [] [x] [x x x] [] [] [x x] [x x] [x] ]").To(c); // This is a mixed-up kind of merge map that doesn't appear naturally (they // are always in-order from each source, right now) but it should still // work. 
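// Each merge-map entry m encodes source index (m % num_srcs) and position
// within that source (m / num_srcs); Merge() should lay the rows out in
// exactly this order and fill merge_map_out with the analogous
// element-level map.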
std::vector<uint32_t> merge_map_data = {6, 1, 4, 8, 2, 5, 0, 3}; Array1<uint32_t> merge_map_in(c, merge_map_data); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map_out; RaggedShape merged = Merge(3, srcs, merge_map_in, &merge_map_out); ASSERT_EQ(true, Equal(ans_ref, merged)); std::vector<uint32_t> merge_map_out_data = {1, 4, 7, 10, 2, 5, 0, 3, 6}; CheckArrayData(merge_map_out, merge_map_out_data); } } TEST(RaggedTest, AddSuffixToRaggedTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 10; ++i) { Ragged<int32_t> src = RandomRagged<int32_t>().To(context); int32_t num_axes = src.NumAxes(); Array1<int32_t> suffix = RandUniformArray1<int32_t>( context, src.TotSize(num_axes - 2), 0, 100); Ragged<int32_t> dst = AddSuffixToRagged(src, suffix); EXPECT_EQ(dst.NumAxes(), num_axes); EXPECT_EQ(dst.NumElements(), src.NumElements() + suffix.Dim()); Ragged<int32_t> src_cpu = src.To(GetCpuContext()); Ragged<int32_t> dst_cpu = dst.To(GetCpuContext()); for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator(); !src_iter.Done(); src_iter.Next()) { const std::vector<int32_t> &src_indexes = src_iter.Value(); EXPECT_EQ(dst_cpu[src_indexes], src_cpu[src_indexes]); } Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1); Array1<int32_t> suffix_cpu = suffix.To(GetCpuContext()); for (int32_t i = 0; i < suffix.Dim(); ++i) { EXPECT_EQ(dst_cpu.values[src_row_splits[i + 1] + i], suffix_cpu[i]); } } } } } TEST(RaggedTest, AddPrefixToRaggedTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 10; ++i) { Ragged<int32_t> src = RandomRagged<int32_t>().To(context); int32_t num_axes = src.NumAxes(); Array1<int32_t> prefix = RandUniformArray1<int32_t>( context, src.TotSize(num_axes - 2), 0, 100); Ragged<int32_t> dst = AddPrefixToRagged(src, prefix); EXPECT_EQ(dst.NumAxes(), num_axes); EXPECT_EQ(dst.NumElements(), src.NumElements() + prefix.Dim()); Ragged<int32_t> src_cpu = src.To(GetCpuContext()); Ragged<int32_t> dst_cpu = dst.To(GetCpuContext()); for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator(); !src_iter.Done(); src_iter.Next()) { const std::vector<int32_t> &src_indexes = src_iter.Value(); std::vector<int32_t> dst_indexes(src_indexes); dst_indexes.back() += 1; // increase the last index by 1 EXPECT_EQ(dst_cpu[dst_indexes], src_cpu[src_indexes]); } Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1); Array1<int32_t> prefix_cpu = prefix.To(GetCpuContext()); for (int32_t i = 0; i < prefix.Dim(); ++i) { EXPECT_EQ(dst_cpu.values[src_row_splits[i] + i], prefix_cpu[i]); } } } } } TEST(RaggedTest, RemoveValuesLeq) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 3 4 ] [ 5 7 8 ] ]").To(c), s3 = Ragged<int32_t>(" [ [4] [5 7 8]]").To(c), s5 = Ragged<int32_t>(" [ [] [ 7 8]]").To(c); Ragged<int32_t> ans1 = RemoveValuesLeq(r, 3), ans2 = RemoveValuesLeq(r, 5); K2_LOG(INFO) << "ans2 = " << ans2; EXPECT_EQ(true, Equal(ans1, s3)); EXPECT_EQ(true, Equal(ans2, s5)); } } TEST(RaggedTest, IndexArrayRagged) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Array1<float> f(c, std::vector<float>({0.0, 1.0, 2.0, 3.0, 4.0})); Ragged<float> fr = Ragged<float>(" [ [ 2.0 0.0 ] [ 1.0 2.0 3.0 ] ]").To(c), ans = Index(f, r); EXPECT_EQ(true, Equal(ans, fr)); } } TEST(RaggedTest, IndexRaggedRagged) { for (auto &c : 
{GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Ragged<int32_t> s = Ragged<int32_t>(" [ [ 10 10 ] [ 11 ] [ 12 12 ] [ 13 ] [ 14 14] ]") .To(c); // NOLINT Ragged<int32_t> sr1 = Ragged<int32_t>(" [ [ [12 12] [10 10] ] [ [11] [12 12] [13] ] ]") .To(c); // NOLINT Ragged<int32_t> sr2 = Ragged<int32_t>(" [ [ 12 12 10 10 ] [ 11 12 12 13 ] ]") .To(c); // NOLINT EXPECT_EQ(true, Equal(Index(s, r, false), sr1)); EXPECT_EQ(true, Equal(Index(s, r, true), sr2)); } } TEST(RaggedShapeOpsTest, CoveringShape) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { { // simple case RaggedShape shape1 = RaggedShape("[ [ x x ] [] [ x ] ]").To(c), shape2 = RaggedShape("[ [ x] [] [ x x x ] ]").To(c), shape3 = RaggedShape("[ [] [] [ x x ] ]").To(c); RaggedShape expected = RaggedShape("[ [x x] [] [x x x] ]").To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; RaggedShape ans = CoveringShape(3, srcs); EXPECT_TRUE(Equal(expected, ans)); // test CoveringShapeForwardMap { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2, -1, -1}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape2, ans); std::vector<int32_t> expected_map = {0, -1, 1, 2, 3}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape3, ans); std::vector<int32_t> expected_map = {-1, -1, 0, 1, -1}; CheckArrayData(elem_map, expected_map); } } { // another simple case: only one src RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]").To(c); RaggedShape *srcs[] = {&shape1}; RaggedShape ans = CoveringShape(1, srcs); EXPECT_TRUE(Equal(shape1, ans)); // test CoveringShapeForwardMap Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2}; CheckArrayData(elem_map, expected_map); } { // random case for (int32_t i = 0; i != 1; ++i) { int32_t num_shape = RandInt(1, 100); int32_t dim0 = RandInt(1, 1000); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> row_sizes = RandUniformArray1<int32_t>(c, dim0 + 1, 0, 100); ExclusiveSum(row_sizes, &row_sizes); shape_vec[j] = RaggedShape2(&row_sizes, nullptr, -1); ASSERT_TRUE(shape_vec[j].Context()->IsCompatible(*c)); ASSERT_EQ(shape_vec[j].Dim0(), dim0); shapes[j] = &shape_vec[j]; } RaggedShape ans = CoveringShape(num_shape, shapes.data()); std::vector<Array1<int32_t>> elem_map(num_shape); for (int32_t j = 0; j != num_shape; ++j) { elem_map[j] = CoveringShapeForwardMap(shape_vec[j], ans); } // check ans ASSERT_EQ(ans.NumAxes(), 2); ASSERT_EQ(ans.Dim0(), dim0); ASSERT_TRUE(ans.Context()->IsCompatible(*c)); ContextPtr cpu = GetCpuContext(); ans = ans.To(cpu); for (int32_t j = 0; j != num_shape; ++j) shape_vec[j] = shape_vec[j].To(cpu); for (int32_t d = 0; d != dim0; ++d) { int32_t max_row_size = 0; for (int32_t j = 0; j != num_shape; ++j) max_row_size = ::max( shape_vec[j].RowSplits(1)[d + 1] - shape_vec[j].RowSplits(1)[d], max_row_size); EXPECT_EQ(max_row_size, ans.RowSplits(1)[d + 1] - ans.RowSplits(1)[d]); } // test CoveringShapeForwardMap for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> cur_elem_map = elem_map[j].To(cpu); ASSERT_EQ(cur_elem_map.Dim(), ans.NumElements()); int32_t n = 0; for (RaggedShapeIndexIterator ans_iter = ans.Iterator(); !ans_iter.Done(); ans_iter.Next()) { const std::vector<int32_t> &ans_indexes = ans_iter.Value(); int32_t 
src_shape_linear_index = cur_elem_map[n]; if (src_shape_linear_index != -1) { EXPECT_EQ(src_shape_linear_index, shape_vec[j][ans_indexes]); } ++n; } } } } } } } // namespace k2
ffae5d6874186d1c791add6af72a2713e7a016f4.cu
/** * @brief * ragged_test * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Yiming Wang * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <gmock/gmock.h> #include <gtest/gtest.h> #include <algorithm> #include <numeric> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/tensor.h" #include "k2/csrc/test_utils.h" namespace k2 { class RaggedShapeOpsSuiteTest : public ::testing::Test { protected: RaggedShapeOpsSuiteTest() { ContextPtr context = GetCpuContext(); const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; std::vector<RaggedShapeLayer> axes; axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); simple_shape_ = RaggedShape(axes, true); // random_shape_ is on CPU random_shape_ = RandomRaggedShape(true, // set_row_ids 3, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements } RaggedShape simple_shape_; RaggedShape random_shape_; }; TEST(RaggedShapeTest, TestConstructFromString) { RaggedShape rs(" [ [ x x ] [x] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); RaggedShape rs2(" [ [ [ x x ] ] [[x]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; K2_CHECK_EQ(RaggedShape("[ ]").Dim0(), 0); ASSERT_DEATH(RaggedShape(" [ [ x x ] [x] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x x ] [[x]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x [] x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x | x ] "), ""); for (int i = 0; i < 5; i++) { RaggedShape rs = RandomRaggedShape(true, 2, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements std::ostringstream os; os << rs; RaggedShape rs2; std::istringstream is(os.str()); K2_LOG(INFO) << "Shape is: " << os.str(); is >> rs2; K2_CHECK(is.good()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. 
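// Reading the printed form back in should reproduce the original shape,
// up to the empty-shape ambiguity described above.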
K2_CHECK(Equal(rs, rs2) || rs.NumElements() == 0); } } TEST(RaggedTest, TestRaggedFromString) { Ragged<int32_t> rs(" [ [ 1 2 ] [3] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); K2_CHECK_EQ(rs.values.Back(), 3); K2_CHECK_EQ(rs.values[0], 1); Ragged<int32_t> rs2(" [ [ [ 0 5 ] ] [[10]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [0] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [[0]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 [] 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 | 0 ] "), ""); for (int32_t i = 0; i < 5; i++) { Ragged<int32_t> r = RandomRagged<int32_t>(); std::ostringstream os; os << r; Ragged<int32_t> r2(os.str()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. K2_CHECK(Equal(r, r2) || r.values.Dim() == 0); } } template <typename T> void TestMaxPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case const std::vector<int32_t> row_splits = {0}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = 0; std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> max_values(context, num_rows); // just run to check if there's any error MaxPerSublist(ragged, 1, &max_values); EXPECT_EQ(max_values.Dim(), 0); } { const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 2, 8, 0, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> max_values(context, num_rows); T default_value = 2; MaxPerSublist(ragged, default_value, &max_values); // copy memory from GPU/CPU to CPU std::vector<T> cpu_data(max_values.Dim()); max_values.Context()->CopyDataTo( max_values.Dim() * max_values.ElementSize(), max_values.Data(), cpu, cpu_data.data()); std::vector<T> expected_data = {3, default_value, 8, default_value}; EXPECT_EQ(cpu_data, expected_data); } { // test with random large size const int32_t min_num_elements = 2000; // not random shape is on CPU RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_elements, 5000); ASSERT_EQ(shape.NumAxes(), 2); RaggedShape gpu_shape; if (context->GetDeviceType() == kCuda) { // copy shape to GPU const Array1<T> &row_splits = shape.RowSplits(1); RaggedShapeLayer shape_dim; shape_dim.row_splits = row_splits.To(GetCudaContext()); shape_dim.cached_tot_size = shape.NumElements(); std::vector<RaggedShapeLayer> axes = {shape_dim}; gpu_shape = RaggedShape(axes, true); } int32_t num_elems = shape.NumElements(); std::vector<T> data(num_elems); for (int32_t i = 0; i != 10; ++i) { std::iota(data.begin(), data.end(), 0); // randomly set data[pos] = num_elems which is // greater than any element in data int32_t pos = RandInt(0, num_elems - 1); data[pos] = num_elems; // find the corresponding row int32_t num_rows = shape.Dim0(); const int32_t *row_splits_data 
= shape.RowSplits(1).Data(); int32_t row = 0; for (int32_t i = 0; i < num_rows; ++i) { if (pos >= row_splits_data[i] && pos < row_splits_data[i + 1]) { row = i; break; } } Array1<T> values(context, data); Ragged<T> ragged(context->GetDeviceType() == kCuda ? gpu_shape : shape, values); Array1<T> max_values(context, num_rows); T default_value = 0; MaxPerSublist(ragged, default_value, &max_values); EXPECT_EQ(max_values[row], num_elems); } } } } TEST(RaggedShapeOpsTest, MaxPerSubListTest) { TestMaxPerSubListTest<int32_t>(); } template <typename T> void TestMinPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case std::vector<int32_t> row_splits_vec = {0}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> min_values(context, num_rows); // just run to check if there's any error MinPerSublist(ragged, 1, &min_values); EXPECT_EQ(min_values.Dim(), 0); } { std::vector<int32_t> row_splits_vec = {0, 2, 2, 5, 6}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); const std::vector<T> values_vec = {1, 3, 3, 8, 4, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> min_values(context, num_rows); T default_value = 2; MinPerSublist(ragged, default_value, &min_values); // copy memory from GPU/CPU to CPU min_values = min_values.To(cpu); std::vector<T> cpu_data(min_values.Data(), min_values.Data() + min_values.Dim()); std::vector<T> expected_data = {1, default_value, default_value, -1}; EXPECT_EQ(cpu_data, expected_data); } // May add tests for random large size? 
(but maybe it's fine to not add as // we have tested large cases in MaxPerSubList) } } TEST(RaggedShapeOpsTest, MinPerSubListTest) { TestMinPerSubListTest<int32_t>(); } template <typename T> void TestAndOrPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // And const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 6, 11, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = -1; AndPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {1, -1, 2, 0}; EXPECT_EQ(cpu_data, expected_data); } { // Or const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 4, 6, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = 0; OrPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {3, 0, 7, 0}; EXPECT_EQ(cpu_data, expected_data); } } } TEST(RaggedShapeOpsTest, AndOrPerSubListTest) { TestAndOrPerSubListTest<int32_t>(); } void TestUnsqueeze(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); src_shape.Populate(); // set row_ids { // axis = 0. 
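// Unsqueeze at axis 0 prepends a length-1 axis: the new RowSplits(1) should
// be [0, dim0], the new RowIds(1) should be all zeros, and every original
// layer should reappear one position further down.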
RaggedShape shape = Unsqueeze(src_shape, 0); int32_t dim0 = src_shape.Dim0(); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, dim0}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data(dim0, 0); CheckArrayData(row_ids0, data); } { for (size_t i = 0; i != src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = Unsqueeze(src_shape, axis); int32_t tot_size = shape.TotSize(axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { for (int32_t i = 0; i < axis; ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i].row_ids); } } { const Array1<int32_t> &row_splits = dest_axes[axis].row_splits; std::vector<int32_t> data(tot_size + 1); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_splits, data); } { const Array1<int32_t> &row_ids = dest_axes[axis].row_ids; std::vector<int32_t> data(tot_size); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_ids, data); } { for (std::size_t i = axis; i < src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestUnsqueeze) { TestUnsqueeze(simple_shape_); TestUnsqueeze(random_shape_); } TEST(RaggedShapeOpsTest, TestUnsqueezeParallel) { for (int32_t i = 0; i < 10; i++) { ContextPtr c = (i % 2 == 0 ? GetCpuContext() : GetCudaContext()); int32_t num_shapes = RandInt(0, 10); std::vector<RaggedShape *> orig_shapes; for (int32_t i = 0; i < num_shapes; i++) orig_shapes.push_back( new RaggedShape(RandomRaggedShape(false, 2, 5, 0, 1000).To(c))); int32_t axis = 0; // only one supported for now. std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(num_shapes, orig_shapes.data(), axis); for (int32_t i = 0; i < num_shapes; i++) { ASSERT_EQ(unsqueezed[i].Validate(), true); RaggedShape temp = RemoveAxis(unsqueezed[i], axis); ASSERT_EQ(Equal(temp, *(orig_shapes[i])), true); delete orig_shapes[i]; } } } void TestRemoveAxis(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); ASSERT_EQ(src_shape.NumAxes(), 4); { // axis = 0. 
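// Removing axis 0 just drops the outermost layer; the surviving layers
// should match layers 1 and 2 of the source exactly.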
int32_t axis = 0; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, 3, 7, 10}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data = {0, 0, 0, 1, 1, 1, 1, 2, 2, 2}; CheckArrayData(row_ids0, data); } { for (std::size_t i = 1; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 3 int32_t axis = 3; // the last axis RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRemoveAxis) { TestRemoveAxis(simple_shape_); } TEST(RaggedShapeOpsTest, TestGetOffsets) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { for (int32_t i = 0; i != 2; ++i) { int32_t num_shape = RandInt(10, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } RaggedShape **shapes_ptr = shapes.data(); Array2<int32_t> offsets = GetOffsets(num_shape, shapes_ptr); ASSERT_EQ(offsets.Dim0(), num_axes + 1); ASSERT_EQ(offsets.Dim1(), num_shape + 1); auto acc = offsets.Accessor(); for (int32_t axis = 0; axis <= num_axes; ++axis) { int32_t sum = 0; for (int32_t j = 0; j <= num_shape; ++j) { EXPECT_EQ(acc(axis, j), sum); if (j < num_shape) { sum += (axis == 0 ? 1 : shape_vec[j].TotSize(axis - 1)); } } } } } } // returns a random ragged shape where the dims on axis 1 are all the same // (so: can be transposed). 
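// It does so by taking the largest divisor d of the random shape's Dim0()
// with d * d <= Dim0() and prepending a regular top-level layer of d rows,
// each containing Dim0() / d of the original rows.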
RaggedShape RandomRaggedShapeToTranspose(ContextPtr c) { ContextPtr c_cpu = GetCpuContext(); RaggedShape random = RandomRaggedShape(false, 2, 4, 0, 5000).To(c); int32_t input_dim0 = random.Dim0(), divisor = 1; for (int32_t i = 1; i * i <= input_dim0; i++) { if (input_dim0 % i == 0 && i > divisor) divisor = i; } int32_t output_dim0 = divisor, output_dim1 = input_dim0 / divisor; Array1<int32_t> row_splits = Range<int32_t>(c, output_dim0 + 1, 0, output_dim1); int32_t cached_tot_size = input_dim0; RaggedShape top_level_shape = RaggedShape2(&row_splits, nullptr, cached_tot_size); return ComposeRaggedShapes(top_level_shape, random); } TEST(RaggedShapeOpsTest, TestTranspose) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); RaggedShape shape = Transpose(src_shape); EXPECT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); RaggedShape transposed = Transpose(to_transpose); if (context->GetDeviceType() != kCpu) { to_transpose = to_transpose.To(cpu); transposed = transposed.To(cpu); } for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t i = transposed[index]; // Just make sure this doesn't crash, // don't need the value. std::swap(index[0], index[1]); i = to_transpose[index]; // don't need the value, just need to make // sure it's an allowable index. ++i; // this line just suppresses the warning `variable i set but not // used` } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); std::swap(index[0], index[1]); int32_t i = transposed[index]; // don't need the value, just need to // make sure it's an allowable index. 
} } } } } template <typename T> void TestTransposeRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); std::vector<T> values = {0, 1, 2, 3, 4, 5, 8, 7, 6, 9, 10, 15}; ASSERT_EQ(values.size(), src_shape.NumElements()); Array1<T> values_array(context, values); Ragged<T> ragged(src_shape, values_array); Ragged<T> ans = Transpose(ragged); RaggedShape shape = ans.shape; // Check shape ASSERT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); // Check values CheckArrayData(ans.values, {0, 1, 2, 4, 5, 8, 6, 9, 3, 7, 10, 15}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); int32_t num_elems = to_transpose.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); Ragged<T> src(to_transpose, src_values); Ragged<T> ans = Transpose(src); if (context->GetDeviceType() == kCuda) { src = src.To(cpu); ans = ans.To(cpu); to_transpose = to_transpose.To(cpu); } RaggedShape transposed = ans.shape; for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = ans[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, src[index]); } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = src[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, ans[index]); } } } } } TEST(RaggedTest, TestTransposeRagged) { TestTransposeRagged<int32_t>(); TestTransposeRagged<double>(); } TEST(RaggedShapeOpsTest, TestRowSplitsPtr) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape = RandomRaggedShape().To(context); ASSERT_GE(shape.NumAxes(), 2); Array1<int32_t *> ptrs = GetRowSplitsPtr(shape); ASSERT_EQ(ptrs.Dim(), shape.NumAxes() - 1); // as num_axes is not so big, access (may copy memory) it in a loop is fine. 
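// Each entry of `ptrs` should be exactly the Data() pointer of the
// corresponding row-splits array, i.e. ptrs[i] == shape.RowSplits(i + 1).Data().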
for (int32_t i = 0; i != ptrs.Dim(); ++i) { EXPECT_EQ(ptrs[i], shape.RowSplits(i + 1).Data()); } } } void TestRaggedShape2(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 2); Array1<int32_t> row_splits = src_shape.RowSplits(1); Array1<int32_t> row_ids = src_shape.RowIds(1); int32_t cached_tot_size = src_shape.TotSize(1); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape2(&row_splits, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // both row_splits and row_ids are non-null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null RaggedShape result = RaggedShape2(&row_splits, nullptr, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } // note if row_splits == null, then we suppose there's no empty rows after // the last row-id in row_ids if (row_splits.Dim() == (row_ids.Dim() == 0 ? 1 : row_ids.Back() + 2)) { { // row_splits is null RaggedShape result = RaggedShape2(nullptr, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_splits is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(nullptr, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape2) { TestRaggedShape2(simple_shape_); TestRaggedShape2(random_shape_); } void TestRaggedShape3(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); int32_t cached_tot_size1 = src_shape.TotSize(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); int32_t cached_tot_size2 = src_shape.TotSize(2); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape3(&row_splits1, &row_ids1, cached_tot_size1, &row_splits2, &row_ids2, cached_tot_size2); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } { // row_ids is non-null, cached_tot_size = -1 RaggedShape result = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits1); 
CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // note if row_splits == null, then we suppose there's no empty rows after // the last row-id in row_ids bool valid1 = (row_splits1.Dim() == (row_ids1.Dim() == 0 ? 1 : row_ids1.Back() + 2)); bool valid2 = (row_splits2.Dim() == (row_ids2.Dim() == 0 ? 1 : row_ids2.Back() + 2)); if (valid1 && valid2) { RaggedShape result = RaggedShape3(nullptr, &row_ids1, -1, nullptr, &row_ids2, -1); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // TODO(haowen): add more cases for other branches } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape3) { TestRaggedShape3(simple_shape_); TestRaggedShape3(random_shape_); } void TestComposeShape(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); RaggedShape shape1 = RaggedShape2(&row_splits1, nullptr, -1); RaggedShape shape2 = RaggedShape2(&row_splits2, nullptr, -1); RaggedShape result = ComposeRaggedShapes(shape1, shape2); ASSERT_EQ(result.NumAxes(), 3); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); } } TEST_F(RaggedShapeOpsSuiteTest, TestComposeShape) { TestComposeShape(simple_shape_); TestComposeShape(random_shape_); } void TestShapeFromTotSize(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 2); int32_t num_axes = src_shape.NumAxes(); std::vector<int32_t> tot_sizes(num_axes); for (int32_t i = 0; i != num_axes; ++i) { tot_sizes[i] = src_shape.TotSize(i); } RaggedShape result = RaggedShapeFromTotSizes(context, num_axes, tot_sizes.data()); ASSERT_EQ(result.NumAxes(), num_axes); for (int32_t i = 0; i < num_axes; ++i) { EXPECT_EQ(result.TotSize(i), src_shape.TotSize(i)); if (i > 0) { EXPECT_EQ(result.RowSplits(i).Dim(), src_shape.RowSplits(i).Dim()); EXPECT_EQ(result.RowIds(i).Dim(), src_shape.RowIds(i).Dim()); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestShapeFromTotSize) { TestShapeFromTotSize(simple_shape_); TestShapeFromTotSize(random_shape_); } template <typename T> void TestRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // constructed with row_splits and row_ids // RaggedTensor4 t = [ // [ [[ 1, 2], [4]], [[3, 0]] ], // [ [[7, 8, 9]], [[6], [3, 5, 7]], [[2]] ], // [ [[3, 4], [], [8]] ] // ] const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 
4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; const std::vector<T> values_vec = {1, 2, 4, 3, 0, 7, 8, 9, 6, 3, 5, 7, 2, 3, 4, 8}; std::vector<RaggedShapeLayer> axes; axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); RaggedShape shape(axes, true); Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); // test Index(axis, i) { // values: [[[ 1, 2], [4]], [[3, 0]]] Ragged<T> sub_raggged = ragged.Index(0, 0); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 2, 3}, {0, 2, 3, 5}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {1, 2, 4, 3, 0}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[7, 8, 9]], [[6], [3, 5, 7]], [[2]]] Ragged<T> sub_raggged = ragged.Index(0, 1); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 1, 3, 4}, {0, 3, 4, 7, 8}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {7, 8, 9, 6, 3, 5, 7, 2}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[3, 4], [], [8]]] Ragged<T> sub_raggged = ragged.Index(0, 2); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 3}, {0, 2, 2, 3}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {3, 4, 8}; CheckArrayData<T>(sub_values, sub_values_vec); } // test operator[](const std::vector<int32_t> &indexes) if (context->GetDeviceType() == kCpu) { { std::vector<int32_t> indexes = {0, 0, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 0); EXPECT_EQ(ragged[indexes], 1); } { std::vector<int32_t> indexes = {0, 1, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 3); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {1, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 6); EXPECT_EQ(ragged[indexes], 8); } { std::vector<int32_t> indexes = {1, 1, 1, 0}; EXPECT_EQ(ragged.shape[indexes], 9); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {2, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 14); EXPECT_EQ(ragged[indexes], 4); } { std::vector<int32_t> indexes = {2, 0, 2, 0}; EXPECT_EQ(ragged.shape[indexes], 15); EXPECT_EQ(ragged[indexes], 8); } } const std::vector<std::vector<int32_t>> row_splits_vec = { row_splits1, row_splits2, row_splits3}; // test To(ctx) { // to GPU Ragged<T> other = ragged.To(GetCudaContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } { // to CPU Ragged<T> other = ragged.To(GetCpuContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } } } } template <typename T, typename OP = LessThan<T>> static void 
CpuSortSublists(const Array1<int32_t> &row_splits, Array1<T> *src) { K2_CHECK(src->Context()->GetDeviceType() == kCpu); T *p = src->Data(); OP comp = OP(); for (int32_t i = 0; i < row_splits.Dim() - 1; ++i) { int32_t cur = row_splits[i]; int32_t next = row_splits[i + 1]; std::sort(p + cur, p + next, comp); } } template <typename T, typename OP = LessThan<T>> static void TestSortSublists() { auto cpu_context = GetCpuContext(); auto cuda_context = GetCudaContext(); RaggedShape shape = RandomRaggedShape(false, // set_row_ids 2, // min_num_axes 4, // max_num_axes 1, // min_num_elements 2000); // max_num_elements Array1<T> values = RandUniformArray1<T>(shape.Context(), shape.NumElements(), -2000, 2000); Ragged<T> ragged(shape, values); ragged = ragged.To(cuda_context); values = values.To(cpu_context); // to be sorted by cpu Array1<T> unsorted = values.Clone(); Array1<int32_t> order(ragged.Context(), ragged.values.Dim()); SortSublists<T, OP>(&ragged, &order); Array1<int32_t> &segment = ragged.shape.RowSplits(ragged.NumAxes() - 1); CpuSortSublists<T, OP>(segment, &values); int32_t n = order.Dim(); for (int i = 0; i != n; ++i) { EXPECT_EQ(values[i], ragged.values[i]); EXPECT_EQ(ragged.values[i], unsorted[order[i]]); } } TEST(RaggedTest, Ragged) { TestRagged<int32_t>(); TestRagged<double>(); TestSortSublists<int32_t>(); TestSortSublists<double>(); } TEST(RaggedShapeOpsTest, TestAppend) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes[1] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[1] = &shapes[1]; } { // axis == 1 RaggedShape result = Append(1, 2, shapes_ptr.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } } { // axis == 0 RaggedShape result = Append(0, 2, shapes_ptr.data()); // get result 
splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto i = 0; i < 2; ++i) { std::vector<const Array1<int32_t> *> splits_ptr = { &row_splits_vec[i][0], &row_splits_vec[i][1]}; Array1<int32_t> curr_row_splits = SpliceRowSplits(2, splits_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), result_splits[i]); CheckArrayData(result.RowIds(i + 1), result_ids[i]); } } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } // only test case axis == 0, test axis==1 with simple case is good // enough as it just calls Stack RaggedShape result = Append(0, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (int32_t axis = 1; axis < num_axes; ++axis) { std::vector<Array1<int32_t>> splits_vec(num_shape); std::vector<const Array1<int32_t> *> splits_vec_ptr(num_shape); for (int32_t n = 0; n != num_shape; ++n) { splits_vec[n] = shape_vec[n].RowSplits(axis); splits_vec_ptr[n] = &splits_vec[n]; } Array1<int32_t> curr_row_splits = SpliceRowSplits(num_shape, splits_vec_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } // check data for (int32_t axis = 1; axis < num_axes; ++axis) { CheckArrayData(result.RowSplits(axis), result_splits[axis - 1]); CheckArrayData(result.RowIds(axis), result_ids[axis - 1]); } } } } } template <typename T> void TestAppendRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // TODO(haowen): remove duplicate code in TestAppend above. // test with simple case could be good enough, as we have tested // Append(RaggedShape&) already. 
std::vector<Ragged<T>> ragged_vec(2); std::vector<Ragged<T> *> ragged(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<T> values_vec = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[0] = Ragged<T>(shape, values); ragged[0] = &ragged_vec[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; const std::vector<T> values_vec = {20, 21, 23, 28, 30, 32, 35}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[1] = Ragged<T>(shape, values); ragged[1] = &ragged_vec[1]; } { // axis == 0 Ragged<T> result = Append(0, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 1, 1, 1, 2, 3, 4, 4, 5}, {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18, 20, 21, 23, 28, 30, 32, 35}; CheckArrayData(result.values, expected_data); } { // axis == 1 Ragged<T> result = Append(1, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 20, 21, 23, 7, 9, 10, 12, 28, 30, 14, 15, 18, 32, 35}; CheckArrayData(result.values, expected_data); } } } TEST(RaggedTest, TestAppendRagged) { TestAppendRagged<int32_t>(); TestAppendRagged<double>(); } void CheckResultOfIndex(const ContextPtr &context, RaggedShape shape, Array1<int32_t> new2old, RaggedShape result) { K2_CHECK(context->IsCompatible(*shape.Context())); ContextPtr cpu = GetCpuContext(); // will use to copy data int32_t num_axes = shape.NumAxes(); int32_t src_dim0 = shape.Dim0(), result_dim0 = result.Dim0(); if (result_dim0 == 0) { std::vector<int32_t> empty_row_splits = {0}; for (int32_t i = 0; i < num_axes - 1; ++i) { CheckArrayData(result.RowSplits(i + 1), empty_row_splits); EXPECT_EQ(result.RowIds(i + 1).Dim(), 0); } return; } Array2<int32_t> old_offsets(context, num_axes, src_dim0 + 1); auto old_offsets_acc = old_offsets.Accessor(); Array1<int32_t *> row_splits_ptrs 
= GetRowSplitsPtr(shape); int32_t **row_splits_ptrs_data = row_splits_ptrs.Data(); // Set old_offsets K2_EVAL( context, src_dim0 + 1, lambda_get_old_offsets, (int32_t i)->void { // 0 <= i <= dim0 int32_t cur_offset = i; for (int32_t axis = 0; axis < num_axes; axis++) { old_offsets_acc(axis, i) = cur_offset; if (axis + 1 == num_axes) return; cur_offset = row_splits_ptrs_data[axis][cur_offset]; } }); old_offsets = old_offsets.To(cpu); auto cpu_offsets_acc = old_offsets.Accessor(); shape = shape.To(cpu); new2old = new2old.To(cpu); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto axis = 0; axis < num_axes - 1; ++axis) { Array1<int32_t> curr_row_splits = shape.RowSplits(axis + 1); std::vector<Array1<int32_t>> splits_vec(result_dim0); std::vector<const Array1<int32_t> *> splits_vec_ptr(result_dim0); for (int32_t m = 0; m != result_dim0; ++m) { int32_t old_idx = new2old[m]; int32_t start = cpu_offsets_acc(axis, old_idx); int32_t end = cpu_offsets_acc(axis, old_idx + 1); Array1<int32_t> sub_list = curr_row_splits.Range(start, end - start + 1); Array1<int32_t> copy_sub_list(cpu, sub_list.Dim()); copy_sub_list.CopyFrom(sub_list); int32_t *data = copy_sub_list.Data(); int32_t init = data[0]; for (int32_t n = 0; n != copy_sub_list.Dim(); ++n) { data[n] -= init; } splits_vec[m] = copy_sub_list; splits_vec_ptr[m] = &splits_vec[m]; } Array1<int32_t> result_row_splits = SpliceRowSplits(result_dim0, splits_vec_ptr.data()); result_splits.push_back(result_row_splits); Array1<int32_t> result_row_ids(cpu, result_row_splits.Back()); RowSplitsToRowIds(result_row_splits, &result_row_ids); result_ids.push_back(result_row_ids); } for (int32_t i = 0; i < num_axes - 1; ++i) { CheckArrayData(result.RowSplits(i + 1), result_splits[i]); CheckArrayData(result.RowIds(i + 1), result_ids[i]); } } TEST(RaggedShapeOpsTest, TestIndex) { for (int i = 0; i < 5; i++) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); std::vector<int32_t> new2old_vec = {2, 1}; Array1<int32_t> new2old(context, new2old_vec); Array1<int32_t> value_indexes_out; RaggedShape result = Index(shape, new2old, &value_indexes_out); // fsa 2, state_idx01 {5}, arc_idx012 {7, 8, 9} // fsa 1, state_idx01 {2, 3, 4}, arc_idx012 {{3},{4, 5}, {6}} CheckArrayData(value_indexes_out, std::vector<int32_t>{7, 8, 9, 3, 4, 5, 6}); CheckResultOfIndex(context, shape, new2old, result); } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(), result_dim0 = RandInt(0, 10); if (dim0 == 0) result_dim0 = 0; std::vector<int32_t> new2old_vec(result_dim0); for (int i = 0; i < result_dim0; i++) new2old_vec[i] = RandInt(0, dim0 - 1); Array1<int32_t> new2old(context, new2old_vec); 
Array1<int32_t> value_indexes; RaggedShape result = Index(shape, new2old, &value_indexes); CheckResultOfIndex(context, shape, new2old, result); K2_LOG(INFO) << "Value_indexes = " << value_indexes; } } } } } TEST(GetTransposeReordering, NoDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 e4 // row2 f5 // row3 g6 h7 i8 // row4 j9 // row5 k10 l11 std::vector<int32_t> col_indexes{4, 5, 0, 1, 5, 3, 0, 2, 4, 5, 1, 4}; std::vector<int32_t> _row_splits{0, 2, 5, 6, 9, 10, 12}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 6, 3, 10, 7, 5, 0, 8, 11, 1, 4, 9}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, ThreeAxesEmptyCase) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> ragged("[ [ [ ] ] ]"); ragged = ragged.To(context); Array1<int32_t> order = GetTransposeReordering(ragged, 0); } } TEST(GetTransposeReordering, NoDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 // row2 e4 // row3 f5 g6 h7 // row4 i8 // row5 j9 k10 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, std::vector<int32_t>{1, 3, 0, 2, 1, 0, 1, 3, 5, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 2, 4, 5, 8, 9, 11}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 5, 0, 4, 6, 3, 1, 7, 9, 8, 10}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 std::vector<int32_t> col_indexes{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 3, 5}; std::vector<int32_t> _row_splits{0, 5, 8, 9, 13, 16, 18}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 16, 13, 14, 15, 17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, std::vector<int32_t>{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 5, 8, 9, 13, 16, 18}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 13, 14, 15, 16, 
17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, RandomFsaVecTest) { for (int32_t iter = 0; iter != 8; ++iter) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { int n = RandInt(100, 200); int32_t min_num_fsas = n; int32_t max_num_fsas = n * 2; bool acyclic = false; int32_t max_symbol = 100; int32_t min_num_arcs = min_num_fsas * 10; int32_t max_num_arcs = max_num_fsas * 20; FsaVec fsas = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic, max_symbol, min_num_arcs, max_num_arcs); fsas = fsas.To(context); Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_states = fsas.TotSize(1); int32_t num_arcs = fsas.TotSize(2); Array1<int32_t> order = GetTransposeReordering(dest_states_tensor, num_states); Sort(&order); ASSERT_EQ(order.Dim(), num_arcs); Array1<int32_t> expected = Range<int32_t>(context, num_arcs, 0); CheckArrayData(order, expected); } } } TEST(ChangeSublistSize, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9}); size_delta = -2; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 0, 1}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5}); } } TEST(ChangeSublistSizePinned, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9, 9}); size_delta = -3; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 0, 0, 0}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5, 5}); } } } TEST(ChangeSublistSize, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 21}); // it is an error to use -2 here // because the state (state_idx01 == 2) has only 1 entry size_delta = -1; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 2, 3, 3, 4, 6}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 11}); } } TEST(ChangeSublistSizePinned, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, 
nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 16, 21}); size_delta = -2; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 1, 1, 1, 1, 1, 2}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); } } TEST(RaggedShapeOpsTest, TestGetCountsPartitioned) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // Testing with simple case is good enough as we have tested GetCounts() // with random large size and GetCountsPartitioned just calls GetCounts. std::vector<int32_t> src_row_splits_vec = {0, 3, 4, 6, 10}; Array1<int32_t> src_row_splits(context, src_row_splits_vec); RaggedShape src_shape = RaggedShape2(&src_row_splits, nullptr, -1); std::vector<int32_t> src_values_vec = {0, 1, 0, 2, 5, 5, 7, 7, 9, 7}; Array1<int32_t> src_values(context, src_values_vec); Ragged<int32_t> src(src_shape, src_values); std::vector<int32_t> ans_row_splits_vec = {0, 2, 4, 7, 10}; Array1<int32_t> ans_row_splits(context, ans_row_splits_vec); RaggedShape ans_shape = RaggedShape2(&ans_row_splits, nullptr, -1); Ragged<int32_t> result = GetCountsPartitioned(src, ans_shape); ASSERT_EQ(result.NumAxes(), 2); // Check row_splits Array1<int32_t> row_splits = result.shape.RowSplits(1).To(cpu); std::vector<int32_t> result_row_splits( row_splits.Data(), row_splits.Data() + row_splits.Dim()); EXPECT_EQ(result_row_splits, ans_row_splits_vec); // check values std::vector<int32_t> expected_data = {2, 1, 1, 0, 0, 2, 0, 3, 0, 1}; Array1<int32_t> values = result.values.To(cpu); std::vector<int32_t> data(values.Data(), values.Data() + values.Dim()); EXPECT_EQ(data, expected_data); } } TEST(RaggedShapeOpsTest, TestStack) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[1] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[1] = &shapes[1]; } std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6}, {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); RaggedShape transpose = Transpose(result); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(transpose.RowSplits(i + 1), 
expected_row_splits[i]); } } } { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); shape_vec[j] = new_shape; shapes[j] = &shape_vec[j]; } std::vector<RaggedShape> cpu_shapes(num_shape); for (auto i = 0; i != num_shape; ++i) { cpu_shapes[i] = shape_vec[i].To(cpu); } { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. 
} } } } } } template <typename T> void TestStackRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<Ragged<T>> ragged_vec(num_shape); std::vector<Ragged<T> *> ragged(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); int32_t num_elems = new_shape.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); ragged_vec[j] = Ragged<T>(new_shape, src_values); ragged[j] = &ragged_vec[j]; } std::vector<Ragged<T>> cpu_ragged_vec(num_shape); for (auto j = 0; j != num_shape; ++j) { cpu_ragged_vec[j] = ragged_vec[j].To(cpu); } { // axis == 0 int32_t axis = 0; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] EXPECT_EQ(value, cpu_ragged_vec[i][index]); } } { // axis == 1 int32_t axis = 1; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t j = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] EXPECT_EQ(value, cpu_ragged_vec[j][index]); } } } } } TEST(RaggedTest, TestStackRagged) { TestStackRagged<int32_t>(); TestStackRagged<double>(); } TEST(RaggedTest, TestMaxSize) { for (int32_t i = 0; i <= 10; i++) { ContextPtr c = (i % 2 == 0 ? 
GetCpuContext() : GetCudaContext()); int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(c); int32_t axis = RandInt(1, num_axes - 1); int32_t max_size = shape.MaxSize(axis); if (axis == 0) { K2_CHECK(max_size == shape.Dim0()); } else { Array1<int32_t> row_splits = shape.RowSplits(axis).To(GetCpuContext()); int32_t *row_splits_data = row_splits.Data(); int32_t m = 0; for (int32_t i = 0; i + 1 < row_splits.Dim(); i++) { int32_t size = row_splits_data[i + 1] - row_splits_data[i]; if (size > m) m = size; } ASSERT_EQ(m, max_size); } } } TEST(RaggedShapeOpsTest, TestMakeTransposable) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2, 3, 3}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, // 6, 7}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6, 9, 12}, {0, 2, 3, 3, 4, 6, 7, 10, 10, 10, 12, 13, 13}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}, {0, 0, 1, 3, 4, 4, 5, 6, 6, 6, 9, 9, 10}}; RaggedShape result = MakeTransposable(shape); for (int32_t i = 1; i != 3; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); CheckArrayData(result.RowIds(i), expected_row_ids[i - 1]); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t max_size = shape.MaxSize(1); RaggedShape result = MakeTransposable(shape); shape = shape.To(cpu); result = result.To(cpu); EXPECT_EQ(result.Dim0(), dim0); EXPECT_EQ(result.TotSize(1), dim0 * max_size); // check if every sub list in axis 1 has the same size int32_t *row_splits1 = result.RowSplits(1).Data(); for (int32_t j = 0; j != dim0 + 1; ++j) { EXPECT_EQ(row_splits1[j], j * max_size); } if (num_axes > 2) { for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { const std::vector<int32_t> &index = iter.Value(); EXPECT_EQ(shape[index], result[index]); } } } } } } TEST(RaggedShapeOpsTest, PrefixTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 4); EXPECT_EQ(num_axes, 3); { // n == 0 int32_t n = 0; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n > 0 && n < dim0 int32_t n = 2; std::vector<std::vector<int32_t>> 
expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n == dim0 int32_t n = 4; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowSplits(2), row_splits2); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t n = RandInt(0, dim0); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); // just check row_splits1 here would be fine, as we have tested it with // simple case. We just confirm it can run successfully with kinds of // different random shapes. CheckArrayData(result.RowSplits(1), shape.RowSplits(1).Range(0, n + 1)); } } } } TEST(RaggedShapeOpsTest, GetPrefixesTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t ans_num = RandInt(0, 10); std::vector<int32_t> sizes; for (int32_t j = 0; j != ans_num; ++j) sizes.push_back(RandInt(0, dim0)); ASSERT_EQ(sizes.size(), ans_num); std::vector<RaggedShape> ans = GetPrefixes(shape, sizes); ASSERT_EQ(ans.size(), ans_num); for (int32_t j = 0; j != ans_num; ++j) { int32_t n = sizes[j]; RaggedShape ans_j = ans[j]; EXPECT_TRUE(IsCompatible(shape, ans_j)); EXPECT_EQ(ans_j.Dim0(), n); EXPECT_EQ(ans_j.NumAxes(), num_axes); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t m = 1; m != num_axes; ++m) { EXPECT_TRUE(Equal(result.RowSplits(m), ans_j.RowSplits(m))); } } } } } } TEST(RaggedShapeOpsTest, ArangeTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 3, 4, 6, 7, 10}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 2, 3, 3, 3, // 4, 5, 5, 5, 6, 7, 7, 9}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<int32_t> values(shape.NumElements()); std::iota(values.begin(), values.end(), 10); Array1<int32_t> values_array(context, values); Ragged<int32_t> ragged(shape, values_array); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 6); EXPECT_EQ(num_axes, 3); { // axis == 0, begin == end int32_t axis = 0; int32_t begin = 1, end = 1; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); 
EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), 0); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {1, 1}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); EXPECT_EQ(ragged_result.values.Dim(), 0); } { // axis == 0, begin < end == Dim0() + 1 int32_t axis = 0; int32_t begin = 3, end = 6; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 3, 6}, {0, 1, 4, 5, 7, 7, 8}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {8, 16}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); std::vector<int32_t> expected_values = {18, 19, 20, 21, 22, 23, 24, 25}; CheckArrayData(ragged_result.values, expected_values); } { // axis == 1 int32_t axis = 1; int32_t begin = 6, end = 8; std::vector<int32_t> expected_row_splits = {0, 1, 3}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.NumAxes(), 2); CheckArrayData(result.RowSplits(1), expected_row_splits); std::pair<int32_t, int32_t> expected_value_range = {12, 15}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); std::vector<int32_t> expected_values = {22, 23, 24}; CheckArrayData(ragged_result.values, expected_values); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t num_axes = shape.NumAxes(); int32_t axis = RandInt(0, num_axes - 2); int32_t tot_size = shape.TotSize(axis); int32_t begin = RandInt(0, tot_size); int32_t end = RandInt(begin, tot_size); std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), std::max(0, end - begin)); EXPECT_EQ(result.NumAxes(), num_axes - axis); // just check row_splits1 here would be fine, as we have tested it with // simple case. We just confirm it can run successfully with kinds of // different random shapes. 
if (begin == end) { CheckArrayData(result.RowSplits(1), std::vector<int32_t>{0}); } else { Array1<int32_t> row_splits1 = shape.RowSplits(axis + 1).Arange(begin, end + 1); row_splits1 = Minus(row_splits1, row_splits1[0]); CheckArrayData(result.RowSplits(1), row_splits1); } EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); } } } } TEST(RaggedShapeOpsTest, AppendMoreAxes) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c), shape2 = RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c), shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c); RaggedShape appended_axis2_ref = RaggedShape("[ [ [[ x x ][ x ][]] [[x ][x][ x ]] ] [[[x ][ x x][]]]]") .To(c); RaggedShape appended_axis3_ref = RaggedShape("[ [ [[ x x x ]] [[x x x ]] ] [[[x x x]]]]").To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map2; Array1<uint32_t> merge_map3; RaggedShape appended_axis2 = Append(2, 3, srcs, &merge_map2); RaggedShape appended_axis3 = Append(3, 3, srcs, &merge_map3); K2_LOG(INFO) << "appended_axis2 = " << appended_axis2; K2_LOG(INFO) << "appended_axis3 = " << appended_axis3; K2_CHECK(Equal(appended_axis2, appended_axis2_ref)); K2_CHECK(Equal(appended_axis2, appended_axis2_ref)); std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10}; CheckArrayData(merge_map2, merge_values); CheckArrayData(merge_map3, merge_values); } } TEST(RaggedShapeOpsTest, StackMoreAxes) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c), shape2 = RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c), shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c); RaggedShape stacked_ref = RaggedShape( "[ [ [[[ x x ]][[ x ]][[]]] [[[x ]][[x]][[ x ]]] ] " "[[[[x ]][[ x x]][[]]]]]") .To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map2; Array1<uint32_t> merge_map3; RaggedShape stacked_axis2 = Stack(2, 3, srcs, &merge_map2); RaggedShape stacked_axis3 = Stack(3, 3, srcs, &merge_map3); K2_LOG(INFO) << "stacked_axis2 = " << stacked_axis2; K2_LOG(INFO) << "stacked_axis3 = " << stacked_axis3; K2_CHECK(Equal(stacked_axis2, stacked_ref)); K2_CHECK(Equal(stacked_axis2, stacked_ref)); std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10}; CheckArrayData(merge_map2, merge_values); CheckArrayData(merge_map3, merge_values); } } TEST(RaggedShapeOpsTest, Merge) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]") .To(c), // m: 0 3 6, m_out: 0 3, 6, shape2 = RaggedShape("[ [ x] [ x x x ] ]") .To(c), // m: 1 4, m_out: 1, 4 7 10 shape3 = RaggedShape("[ [ ] [ x x ] [] ]").To(c); // m: 2 5 8, m_out: ,2 5, RaggedShape ans_ref = RaggedShape("[ [] [x] [x x x] [] [] [x x] [x x] [x] ]").To(c); // This is a mixed-up kind of merge map that doesn't appear naturally (they // are always in-order from each source, right now) but it should still // work. 
std::vector<uint32_t> merge_map_data = {6, 1, 4, 8, 2, 5, 0, 3}; Array1<uint32_t> merge_map_in(c, merge_map_data); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map_out; RaggedShape merged = Merge(3, srcs, merge_map_in, &merge_map_out); ASSERT_EQ(true, Equal(ans_ref, merged)); std::vector<uint32_t> merge_map_out_data = {1, 4, 7, 10, 2, 5, 0, 3, 6}; CheckArrayData(merge_map_out, merge_map_out_data); } } TEST(RaggedTest, AddSuffixToRaggedTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 10; ++i) { Ragged<int32_t> src = RandomRagged<int32_t>().To(context); int32_t num_axes = src.NumAxes(); Array1<int32_t> suffix = RandUniformArray1<int32_t>( context, src.TotSize(num_axes - 2), 0, 100); Ragged<int32_t> dst = AddSuffixToRagged(src, suffix); EXPECT_EQ(dst.NumAxes(), num_axes); EXPECT_EQ(dst.NumElements(), src.NumElements() + suffix.Dim()); Ragged<int32_t> src_cpu = src.To(GetCpuContext()); Ragged<int32_t> dst_cpu = dst.To(GetCpuContext()); for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator(); !src_iter.Done(); src_iter.Next()) { const std::vector<int32_t> &src_indexes = src_iter.Value(); EXPECT_EQ(dst_cpu[src_indexes], src_cpu[src_indexes]); } Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1); Array1<int32_t> suffix_cpu = suffix.To(GetCpuContext()); for (int32_t i = 0; i < suffix.Dim(); ++i) { EXPECT_EQ(dst_cpu.values[src_row_splits[i + 1] + i], suffix_cpu[i]); } } } } } TEST(RaggedTest, AddPrefixToRaggedTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 10; ++i) { Ragged<int32_t> src = RandomRagged<int32_t>().To(context); int32_t num_axes = src.NumAxes(); Array1<int32_t> prefix = RandUniformArray1<int32_t>( context, src.TotSize(num_axes - 2), 0, 100); Ragged<int32_t> dst = AddPrefixToRagged(src, prefix); EXPECT_EQ(dst.NumAxes(), num_axes); EXPECT_EQ(dst.NumElements(), src.NumElements() + prefix.Dim()); Ragged<int32_t> src_cpu = src.To(GetCpuContext()); Ragged<int32_t> dst_cpu = dst.To(GetCpuContext()); for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator(); !src_iter.Done(); src_iter.Next()) { const std::vector<int32_t> &src_indexes = src_iter.Value(); std::vector<int32_t> dst_indexes(src_indexes); dst_indexes.back() += 1; // increase the last index by 1 EXPECT_EQ(dst_cpu[dst_indexes], src_cpu[src_indexes]); } Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1); Array1<int32_t> prefix_cpu = prefix.To(GetCpuContext()); for (int32_t i = 0; i < prefix.Dim(); ++i) { EXPECT_EQ(dst_cpu.values[src_row_splits[i] + i], prefix_cpu[i]); } } } } } TEST(RaggedTest, RemoveValuesLeq) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 3 4 ] [ 5 7 8 ] ]").To(c), s3 = Ragged<int32_t>(" [ [4] [5 7 8]]").To(c), s5 = Ragged<int32_t>(" [ [] [ 7 8]]").To(c); Ragged<int32_t> ans1 = RemoveValuesLeq(r, 3), ans2 = RemoveValuesLeq(r, 5); K2_LOG(INFO) << "ans2 = " << ans2; EXPECT_EQ(true, Equal(ans1, s3)); EXPECT_EQ(true, Equal(ans2, s5)); } } TEST(RaggedTest, IndexArrayRagged) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Array1<float> f(c, std::vector<float>({0.0, 1.0, 2.0, 3.0, 4.0})); Ragged<float> fr = Ragged<float>(" [ [ 2.0 0.0 ] [ 1.0 2.0 3.0 ] ]").To(c), ans = Index(f, r); EXPECT_EQ(true, Equal(ans, fr)); } } TEST(RaggedTest, IndexRaggedRagged) { for (auto &c : 
{GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Ragged<int32_t> s = Ragged<int32_t>(" [ [ 10 10 ] [ 11 ] [ 12 12 ] [ 13 ] [ 14 14] ]") .To(c); // NOLINT Ragged<int32_t> sr1 = Ragged<int32_t>(" [ [ [12 12] [10 10] ] [ [11] [12 12] [13] ] ]") .To(c); // NOLINT Ragged<int32_t> sr2 = Ragged<int32_t>(" [ [ 12 12 10 10 ] [ 11 12 12 13 ] ]") .To(c); // NOLINT EXPECT_EQ(true, Equal(Index(s, r, false), sr1)); EXPECT_EQ(true, Equal(Index(s, r, true), sr2)); } } TEST(RaggedShapeOpsTest, CoveringShape) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { { // simple case RaggedShape shape1 = RaggedShape("[ [ x x ] [] [ x ] ]").To(c), shape2 = RaggedShape("[ [ x] [] [ x x x ] ]").To(c), shape3 = RaggedShape("[ [] [] [ x x ] ]").To(c); RaggedShape expected = RaggedShape("[ [x x] [] [x x x] ]").To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; RaggedShape ans = CoveringShape(3, srcs); EXPECT_TRUE(Equal(expected, ans)); // test CoveringShapeForwardMap { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2, -1, -1}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape2, ans); std::vector<int32_t> expected_map = {0, -1, 1, 2, 3}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape3, ans); std::vector<int32_t> expected_map = {-1, -1, 0, 1, -1}; CheckArrayData(elem_map, expected_map); } } { // another simple case: only one src RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]").To(c); RaggedShape *srcs[] = {&shape1}; RaggedShape ans = CoveringShape(1, srcs); EXPECT_TRUE(Equal(shape1, ans)); // test CoveringShapeForwardMap Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2}; CheckArrayData(elem_map, expected_map); } { // random case for (int32_t i = 0; i != 1; ++i) { int32_t num_shape = RandInt(1, 100); int32_t dim0 = RandInt(1, 1000); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> row_sizes = RandUniformArray1<int32_t>(c, dim0 + 1, 0, 100); ExclusiveSum(row_sizes, &row_sizes); shape_vec[j] = RaggedShape2(&row_sizes, nullptr, -1); ASSERT_TRUE(shape_vec[j].Context()->IsCompatible(*c)); ASSERT_EQ(shape_vec[j].Dim0(), dim0); shapes[j] = &shape_vec[j]; } RaggedShape ans = CoveringShape(num_shape, shapes.data()); std::vector<Array1<int32_t>> elem_map(num_shape); for (int32_t j = 0; j != num_shape; ++j) { elem_map[j] = CoveringShapeForwardMap(shape_vec[j], ans); } // check ans ASSERT_EQ(ans.NumAxes(), 2); ASSERT_EQ(ans.Dim0(), dim0); ASSERT_TRUE(ans.Context()->IsCompatible(*c)); ContextPtr cpu = GetCpuContext(); ans = ans.To(cpu); for (int32_t j = 0; j != num_shape; ++j) shape_vec[j] = shape_vec[j].To(cpu); for (int32_t d = 0; d != dim0; ++d) { int32_t max_row_size = 0; for (int32_t j = 0; j != num_shape; ++j) max_row_size = std::max( shape_vec[j].RowSplits(1)[d + 1] - shape_vec[j].RowSplits(1)[d], max_row_size); EXPECT_EQ(max_row_size, ans.RowSplits(1)[d + 1] - ans.RowSplits(1)[d]); } // test CoveringShapeForwardMap for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> cur_elem_map = elem_map[j].To(cpu); ASSERT_EQ(cur_elem_map.Dim(), ans.NumElements()); int32_t n = 0; for (RaggedShapeIndexIterator ans_iter = ans.Iterator(); !ans_iter.Done(); ans_iter.Next()) { const std::vector<int32_t> &ans_indexes = ans_iter.Value(); 
int32_t src_shape_linear_index = cur_elem_map[n]; if (src_shape_linear_index != -1) { EXPECT_EQ(src_shape_linear_index, shape_vec[j][ans_indexes]); } ++n; } } } } } } } // namespace k2
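The k2 test file above ends here. As a reading aid only — the helper below is not part of the k2 sources and its name is made up — the row_splits/row_ids duality that the CheckArrayData assertions keep exercising can be summarised by a tiny host-side reference; k2 itself provides RowSplitsToRowIds() for both CPU and GPU.

#include <cstddef>
#include <cstdint>
#include <vector>

// Editorial illustration, not k2 code: CPU reference for the
// row_splits -> row_ids mapping assumed by the tests above.
// row_splits has num_rows + 1 entries; row i owns the element range
// [row_splits[i], row_splits[i + 1]), and row_ids stores, for each element,
// the index of the row it belongs to.
std::vector<int32_t> RowSplitsToRowIdsRef(
    const std::vector<int32_t> &row_splits) {
  std::vector<int32_t> row_ids;
  for (std::size_t row = 0; row + 1 < row_splits.size(); ++row) {
    for (int32_t e = row_splits[row]; e < row_splits[row + 1]; ++e)
      row_ids.push_back(static_cast<int32_t>(row));
  }
  return row_ids;
}

// E.g. the "simple case" data used repeatedly above: row_splits {0, 2, 5, 6}
// describes three rows of sizes 2, 3 and 1, giving row_ids {0, 0, 1, 1, 1, 2}.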
7176a15d6d26c4390e7c2e3ad049b042dc026e99.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "internal.h" #include <hip/hip_runtime.h> #include "constants.h" #include "host_utility.h" namespace { template<typename SRC_T, typename DST_T> __global__ void check_consistency_kernel(DST_T* dispL, const DST_T* dispR, const SRC_T* srcL, int width, int height, int src_pitch, int dst_pitch, bool subpixel, int LR_max_diff) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; // left-right consistency check, only on leftDisp, but could be done for rightDisp too SRC_T mask = srcL[y * src_pitch + x]; DST_T org = dispL[y * dst_pitch + x]; int d = org; if (subpixel) { d >>= sgm::StereoSGM::SUBPIXEL_SHIFT; } const int k = x - d; if (mask == 0 || org == sgm::INVALID_DISP || (k >= 0 && k < width && LR_max_diff >= 0 && abs(dispR[y * dst_pitch + k] - d) > LR_max_diff)) { // masked or left-right inconsistent pixel -> invalid dispL[y * dst_pitch + x] = static_cast<DST_T>(sgm::INVALID_DISP); } } } // namespace namespace sgm { namespace details { void check_consistency(DeviceImage& dispL, const DeviceImage& dispR, const DeviceImage& srcL, bool subpixel, int LR_max_diff) { SGM_ASSERT(dispL.type == SGM_16U && dispR.type == SGM_16U, ""); const int w = srcL.cols; const int h = srcL.rows; const dim3 block(16, 16); const dim3 grid(divUp(w, block.x), divUp(h, block.y)); if (srcL.type == SGM_8U) { using SRC_T = uint8_t; hipLaunchKernelGGL(( check_consistency_kernel<SRC_T>), dim3(grid), dim3(block), 0, 0, dispL.ptr<uint16_t>(), dispR.ptr<uint16_t>(), srcL.ptr<SRC_T>(), w, h, srcL.step, dispL.step, subpixel, LR_max_diff); } else if (srcL.type == SGM_16U) { using SRC_T = uint16_t; hipLaunchKernelGGL(( check_consistency_kernel<SRC_T>), dim3(grid), dim3(block), 0, 0, dispL.ptr<uint16_t>(), dispR.ptr<uint16_t>(), srcL.ptr<SRC_T>(), w, h, srcL.step, dispL.step, subpixel, LR_max_diff); } else { using SRC_T = uint32_t; hipLaunchKernelGGL(( check_consistency_kernel<SRC_T>), dim3(grid), dim3(block), 0, 0, dispL.ptr<uint16_t>(), dispR.ptr<uint16_t>(), srcL.ptr<SRC_T>(), w, h, srcL.step, dispL.step, subpixel, LR_max_diff); } CUDA_CHECK(hipGetLastError()); } } // namespace details } // namespace sgm
7176a15d6d26c4390e7c2e3ad049b042dc026e99.cu
/* Copyright 2016 Fixstars Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http ://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "internal.h" #include <cuda_runtime.h> #include "constants.h" #include "host_utility.h" namespace { template<typename SRC_T, typename DST_T> __global__ void check_consistency_kernel(DST_T* dispL, const DST_T* dispR, const SRC_T* srcL, int width, int height, int src_pitch, int dst_pitch, bool subpixel, int LR_max_diff) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; // left-right consistency check, only on leftDisp, but could be done for rightDisp too SRC_T mask = srcL[y * src_pitch + x]; DST_T org = dispL[y * dst_pitch + x]; int d = org; if (subpixel) { d >>= sgm::StereoSGM::SUBPIXEL_SHIFT; } const int k = x - d; if (mask == 0 || org == sgm::INVALID_DISP || (k >= 0 && k < width && LR_max_diff >= 0 && abs(dispR[y * dst_pitch + k] - d) > LR_max_diff)) { // masked or left-right inconsistent pixel -> invalid dispL[y * dst_pitch + x] = static_cast<DST_T>(sgm::INVALID_DISP); } } } // namespace namespace sgm { namespace details { void check_consistency(DeviceImage& dispL, const DeviceImage& dispR, const DeviceImage& srcL, bool subpixel, int LR_max_diff) { SGM_ASSERT(dispL.type == SGM_16U && dispR.type == SGM_16U, ""); const int w = srcL.cols; const int h = srcL.rows; const dim3 block(16, 16); const dim3 grid(divUp(w, block.x), divUp(h, block.y)); if (srcL.type == SGM_8U) { using SRC_T = uint8_t; check_consistency_kernel<SRC_T><<<grid, block>>>(dispL.ptr<uint16_t>(), dispR.ptr<uint16_t>(), srcL.ptr<SRC_T>(), w, h, srcL.step, dispL.step, subpixel, LR_max_diff); } else if (srcL.type == SGM_16U) { using SRC_T = uint16_t; check_consistency_kernel<SRC_T><<<grid, block>>>(dispL.ptr<uint16_t>(), dispR.ptr<uint16_t>(), srcL.ptr<SRC_T>(), w, h, srcL.step, dispL.step, subpixel, LR_max_diff); } else { using SRC_T = uint32_t; check_consistency_kernel<SRC_T><<<grid, block>>>(dispL.ptr<uint16_t>(), dispR.ptr<uint16_t>(), srcL.ptr<SRC_T>(), w, h, srcL.step, dispL.step, subpixel, LR_max_diff); } CUDA_CHECK(cudaGetLastError()); } } // namespace details } // namespace sgm
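For reference, the left-right consistency rule implemented by check_consistency_kernel above, written as a host-side loop. The flat row-major layout, the invalid_disp value and the subpixel shift are parameters of this sketch, not libSGM definitions:

#include <cstdint>
#include <cstdlib>
#include <vector>

// Invalidate left-disparity pixels that are masked out or that disagree with
// the right disparity map by more than LR_max_diff (same rule as the kernel).
static void check_consistency_cpu(std::vector<uint16_t>& dispL,
                                  const std::vector<uint16_t>& dispR,
                                  const std::vector<uint8_t>& srcL,
                                  int width, int height,
                                  bool subpixel, int LR_max_diff,
                                  uint16_t invalid_disp, int subpixel_shift)
{
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            const uint8_t mask = srcL[y * width + x];
            const uint16_t org = dispL[y * width + x];
            int d = org;
            if (subpixel) {
                d >>= subpixel_shift; // drop the fractional disparity bits
            }
            const int k = x - d; // corresponding column in the right disparity map
            const bool lr_fail = k >= 0 && k < width && LR_max_diff >= 0 &&
                std::abs(static_cast<int>(dispR[y * width + k]) - d) > LR_max_diff;
            if (mask == 0 || org == invalid_disp || lr_fail) {
                // masked or left-right inconsistent pixel -> invalid
                dispL[y * width + x] = invalid_disp;
            }
        }
    }
}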
1765bc81c550f604e0253b5301a0459646ae5405.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_4_a; int xdim0_update_halo_kernel4_plus_4_a_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_4_a; int ydim0_update_halo_kernel4_plus_4_a_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_4_a; int xdim1_update_halo_kernel4_plus_4_a_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_4_a; int ydim1_update_halo_kernel4_plus_4_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel4_plus_4_a * (y) + \ xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel4_plus_4_a * (y) + \ xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a * \ (z)) // user function __device__ inline void update_halo_kernel4_plus_4_a_gpu(double *vol_flux_y, double *mass_flux_y, const int *fields) { if (fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(4, 0, 0)]; if (fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(4, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_4_a(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a + idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a + idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_4_a_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_4_a_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 75)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(75, "update_halo_kernel4_plus_4_a"); OPS_kernels[75].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = 
range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h || ydim0 != ydim0_update_halo_kernel4_plus_4_a_h || xdim1 != xdim1_update_halo_kernel4_plus_4_a_h || ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int)); xdim0_update_halo_kernel4_plus_4_a_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int)); ydim0_update_halo_kernel4_plus_4_a_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int)); xdim1_update_halo_kernel4_plus_4_a_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int)); ydim1_update_halo_kernel4_plus_4_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[75].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_4_a), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[75].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[75].mpi_time += t2 - t1; OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor 
*)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 75; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 75; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_4_a_execute; if (OPS_diags > 1) { ops_timing_realloc(75, "update_halo_kernel4_plus_4_a"); } ops_enqueue_kernel(desc); } #endif
1765bc81c550f604e0253b5301a0459646ae5405.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_4_a; int xdim0_update_halo_kernel4_plus_4_a_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_4_a; int ydim0_update_halo_kernel4_plus_4_a_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_4_a; int xdim1_update_halo_kernel4_plus_4_a_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_4_a; int ydim1_update_halo_kernel4_plus_4_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel4_plus_4_a * (y) + \ xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel4_plus_4_a * (y) + \ xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a * \ (z)) // user function __device__ inline void update_halo_kernel4_plus_4_a_gpu(double *vol_flux_y, double *mass_flux_y, const int *fields) { if (fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(4, 0, 0)]; if (fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(4, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_4_a(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a + idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_a * ydim0_update_halo_kernel4_plus_4_a; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a + idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_a * ydim1_update_halo_kernel4_plus_4_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_4_a_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_4_a_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 75)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(75, "update_halo_kernel4_plus_4_a"); OPS_kernels[75].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] 
- start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_4_a_h || ydim0 != ydim0_update_halo_kernel4_plus_4_a_h || xdim1 != xdim1_update_halo_kernel4_plus_4_a_h || ydim1 != ydim1_update_halo_kernel4_plus_4_a_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_a, &xdim0, sizeof(int)); xdim0_update_halo_kernel4_plus_4_a_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_a, &ydim0, sizeof(int)); ydim0_update_halo_kernel4_plus_4_a_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_a, &xdim1, sizeof(int)); xdim1_update_halo_kernel4_plus_4_a_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_a, &ydim1, sizeof(int)); ydim1_update_halo_kernel4_plus_4_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[75].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel4_plus_4_a<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[75].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[75].mpi_time += t2 - t1; OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[75].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_4_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 
75; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 75; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_4_a_execute; if (OPS_diags > 1) { ops_timing_realloc(75, "update_halo_kernel4_plus_4_a"); } ops_enqueue_kernel(desc); } #endif
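Both versions of the host stub above size the launch the same way: a ceil-divided grid in x and y, one z block per plane, with the kernel guarding against the overhang; hipify only rewrites the <<<>>> launch into hipLaunchKernelGGL. A standalone CUDA sketch of that sizing pattern (kernel and buffer names are illustrative, not OPS symbols):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void touch3d(float *buf, int nx, int ny, int nz) {
  int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
  int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
  int idx_z = blockIdx.z;  // one block per z plane, as in the generated stub
  if (idx_x < nx && idx_y < ny && idx_z < nz)
    buf[(idx_z * ny + idx_y) * nx + idx_x] = 1.0f;
}

int main() {
  const int nx = 100, ny = 37, nz = 5;
  float *d_buf = NULL;
  cudaMalloc((void **)&d_buf, sizeof(float) * nx * ny * nz);

  const int block_x = 32, block_y = 4;
  dim3 tblock(block_x, block_y, 1);
  dim3 grid((nx - 1) / block_x + 1,  // ceil-div, same arithmetic as the stub
            (ny - 1) / block_y + 1, nz);
  touch3d<<<grid, tblock>>>(d_buf, nx, ny, nz);
  printf("launch: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaDeviceSynchronize();
  cudaFree(d_buf);
  return 0;
}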
a27a33837c5b47a5d1edb569db1c3c37bfb8c67c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void spool_bprop_avg( float* param_E, float* param_B, const float* param_I, int param_mode, int param_N, int param_W, int param_H, int param_D, int param_C, int param_WN, int param_HWN, int param_DHWN, int param_magic_H, int param_shift_H, int param_pad_w, int param_pad_h, int param_pad_d, int param_pad_c, int param_str_w, int param_str_h, int param_str_d, int param_str_c, int param_magic_str_w, int param_shift_str_w, int param_magic_str_h, int param_shift_str_h, int param_magic_str_d, int param_shift_str_d, int param_magic_str_c, int param_shift_str_c, int param_S, int param_R, int param_T, int param_J, int param_RS, int param_RST, int param_JRST, int param_magic_S, int param_shift_S, int param_magic_RS, int param_shift_RS, int param_magic_RST, int param_shift_RST, int param_Q, int param_P, int param_M, int param_K, int param_QN, int param_PQN, int param_MPQN ) { *param_E = 0; }
a27a33837c5b47a5d1edb569db1c3c37bfb8c67c.cu
extern "C" __global__ void spool_bprop_avg( float* param_E, float* param_B, const float* param_I, int param_mode, int param_N, int param_W, int param_H, int param_D, int param_C, int param_WN, int param_HWN, int param_DHWN, int param_magic_H, int param_shift_H, int param_pad_w, int param_pad_h, int param_pad_d, int param_pad_c, int param_str_w, int param_str_h, int param_str_d, int param_str_c, int param_magic_str_w, int param_shift_str_w, int param_magic_str_h, int param_shift_str_h, int param_magic_str_d, int param_shift_str_d, int param_magic_str_c, int param_shift_str_c, int param_S, int param_R, int param_T, int param_J, int param_RS, int param_RST, int param_JRST, int param_magic_S, int param_shift_S, int param_magic_RS, int param_shift_RS, int param_magic_RST, int param_shift_RST, int param_Q, int param_P, int param_M, int param_K, int param_QN, int param_PQN, int param_MPQN ) { *param_E = 0; }
7e48857d515128e1ec86b1fcc51f6b5f0368848a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <THH/THHDeviceUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { __host__ __device__ __forceinline__ int imin(int a, int b) { return a > b ? b : a; } __host__ __device__ __forceinline__ int imax(int a, int b) { return a > b ? a : b; } namespace { template <typename scalar_t> __global__ void replication_pad_forward_kernel1d( PackedTensorAccessor64<scalar_t, 3> input, PackedTensorAccessor64<scalar_t, 3> output, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2)) { return; } int outputPointX = outputPointId % output.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = input[batch][plane][inputPointX]; output[batch][plane][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 3> gradInput, PackedTensorAccessor64<scalar_t, 3> gradOutput, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2)) { return; } int outputPointX = outputPointId % gradOutput.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel2d( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2) * output.size(3)) { return; } int outputPointX = outputPointId % output.size(3); int outputPointY = outputPointId / output.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 4> gradInput, PackedTensorAccessor64<scalar_t, 4> gradOutput, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { 
return; } int outputPointX = outputPointId % gradOutput.size(3); int outputPointY = outputPointId / gradOutput.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel3d( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) { return; } int outputPointX = outputPointId % output.size(4); int outputPointY = (outputPointId / output.size(4)) % output.size(3); int outputPointZ = outputPointId / (output.size(3) * output.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = input[batch][plane][inputPointZ][inputPointY][inputPointX]; output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) { return; } int outputPointX = outputPointId % gradOutput.size(4); int outputPointY = (outputPointId / gradOutput.size(4)) % gradOutput.size(3); int outputPointZ = outputPointId / (gradOutput.size(3) * gradOutput.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } void replication_pad2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is 
expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0; TORCH_CHECK( (numInputDims == 3 && input.size(0) != 0 && valid_dims) || (numInputDims == 4 && valid_dims && input.size(3) != 0), "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 4) { numBatch = input.size(0); planeDim++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = inputH + padT + padB; int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1 || outputH >= 1, "input (H: ", inputH, ", W: ", inputW, ") is too small." " Calculated output H: ", outputH, " W: ", outputW); if (numInputDims == 3) { output.resize_({numPlanes, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 3) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 4>(); auto devOutput = output_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel2d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padT, padB, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } void replication_pad2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = input.dim(); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input.size(dimh); int iwidth = input.size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. 
Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 3) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padT, padB, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } static inline void shapeCheck3d( const Tensor& input, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && input.size(0) != 0 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." 
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); } static inline void shapeAndGradOutputCheck3d( const Tensor& input, const Tensor& gradOutput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(numPlanes == gradOutput.size(planeDim), "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", gradOutput.size(planeDim)); TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); TORCH_CHECK(odepth == gradOutput.size(dimd), "gradOutput depth unexpected. 
Expected: ", odepth, ", Got: ", gradOutput.size(dimd)); } void replication_pad3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 4) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } } // namespace TORCH_IMPL_FUNC(replication_pad1d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int64_t padL = paddingSize[0]; int64_t padR = paddingSize[1]; constexpr int64_t planeDim = -2; constexpr int64_t dimw = -1; int64_t numBatch = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { numBatch = input.size(0); } int64_t numPlanes = input.size(planeDim); int64_t inputW = input.size(dimw); int64_t outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 2) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 3>(); auto devOutput = output_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devOutput.size(2); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel1d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } TORCH_IMPL_FUNC(replication_pad1d_backward_out_cuda) ( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, const Tensor& gradInput ) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { planeDim++; dimw++; } int iwidth = input.size(dimw); int owidth = iwidth + padL + padR; if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 2) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devGradOutput.size(2); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_backward_kernel) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devGradInput, devGradOutput, padL, padR, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } }); } Tensor& replication_pad2d_out_cuda(const Tensor& input, IntArrayRef paddingSize, Tensor& output) { replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad2d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad2d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad2d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } TORCH_IMPL_FUNC(replication_pad3d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numBatch = 1; int numInputDims = input.dim(); if (numInputDims == 5) { numBatch = input.size(0); planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputD = input.size(dimd); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputD = output.size(dimd); int outputH = output.size(dimh); int outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 4) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 5>(); auto devOutput = output_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = ::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = ::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( replication_pad_forward_kernel3d) , dim3(gridSize), dim3(blockSize), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_HIP_KERNEL_LAUNCH_CHECK(); } } } ); } Tensor& replication_pad3d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad3d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } } // at::native } // at
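All of the forward kernels above reduce to the same clamped index map per axis; in 1D it is inputX = min(max(padL, outX), inputW + padL - 1) - oStartX + iStartX. A host-only sketch of that map for non-negative padding, where the two start offsets fold into a single -padL (the function name and flat layout are assumptions of this example, not ATen API):

#include <algorithm>
#include <vector>

// Replication-pad a 1D signal by clamping each output index into the valid
// input range, the same arithmetic the CUDA kernels apply per axis (padL, padR >= 0).
static std::vector<float> replication_pad1d_cpu(const std::vector<float> &in,
                                                int padL, int padR) {
  const int inputW = static_cast<int>(in.size());
  const int outputW = inputW + padL + padR;
  std::vector<float> out(outputW);
  for (int outX = 0; outX < outputW; ++outX) {
    // clamp into [padL, inputW + padL - 1], then shift back by padL
    const int inX = std::min(std::max(padL, outX), inputW + padL - 1) - padL;
    out[outX] = in[inX];
  }
  return out;
}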
7e48857d515128e1ec86b1fcc51f6b5f0368848a.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCAtomics.cuh> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <THC/THCDeviceUtils.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { __host__ __device__ __forceinline__ int imin(int a, int b) { return a > b ? b : a; } __host__ __device__ __forceinline__ int imax(int a, int b) { return a > b ? a : b; } namespace { template <typename scalar_t> __global__ void replication_pad_forward_kernel1d( PackedTensorAccessor64<scalar_t, 3> input, PackedTensorAccessor64<scalar_t, 3> output, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2)) { return; } int outputPointX = outputPointId % output.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), input.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = input[batch][plane][inputPointX]; output[batch][plane][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 3> gradInput, PackedTensorAccessor64<scalar_t, 3> gradOutput, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2)) { return; } int outputPointX = outputPointId % gradOutput.size(2); int iStartX = imax(0, -padL); int oStartX = imax(0, padL); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(2) + padL - 1) - oStartX + iStartX; scalar_t valueToCopy = gradOutput[batch][plane][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel2d( PackedTensorAccessor64<scalar_t, 4> input, PackedTensorAccessor64<scalar_t, 4> output, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= output.size(2) * output.size(3)) { return; } int outputPointX = outputPointId % output.size(3); int outputPointY = outputPointId / output.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), input.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), input.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 4> gradInput, PackedTensorAccessor64<scalar_t, 4> gradOutput, int padT, int padB, int padL, int padR, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= gradOutput.size(2) * gradOutput.size(3)) { return; } int outputPointX = outputPointId % gradOutput.size(3); int outputPointY = 
outputPointId / gradOutput.size(3); int iStartX = imax(0, -padL); int iStartY = imax(0, -padT); int oStartX = imax(0, padL); int oStartY = imax(0, padT); int inputPointX = imin(imax(padL, outputPointX), gradInput.size(3) + padL - 1) - oStartX + iStartX; int inputPointY = imin(imax(padT, outputPointY), gradInput.size(2) + padT - 1) - oStartY + iStartY; scalar_t valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); } template <typename scalar_t> __global__ void replication_pad_forward_kernel3d( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (output.size(2) * output.size(3) * output.size(4))) { return; } int outputPointX = outputPointId % output.size(4); int outputPointY = (outputPointId / output.size(4)) % output.size(3); int outputPointZ = outputPointId / (output.size(3) * output.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), input.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), input.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), input.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = input[batch][plane][inputPointZ][inputPointY][inputPointX]; output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy; } template <typename scalar_t> __global__ void replication_pad_backward_kernel( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, int pfront, int pback, int ptop, int pbottom, int pleft, int pright, int y_shift, int z_shift) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y + y_shift; int batch = blockIdx.z + z_shift; if (outputPointId >= (gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4))) { return; } int outputPointX = outputPointId % gradOutput.size(4); int outputPointY = (outputPointId / gradOutput.size(4)) % gradOutput.size(3); int outputPointZ = outputPointId / (gradOutput.size(3) * gradOutput.size(4)); int iStartX = imax(0, -pleft); int iStartY = imax(0, -ptop); int iStartZ = imax(0, -pfront); int oStartX = imax(0, pleft); int oStartY = imax(0, ptop); int oStartZ = imax(0, pfront); int inputPointX = imin(imax(pleft, outputPointX), gradInput.size(4) + pleft - 1) - oStartX + iStartX; int inputPointY = imin(imax(ptop, outputPointY), gradInput.size(3) + ptop - 1) - oStartY + iStartY; int inputPointZ = imin(imax(pfront, outputPointZ), gradInput.size(2) + pfront - 1) - oStartZ + iStartZ; scalar_t valueToCopy = gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX]; gpuAtomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX], valueToCopy); } void replication_pad2d_out_cuda_template( Tensor& output, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = 
paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0; TORCH_CHECK( (numInputDims == 3 && input.size(0) != 0 && valid_dims) || (numInputDims == 4 && valid_dims && input.size(3) != 0), "Expected 3D or 4D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); if (numInputDims == 4) { numBatch = input.size(0); planeDim++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputH = inputH + padT + padB; int outputW = inputW + padL + padR; TORCH_CHECK(outputW >= 1 || outputH >= 1, "input (H: ", inputH, ", W: ", inputW, ") is too small." " Calculated output H: ", outputH, " W: ", outputW); if (numInputDims == 3) { output.resize_({numPlanes, outputH, outputW}); } else { output.resize_({numBatch, numPlanes, outputH, outputW}); } if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 3) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 4>(); auto devOutput = output_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_forward_kernel2d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devInput, devOutput, padT, padB, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } void replication_pad2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(paddingSize.size() == 4, "padding Size is expected to be 4"); int padL = paddingSize[0]; int padR = paddingSize[1]; int padT = paddingSize[2]; int padB = paddingSize[3]; int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = input.dim(); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } int iheight = input.size(dimh); int iwidth = input.size(dimw); int oheight = iheight + padT + padB; int owidth = iwidth + padL + padR; TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. 
Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad2d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 3) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 4>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 4>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, padT, padB, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } static inline void shapeCheck3d( const Tensor& input, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && input.size(0) != 0 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." 
" Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); } static inline void shapeAndGradOutputCheck3d( const Tensor& input, const Tensor& gradOutput, int pleft, int pright, int ptop, int pbottom, int pfront, int pback) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int numInputDims = input.dim(); bool valid_dims = input.size(1) != 0 && input.size(2) != 0 && input.size(3) != 0; TORCH_CHECK( (numInputDims == 4 && valid_dims) || (numInputDims == 5 && valid_dims && input.size(4) != 0), "Expected 4D or 5D (batch mode) tensor with possibly 0 batch size and other non-zero dimensions for input, but got: ", input.sizes()); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int idepth = input.size(dimd); int iheight = input.size(dimh); int iwidth = input.size(dimw); int odepth = idepth + pfront + pback; int oheight = iheight + ptop + pbottom; int owidth = iwidth + pleft + pright; TORCH_CHECK(owidth >= 1 || oheight >= 1 || odepth >= 1, "input (D: ", idepth, " H: ", iheight, ", W: ", iwidth, ") is too small." " Calculated output D: ", odepth, " H: ", oheight, " W: ", owidth); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); TORCH_CHECK(numPlanes == gradOutput.size(planeDim), "gradOutput width unexpected. Expected: ", numPlanes, ", Got: ", gradOutput.size(planeDim)); TORCH_CHECK(owidth == gradOutput.size(dimw), "gradOutput width unexpected. Expected: ", owidth, ", Got: ", gradOutput.size(dimw)); TORCH_CHECK(oheight == gradOutput.size(dimh), "gradOutput height unexpected. Expected: ", oheight, ", Got: ", gradOutput.size(dimh)); TORCH_CHECK(odepth == gradOutput.size(dimd), "gradOutput depth unexpected. 
Expected: ", odepth, ", Got: ", gradOutput.size(dimd)); } void replication_pad3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { TORCH_CHECK(paddingSize.size() == 6, "padding Size is expected to be 6"); int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; shapeAndGradOutputCheck3d(input, gradOutput, pleft, pright, ptop, pbottom, pfront, pback); int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numInputDims = input.dim(); if (numInputDims == 5) { planeDim++; dimd++; dimh++; dimw++; } gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 4) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 5>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devGradOutput.size(2) * devGradOutput.size(3) * devGradOutput.size(4); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } } // namespace TORCH_IMPL_FUNC(replication_pad1d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); int64_t padL = paddingSize[0]; int64_t padR = paddingSize[1]; constexpr int64_t planeDim = -2; constexpr int64_t dimw = -1; int64_t numBatch = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { numBatch = input.size(0); } int64_t numPlanes = input.size(planeDim); int64_t inputW = input.size(dimw); int64_t outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 2) { input_ = input.unsqueeze(0); output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 3>(); auto devOutput = output_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devOutput.size(2); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_forward_kernel1d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>(devInput, devOutput, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } TORCH_IMPL_FUNC(replication_pad1d_backward_out_cuda) ( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, const Tensor& gradInput ) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad1d_backward_cuda"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(input), "input tensor must fit into 32-bit index math"); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(gradOutput), "output gradient tensor must fit into 32-bit index math"); int padL = paddingSize[0]; int padR = paddingSize[1]; int planeDim = 0; int dimw = 1; int numInputDims = input.ndimension(); if (numInputDims == 3) { planeDim++; dimw++; } int iwidth = input.size(dimw); int owidth = iwidth + padL + padR; if (gradInput.numel() == 0) { return; } gradInput.zero_(); AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad1d_backward_cuda", [&] { auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; if (numInputDims == 2) { gradInput_ = gradInput.unsqueeze(0); gradOutput_ = gradOutput.unsqueeze(0); } auto devGradInput = gradInput_.packed_accessor64<scalar_t, 3>(); auto devGradOutput = gradOutput_.packed_accessor64<scalar_t, 3>(); int64_t outputPlaneSize = devGradOutput.size(2); int64_t size1 = devGradOutput.size(1); int64_t size0 = devGradOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_backward_kernel <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devGradInput, devGradOutput, padL, padR, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } }); } Tensor& replication_pad2d_out_cuda(const Tensor& input, IntArrayRef paddingSize, Tensor& output) { replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor replication_pad2d_cuda( const Tensor& input, IntArrayRef paddingSize) { auto output = at::empty({0}, input.options()); replication_pad2d_out_cuda_template( output, input, paddingSize); return output; } Tensor& replication_pad2d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_out_cuda"); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad2d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad2d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad2d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } TORCH_IMPL_FUNC(replication_pad3d_out_cuda) ( const Tensor& input, IntArrayRef paddingSize, const Tensor& output ) { int pleft = paddingSize[0]; int pright = paddingSize[1]; int ptop = paddingSize[2]; int pbottom = paddingSize[3]; int pfront = paddingSize[4]; int pback = paddingSize[5]; int planeDim = 0; int dimd = 1; int dimh = 2; int dimw = 3; int numBatch = 1; int numInputDims = input.dim(); if (numInputDims == 5) { numBatch = input.size(0); planeDim++; dimd++; dimh++; dimw++; } int numPlanes = input.size(planeDim); int inputD = input.size(dimd); int inputH = input.size(dimh); int inputW = input.size(dimw); int outputD = output.size(dimd); int outputH = output.size(dimh); int outputW = output.size(dimw); if (input.numel() == 0) { return; } AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(kHalf, input.scalar_type(), "replication_pad3d_cuda", [&] { at::Tensor input_ = input; at::Tensor output_ = output; if (numInputDims == 4) { auto input_ = input.unsqueeze(0); auto output_ = output.unsqueeze(0); } auto devInput = input_.packed_accessor64<scalar_t, 5>(); auto devOutput = output_.packed_accessor64<scalar_t, 5>(); int64_t outputPlaneSize = devOutput.size(2) * devOutput.size(3) * devOutput.size(4); int64_t size1 = devOutput.size(1); int64_t size0 = devOutput.size(0); for (int64_t block_y = 0; block_y < size1; block_y += 65535) { int64_t block_y_size = std::min(size1 - block_y, static_cast<int64_t>(65535)); for (int64_t block_z = 0; block_z < size0; block_z += 65535) { int64_t block_z_size = std::min(size0 - block_z, static_cast<int64_t>(65535)); dim3 gridSize(THCCeilDiv(outputPlaneSize, static_cast<int64_t>(256)), block_y_size, block_z_size); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); replication_pad_forward_kernel3d <<<gridSize, blockSize, 0, at::cuda::getCurrentCUDAStream()>>>( devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright, block_y, block_z); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } } ); } Tensor& replication_pad3d_backward_out_cuda(const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_out_cuda"); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } Tensor replication_pad3d_backward_cuda( const Tensor& gradOutput, const Tensor& input, IntArrayRef paddingSize) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("replication_pad3d_backward_cuda"); auto gradInput = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); replication_pad3d_backward_out_cuda_template( gradInput, gradOutput, input, paddingSize); return gradInput; } } // at::native } // at
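All of the launch loops in the file above step block_y and block_z in strides of 65535 because CUDA limits the y and z grid dimensions to 65535 blocks (only the x dimension may be larger). A minimal standalone sketch of that chunked-launch pattern, with a hypothetical elementwise kernel (touch_plane) and helper (launch_chunked) standing in for the padding kernels and their drivers:

#include <cuda_runtime.h>
#include <algorithm>
#include <cstdint>

// Hypothetical stand-in for the padding kernels: one thread per element of a
// plane, blockIdx.y selects the plane (channel), blockIdx.z selects the batch
// entry; block_y/block_z carry the chunk offsets added by the host loop.
__global__ void touch_plane(float *data, int64_t planeSize, int64_t numPlanes,
                            int64_t block_y, int64_t block_z) {
  int64_t x = blockIdx.x * (int64_t)blockDim.x + threadIdx.x;
  int64_t plane = blockIdx.y + block_y;
  int64_t batch = blockIdx.z + block_z;
  if (x < planeSize) {
    data[(batch * numPlanes + plane) * planeSize + x] += 1.0f;
  }
}

// Host loop mirroring the 65535-step chunking used above: gridDim.y and
// gridDim.z may not exceed 65535, so larger plane/batch counts are covered
// in chunks of at most 65535 blocks each.
void launch_chunked(float *data, int64_t planeSize, int64_t numPlanes,
                    int64_t numBatch, cudaStream_t stream) {
  if (planeSize == 0 || numPlanes == 0 || numBatch == 0) return;
  const int64_t kMaxGridYZ = 65535;
  for (int64_t block_y = 0; block_y < numPlanes; block_y += kMaxGridYZ) {
    int64_t ySize = std::min(numPlanes - block_y, kMaxGridYZ);
    for (int64_t block_z = 0; block_z < numBatch; block_z += kMaxGridYZ) {
      int64_t zSize = std::min(numBatch - block_z, kMaxGridYZ);
      dim3 gridSize((unsigned)((planeSize + 255) / 256), (unsigned)ySize, (unsigned)zSize);
      dim3 blockSize(planeSize > 256 ? 256u : (unsigned)planeSize);
      touch_plane<<<gridSize, blockSize, 0, stream>>>(data, planeSize, numPlanes,
                                                      block_y, block_z);
    }
  }
}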
c83f063d1afd148bfa371c1339a4194f79fe4f73.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <time.h> #include <stdio.h> #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #define NUMBLOCK 1 #define BLOCKWIDTH 16 #define NUMTHREAD 4 #define ASIZE 4 void printArray(int * image){ int i,j; for (i = 0; i < ASIZE; ++i) { for (j = 0; j < ASIZE; ++j) { printf("%d\t", image[i * ASIZE + j]); } printf("\n"); } printf("\n\n"); } __global__ void prefixSum(int * img, int * integral) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; int i; printf("blockIdx = %d, blockDim = %d, threadIdx = %d, img[%d] = %d\n", blockIdx.x, blockDim.x, threadIdx.x, idx, img[idx]); printf("blockIdy = %d, blockDimy = %d, threadIdy = %d, img[%d][%d] = %d\n", blockIdx.y, blockDim.y, threadIdx.y, idx,idy, img[idx*ASIZE + idy]); //printf("blockIdx = %d, blockDim = %d, threadIdx = %d, img[%d] = %d\n", blockIdx.x, blockDim.x, threadIdx.x, idx, img[idx]); if (idy == 0) { integral[idx*ASIZE+idy] = img[idx*ASIZE+idy]; } else integral[idx*ASIZE+idy] = img[idx*ASIZE+idy] + integral[idx*ASIZE+idy-1]; printf("img[%d][%d] > %d, integral[] > %d\n", idx, idy,img[idx*ASIZE+idy], integral[idx*ASIZE+idy-1]); __syncthreads(); } __global__ void columnSum(int * img, int * integral) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; int i; printf("idx > %d, idy > %d, img[] > %d, integral[] > %d\n", idx, idy, img[idx + idy*ASIZE], integral[idx + idy*ASIZE]); if (idx == 0) integral[idx + idy*ASIZE] = img[idx + idy*ASIZE]; else integral[idx + idy*ASIZE] = img[idx + (idy*ASIZE)] + integral[idx + (idy-1)*ASIZE]; __syncthreads(); } int main() { // const int SIZE = ASIZE; //int ASIZE = *(int *) argv[1]; int *IMG_HOST, *INTG_HOST; int *IMG_DEV, *INTG_DEV; //Time initialization float timePassed; size_t size = ASIZE*sizeof(int); IMG_HOST = (int *)malloc(size*size); INTG_HOST = (int *)malloc(size*size); hipMalloc((void **) &IMG_DEV, size*size); hipMalloc((void **) &INTG_DEV, size*size); int i,j, random; for (i = 0; i < ASIZE; ++i) { //srand(i); for (j = 0; j < ASIZE; ++j) { //srand(j); IMG_HOST[i*ASIZE + j] = i*2 + j*4; } } printArray(IMG_HOST); dim3 grid(NUMBLOCK,NUMBLOCK), block(NUMTHREAD,NUMTHREAD); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMemcpy(IMG_DEV, IMG_HOST, size*size, hipMemcpyHostToDevice); hipEventRecord(start, 0); hipLaunchKernelGGL(( prefixSum) , dim3(grid), dim3(block) , 0, 0, IMG_DEV, INTG_DEV); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&timePassed, start,stop); printf("Time Spent Row: %0.5f\n", timePassed); //#################################################################// hipMemcpy(INTG_HOST, INTG_DEV, size*size, hipMemcpyDeviceToHost); printArray(INTG_HOST); //hipMemcpy(INTG_DEV, INTG_HOST, size*size, hipMemcpyHostToDevice); //INTG_HOST = (int *)malloc(size*size); hipEventRecord(start, 0); hipLaunchKernelGGL(( columnSum) , dim3(grid), dim3(block) , 0, 0, INTG_DEV, INTG_DEV); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&timePassed, start,stop); printf("Time Spent Column: %0.5f\n", timePassed); hipMemcpy(INTG_HOST, INTG_DEV, size*size, hipMemcpyDeviceToHost); printArray(INTG_HOST); //Free up the resources free(IMG_HOST); free(INTG_HOST); hipFree(IMG_DEV); hipFree(INTG_DEV); return 0; }
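The original .cu version of this file follows as the next entry; comparing the two shows the mechanical substitutions hipify applies (hipMalloc for cudaMalloc, hipMemcpy for cudaMemcpy, hipDeviceSynchronize for the deprecated cudaThreadSynchronize, hipLaunchKernelGGL for the triple-chevron launch). A minimal sketch of the launch mapping, with a hypothetical kernel standing in for the ones above:

#include <cuda_runtime.h>

__global__ void demo_kernel(int *p) { p[threadIdx.x] = threadIdx.x; }  // hypothetical kernel

int main() {
  int *d = nullptr;
  cudaMalloc((void **)&d, 32 * sizeof(int));    // hipified form: hipMalloc((void **)&d, ...)
  demo_kernel<<<dim3(1, 1), dim3(32, 1)>>>(d);  // hipified form:
  // hipLaunchKernelGGL(demo_kernel, dim3(1, 1), dim3(32, 1),
  //                    0 /* shared mem bytes */, 0 /* stream */, d);
  cudaDeviceSynchronize();  // the .cu entry below uses the deprecated
                            // cudaThreadSynchronize(), which hipify also
                            // maps to hipDeviceSynchronize()
  cudaFree(d);              // hipified form: hipFree(d)
  return 0;
}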
c83f063d1afd148bfa371c1339a4194f79fe4f73.cu
#include <stdlib.h> #include <time.h> #include <stdio.h> #include <cuda.h> #include "cuda_runtime.h" #define NUMBLOCK 1 #define BLOCKWIDTH 16 #define NUMTHREAD 4 #define ASIZE 4 void printArray(int * image){ int i,j; for (i = 0; i < ASIZE; ++i) { for (j = 0; j < ASIZE; ++j) { printf("%d\t", image[i * ASIZE + j]); } printf("\n"); } printf("\n\n"); } __global__ void prefixSum(int * img, int * integral) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; int i; printf("blockIdx = %d, blockDim = %d, threadIdx = %d, img[%d] = %d\n", blockIdx.x, blockDim.x, threadIdx.x, idx, img[idx]); printf("blockIdy = %d, blockDimy = %d, threadIdy = %d, img[%d][%d] = %d\n", blockIdx.y, blockDim.y, threadIdx.y, idx,idy, img[idx*ASIZE + idy]); //printf("blockIdx = %d, blockDim = %d, threadIdx = %d, img[%d] = %d\n", blockIdx.x, blockDim.x, threadIdx.x, idx, img[idx]); if (idy == 0) { integral[idx*ASIZE+idy] = img[idx*ASIZE+idy]; } else integral[idx*ASIZE+idy] = img[idx*ASIZE+idy] + integral[idx*ASIZE+idy-1]; printf("img[%d][%d] > %d, integral[] > %d\n", idx, idy,img[idx*ASIZE+idy], integral[idx*ASIZE+idy-1]); __syncthreads(); } __global__ void columnSum(int * img, int * integral) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int idy = blockIdx.y * blockDim.y + threadIdx.y; int i; printf("idx > %d, idy > %d, img[] > %d, integral[] > %d\n", idx, idy, img[idx + idy*ASIZE], integral[idx + idy*ASIZE]); if (idx == 0) integral[idx + idy*ASIZE] = img[idx + idy*ASIZE]; else integral[idx + idy*ASIZE] = img[idx + (idy*ASIZE)] + integral[idx + (idy-1)*ASIZE]; __syncthreads(); } int main() { // const int SIZE = ASIZE; //int ASIZE = *(int *) argv[1]; int *IMG_HOST, *INTG_HOST; int *IMG_DEV, *INTG_DEV; //Time initialization float timePassed; size_t size = ASIZE*sizeof(int); IMG_HOST = (int *)malloc(size*size); INTG_HOST = (int *)malloc(size*size); cudaMalloc((void **) &IMG_DEV, size*size); cudaMalloc((void **) &INTG_DEV, size*size); int i,j, random; for (i = 0; i < ASIZE; ++i) { //srand(i); for (j = 0; j < ASIZE; ++j) { //srand(j); IMG_HOST[i*ASIZE + j] = i*2 + j*4; } } printArray(IMG_HOST); dim3 grid(NUMBLOCK,NUMBLOCK), block(NUMTHREAD,NUMTHREAD); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpy(IMG_DEV, IMG_HOST, size*size, cudaMemcpyHostToDevice); cudaEventRecord(start, 0); prefixSum <<< grid, block >>> (IMG_DEV, INTG_DEV); cudaThreadSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&timePassed, start,stop); printf("Time Spent Row: %0.5f\n", timePassed); //#################################################################// cudaMemcpy(INTG_HOST, INTG_DEV, size*size, cudaMemcpyDeviceToHost); printArray(INTG_HOST); //cudaMemcpy(INTG_DEV, INTG_HOST, size*size, cudaMemcpyHostToDevice); //INTG_HOST = (int *)malloc(size*size); cudaEventRecord(start, 0); columnSum <<< grid, block >>> (INTG_DEV, INTG_DEV); cudaThreadSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&timePassed, start,stop); printf("Time Spent Column: %0.5f\n", timePassed); cudaMemcpy(INTG_HOST, INTG_DEV, size*size, cudaMemcpyDeviceToHost); printArray(INTG_HOST); //Free up the resources free(IMG_HOST); free(INTG_HOST); cudaFree(IMG_DEV); cudaFree(INTG_DEV); return 0; }
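Both files in this pair build a 4x4 integral image in two passes: prefixSum accumulates along each row, and columnSum then accumulates those partial sums down each column. A small host-side reference of the same two-pass recurrence, useful for checking the kernel output (the function name integral_reference is illustrative, not from the file):

#include <cstdio>

#define N 4   // mirrors ASIZE in the file above

// Two-pass integral image on the host: pass 1 accumulates along each row,
// pass 2 accumulates the row results down each column. After both passes
// out[i][j] holds the sum of img over the rectangle [0..i] x [0..j].
void integral_reference(const int img[N][N], int out[N][N]) {
  for (int i = 0; i < N; ++i)               // pass 1: row prefix sums
    for (int j = 0; j < N; ++j)
      out[i][j] = img[i][j] + (j ? out[i][j - 1] : 0);
  for (int j = 0; j < N; ++j)               // pass 2: column sums of pass-1 output
    for (int i = 1; i < N; ++i)
      out[i][j] += out[i - 1][j];
}

int main() {
  int img[N][N], out[N][N];
  for (int i = 0; i < N; ++i)
    for (int j = 0; j < N; ++j)
      img[i][j] = i * 2 + j * 4;            // same fill rule as the GPU program
  integral_reference(img, out);
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < N; ++j) printf("%d\t", out[i][j]);
    printf("\n");
  }
  return 0;
}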
cf1d2f4d5de14480ee30c28dde8509e473dce10e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include <stdlib.h> #define INITIAL_CAPACITY 1024 /******************** Find the min value **************************/ __global__ void minCompare(int *a, int *set, bool *check, int *capacity) { int cap = capacity[0]; int offset = set[0]; int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int tabx = idx + cap + offset; int taby = idy + cap + offset; if (idx == idy) { return; } int xval = a[tabx]; int yval = a[taby]; if(yval == 0) {} else if (xval == 0) { check[idx] = false; } else if (xval > yval) { check[idx] = false; } } __global__ void cudaMin(int *a, int *set, bool *check, int* min, int *capacity) { int idx = blockIdx.x; if (check[idx]) { min[0] = a[idx + capacity[0] + set[0]]; } } /************************* Find the max value **********************/ __global__ void maxCompare(int *a, bool *check) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx == idy) { return; } int xval = a[idx]; int yval = a[idy]; if (xval < yval) { check[idx] = false; } } __global__ void cudaMax(int *a, bool *check, int* max) { int idx = blockIdx.x; if (check[idx]) { max[0] = a[idx]; } } /*********************** Helper Methods ********************************************/ __global__ void cudaBoolFill(bool *arr, int length) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < length) { arr[i] = true; } } /********************** Min and Max Functions ******************************************/ void findMin(int *arr, const int length, const int offset, int *minimum, int *capacity) { //length - 1 = row, offset = location of first element bool *check; int *set; int *row = (int*) malloc(sizeof(int)); const int intSize = sizeof(int); const int bsize = length * sizeof(bool); hipMalloc((void**)&check, bsize); hipLaunchKernelGGL(( cudaBoolFill), dim3(dim3(length, 1)), dim3(1) , 0, 0, check, length); hipMalloc((void**)&set, intSize); hipMemcpy(set, (int*)&offset, intSize, hipMemcpyHostToDevice); hipMemcpy(row, capacity, intSize, hipMemcpyDeviceToHost); row[0] = row[0] * (length - 1); printf("offset = %d length = %d row = %d\n", offset, length, row[0]); int *row2; hipMalloc((void**) &row2, intSize); hipMemcpy(row2, row, intSize, hipMemcpyHostToDevice); hipLaunchKernelGGL(( minCompare), dim3(dim3(length, length)), dim3(1) , 0, 0, arr, set, check, row2); hipLaunchKernelGGL(( cudaMin), dim3(dim3(length, 1)), dim3(1) , 0, 0, arr, set, check, minimum, row2); hipFree(check); } int findMax(int *arr, const int length) { bool *check; int *max; const int intSize = sizeof(int); const int bsize = length * sizeof(bool); hipMalloc((void**)&check, bsize); hipLaunchKernelGGL(( cudaBoolFill), dim3(dim3(length, 1)), dim3(1) , 0, 0, check, length); hipMalloc((void**)&max, intSize); hipLaunchKernelGGL(( maxCompare), dim3(dim3(length, length)), dim3(1) , 0, 0, arr, check); hipLaunchKernelGGL(( cudaMax), dim3(dim3(length, 1)), dim3(1) , 0, 0, arr, check, max); int maxhost[1]; hipMemcpy(maxhost, max, intSize, hipMemcpyDeviceToHost); hipFree(max); hipFree(check); return maxhost[0]; } /********************* Find the Curl *****************************************/ int findCurl(int *sequence, int *table, int length, int capacity){ int *tempResults; hipMalloc((void **) &tempResults, (length >> 1) * sizeof(int)); int *cap; hipMalloc((void **) &cap, sizeof(int)); hipMemcpy(cap, 
(int*)&capacity, sizeof(int), hipMemcpyHostToDevice); for(int i(0); i < (length >> 1); ++i) { //int *p = &(table[i][(length - 1) - i]); //findMin(p, length, &(tempResults[i])); findMin(table, i+1, (length - 1) - i, &(tempResults[i]), cap); } int *results = (int *) malloc((length >> 1) * sizeof(int)); hipMemcpy(results, tempResults, (length >> 1) * sizeof(int), hipMemcpyDeviceToHost); for(int i(0); i < (length >> 1); ++i) { printf("%d ", results[i]); } printf("\n"); int curl = findMax(tempResults, length); hipFree(tempResults); return curl; } void printTable(int *table, int length, int capacity) { int *CPUTable; CPUTable = (int *) malloc(capacity * capacity * sizeof(int)); hipMemcpy(CPUTable, table, capacity * capacity * sizeof(int), hipMemcpyDeviceToHost); for(int i(0); i < length; ++i) { for(int j(0); j < length; ++j) { printf("%d ", CPUTable[(i * capacity) + j]); } printf("\n"); } free(CPUTable); } __global__ void fillColumn(int *sequence, int *table, int *seqPosition, int *cap) { int row = threadIdx.x + blockIdx.x * blockDim.x; int index = *seqPosition; int capacity = *cap; int value = 1; if(row == index){} else if(sequence[index - (row + 1)] == sequence[index]) { int t = table[(row * capacity) + (index - (row + 1))]; if(t == 0) { value = 2; } else { value = table[(row * capacity) + (index - (row + 1))] + 1; } } table[(row * capacity) + index] = value; } void initializeTable(int *sequence, int *table, int length, int capacity) { int *index; hipMalloc((void **)&index, sizeof(int)); int *cap; hipMalloc((void **)&cap, sizeof(int)); hipMemcpy(cap, (void *)&capacity, sizeof(int), hipMemcpyHostToDevice); for(int i(0); i < length; ++i) { hipMemcpy(index, (void *)&i, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( fillColumn), dim3(dim3(i + 1, 1)), dim3(1) , 0, 0, sequence, table, index, cap); } hipFree(index); } int main() { int *table; int capacity = INITIAL_CAPACITY; hipMalloc((void**)&table, (INITIAL_CAPACITY * INITIAL_CAPACITY) * sizeof(int)); while (1) { hipMemset(table, 0, (capacity * capacity) * sizeof(int)); char buffer[100]; printf("Input a sequence to curl:\n"); scanf("%s", buffer); int i(0); int sequence[INITIAL_CAPACITY]; for (; buffer[i] != '\0'; ++i) { sequence[i] = buffer[i] - '0'; } int seqLength = i; int sequenceByteSize = seqLength * sizeof(int); int *cudaSequence; hipMalloc((void**)&cudaSequence, sequenceByteSize); hipMemcpy(cudaSequence, sequence, sequenceByteSize, hipMemcpyHostToDevice); initializeTable(cudaSequence, table, seqLength, capacity); clock_t start = clock(); int *size; hipMalloc((void**)&size, sizeof(int)); int *cap; hipMalloc((void **)&cap, sizeof(int)); hipMemcpy(cap, (void *)&capacity, sizeof(int), hipMemcpyHostToDevice); int curl = (seqLength == 1) ? 
1: 0; while(curl != 1) { curl = findCurl(cudaSequence, table, seqLength, capacity); printf("curl = %d\n", curl); printTable(table, seqLength, capacity); sequence[seqLength] = curl; hipMemcpy(size, (int*)&seqLength, sizeof(int), hipMemcpyHostToDevice); sequenceByteSize = ++seqLength * sizeof(int); hipMalloc((void**)&cudaSequence, sequenceByteSize); hipMemcpy(cudaSequence, sequence, sequenceByteSize, hipMemcpyHostToDevice); hipLaunchKernelGGL(( fillColumn), dim3(dim3(seqLength, 1)), dim3(1) , 0, 0, cudaSequence, table, size, cap); } clock_t stop = clock(); double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.3fs\n", elapsed); printf("curl is %d\n\nsequence = ", curl); for(i = 0; i < seqLength; ++i){ printf("%d ", sequence[i]); } printf("\n\n"); hipFree(cudaSequence); } return 0; }
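findMin and findMax in the file above both rely on an all-pairs elimination: one boolean flag per element starts true, every pairwise comparison clears the flag of the element that cannot be the extremum, and any surviving index publishes its value. A minimal standalone sketch of the max variant under that reading (kernel and variable names are placeholders, not taken from the file):

#include <cuda_runtime.h>
#include <cstdio>

// All-pairs elimination: thread (x, y) compares a[x] against a[y] and clears
// keep[x] whenever a[x] cannot be the maximum.
__global__ void eliminate(const int *a, bool *keep, int n) {
  int x = blockIdx.x, y = blockIdx.y;
  if (x >= n || y >= n || x == y) return;
  if (a[x] < a[y]) keep[x] = false;
}

// Any index still flagged holds the maximum (ties all survive and write the
// same value), so a second pass publishes it.
__global__ void publish(const int *a, const bool *keep, int *out, int n) {
  int x = blockIdx.x;
  if (x < n && keep[x]) *out = a[x];
}

int main() {
  const int n = 8;
  int h[n] = {3, 9, 1, 7, 9, 2, 0, 5};
  int *d_a, *d_out; bool *d_keep;
  cudaMalloc((void **)&d_a, n * sizeof(int));
  cudaMalloc((void **)&d_out, sizeof(int));
  cudaMalloc((void **)&d_keep, n * sizeof(bool));
  cudaMemcpy(d_a, h, n * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemset(d_keep, 1, n * sizeof(bool));    // byte value 1 reads back as true
  eliminate<<<dim3(n, n), 1>>>(d_a, d_keep, n);
  publish<<<dim3(n, 1), 1>>>(d_a, d_keep, d_out, n);
  int result = 0;
  cudaMemcpy(&result, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("max = %d\n", result);               // expected: 9
  cudaFree(d_a); cudaFree(d_out); cudaFree(d_keep);
  return 0;
}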
cf1d2f4d5de14480ee30c28dde8509e473dce10e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include <stdlib.h> #define INITIAL_CAPACITY 1024 /******************** Find the min value **************************/ __global__ void minCompare(int *a, int *set, bool *check, int *capacity) { int cap = capacity[0]; int offset = set[0]; int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int tabx = idx + cap + offset; int taby = idy + cap + offset; if (idx == idy) { return; } int xval = a[tabx]; int yval = a[taby]; if(yval == 0) {} else if (xval == 0) { check[idx] = false; } else if (xval > yval) { check[idx] = false; } } __global__ void cudaMin(int *a, int *set, bool *check, int* min, int *capacity) { int idx = blockIdx.x; if (check[idx]) { min[0] = a[idx + capacity[0] + set[0]]; } } /************************* Find the max value **********************/ __global__ void maxCompare(int *a, bool *check) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx == idy) { return; } int xval = a[idx]; int yval = a[idy]; if (xval < yval) { check[idx] = false; } } __global__ void cudaMax(int *a, bool *check, int* max) { int idx = blockIdx.x; if (check[idx]) { max[0] = a[idx]; } } /*********************** Helper Methods ********************************************/ __global__ void cudaBoolFill(bool *arr, int length) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i < length) { arr[i] = true; } } /********************** Min and Max Functions ******************************************/ void findMin(int *arr, const int length, const int offset, int *minimum, int *capacity) { //length - 1 = row, offset = location of first element bool *check; int *set; int *row = (int*) malloc(sizeof(int)); const int intSize = sizeof(int); const int bsize = length * sizeof(bool); cudaMalloc((void**)&check, bsize); cudaBoolFill<<< dim3(length, 1), 1 >>>(check, length); cudaMalloc((void**)&set, intSize); cudaMemcpy(set, (int*)&offset, intSize, cudaMemcpyHostToDevice); cudaMemcpy(row, capacity, intSize, cudaMemcpyDeviceToHost); row[0] = row[0] * (length - 1); printf("offset = %d length = %d row = %d\n", offset, length, row[0]); int *row2; cudaMalloc((void**) &row2, intSize); cudaMemcpy(row2, row, intSize, cudaMemcpyHostToDevice); minCompare<<< dim3(length, length), 1 >>>(arr, set, check, row2); cudaMin<<< dim3(length, 1), 1 >>>(arr, set, check, minimum, row2); cudaFree(check); } int findMax(int *arr, const int length) { bool *check; int *max; const int intSize = sizeof(int); const int bsize = length * sizeof(bool); cudaMalloc((void**)&check, bsize); cudaBoolFill<<< dim3(length, 1), 1 >>>(check, length); cudaMalloc((void**)&max, intSize); maxCompare<<< dim3(length, length), 1 >>>(arr, check); cudaMax<<< dim3(length, 1), 1 >>>(arr, check, max); int maxhost[1]; cudaMemcpy(maxhost, max, intSize, cudaMemcpyDeviceToHost); cudaFree(max); cudaFree(check); return maxhost[0]; } /********************* Find the Curl *****************************************/ int findCurl(int *sequence, int *table, int length, int capacity){ int *tempResults; cudaMalloc((void **) &tempResults, (length >> 1) * sizeof(int)); int *cap; cudaMalloc((void **) &cap, sizeof(int)); cudaMemcpy(cap, (int*)&capacity, sizeof(int), cudaMemcpyHostToDevice); for(int i(0); i < (length >> 1); ++i) { //int *p = &(table[i][(length - 1) - i]); //findMin(p, length, &(tempResults[i])); findMin(table, i+1, (length - 1) - i, &(tempResults[i]), cap); } int *results = (int *) 
malloc((length >> 1) * sizeof(int)); cudaMemcpy(results, tempResults, (length >> 1) * sizeof(int), cudaMemcpyDeviceToHost); for(int i(0); i < (length >> 1); ++i) { printf("%d ", results[i]); } printf("\n"); int curl = findMax(tempResults, length); cudaFree(tempResults); return curl; } void printTable(int *table, int length, int capacity) { int *CPUTable; CPUTable = (int *) malloc(capacity * capacity * sizeof(int)); cudaMemcpy(CPUTable, table, capacity * capacity * sizeof(int), cudaMemcpyDeviceToHost); for(int i(0); i < length; ++i) { for(int j(0); j < length; ++j) { printf("%d ", CPUTable[(i * capacity) + j]); } printf("\n"); } free(CPUTable); } __global__ void fillColumn(int *sequence, int *table, int *seqPosition, int *cap) { int row = threadIdx.x + blockIdx.x * blockDim.x; int index = *seqPosition; int capacity = *cap; int value = 1; if(row == index){} else if(sequence[index - (row + 1)] == sequence[index]) { int t = table[(row * capacity) + (index - (row + 1))]; if(t == 0) { value = 2; } else { value = table[(row * capacity) + (index - (row + 1))] + 1; } } table[(row * capacity) + index] = value; } void initializeTable(int *sequence, int *table, int length, int capacity) { int *index; cudaMalloc((void **)&index, sizeof(int)); int *cap; cudaMalloc((void **)&cap, sizeof(int)); cudaMemcpy(cap, (void *)&capacity, sizeof(int), cudaMemcpyHostToDevice); for(int i(0); i < length; ++i) { cudaMemcpy(index, (void *)&i, sizeof(int), cudaMemcpyHostToDevice); fillColumn<<< dim3(i + 1, 1), 1 >>>(sequence, table, index, cap); } cudaFree(index); } int main() { int *table; int capacity = INITIAL_CAPACITY; cudaMalloc((void**)&table, (INITIAL_CAPACITY * INITIAL_CAPACITY) * sizeof(int)); while (1) { cudaMemset(table, 0, (capacity * capacity) * sizeof(int)); char buffer[100]; printf("Input a sequence to curl:\n"); scanf("%s", buffer); int i(0); int sequence[INITIAL_CAPACITY]; for (; buffer[i] != '\0'; ++i) { sequence[i] = buffer[i] - '0'; } int seqLength = i; int sequenceByteSize = seqLength * sizeof(int); int *cudaSequence; cudaMalloc((void**)&cudaSequence, sequenceByteSize); cudaMemcpy(cudaSequence, sequence, sequenceByteSize, cudaMemcpyHostToDevice); initializeTable(cudaSequence, table, seqLength, capacity); clock_t start = clock(); int *size; cudaMalloc((void**)&size, sizeof(int)); int *cap; cudaMalloc((void **)&cap, sizeof(int)); cudaMemcpy(cap, (void *)&capacity, sizeof(int), cudaMemcpyHostToDevice); int curl = (seqLength == 1) ? 1: 0; while(curl != 1) { curl = findCurl(cudaSequence, table, seqLength, capacity); printf("curl = %d\n", curl); printTable(table, seqLength, capacity); sequence[seqLength] = curl; cudaMemcpy(size, (int*)&seqLength, sizeof(int), cudaMemcpyHostToDevice); sequenceByteSize = ++seqLength * sizeof(int); cudaMalloc((void**)&cudaSequence, sequenceByteSize); cudaMemcpy(cudaSequence, sequence, sequenceByteSize, cudaMemcpyHostToDevice); fillColumn<<< dim3(seqLength, 1), 1 >>>(cudaSequence, table, size, cap); } clock_t stop = clock(); double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.3fs\n", elapsed); printf("curl is %d\n\nsequence = ", curl); for(i = 0; i < seqLength; ++i){ printf("%d ", sequence[i]); } printf("\n\n"); cudaFree(cudaSequence); } return 0; }
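Taken together, fillColumn, findCurl and the driver loop in this pair appear to compute the curling number of the input sequence: the largest k such that the sequence ends in some non-empty block repeated k times back to back, with the sequence extended by that value each round until the curl drops to 1. A small host-side reference of a single curl evaluation, as a sketch under that reading (names and test sequences are illustrative):

#include <cstdio>
#include <vector>
#include <algorithm>

// Curl of a sequence: the largest k such that the sequence ends with some
// non-empty block y repeated k times (s = x y^k). This is the quantity the
// GPU table/min/max passes above appear to compute per iteration.
int curl(const std::vector<int> &s) {
  int n = (int)s.size(), best = 1;
  for (int p = 1; p <= n / 2; ++p) {          // candidate block length
    int k = 1;
    while ((k + 1) * p <= n) {                // can another copy of the block fit?
      bool match = true;
      for (int i = 0; i < p; ++i)
        if (s[n - 1 - i] != s[n - 1 - i - k * p]) { match = false; break; }
      if (!match) break;
      ++k;
    }
    best = std::max(best, k);
  }
  return best;
}

int main() {
  std::vector<int> a = {2, 3, 2, 3};          // trailing block "2 3" repeats twice
  std::vector<int> b = {1, 2, 2, 2};          // trailing block "2" repeats three times
  printf("curl(a) = %d\n", curl(a));          // expected: 2
  printf("curl(b) = %d\n", curl(b));          // expected: 3
  return 0;
}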
bd498d2933b9d79a90c16df6cedd1e96c9ba7b5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tensor.h" int Tensor::num = 0; __global__ void setDataKernel(Tensor* dev, dtype pValue) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dev->size()) (*dev)(i) = pValue; } void Tensor::operator[](dtype aValue) { for (int i = 0; i < mSize; i++) data[i] = aValue; } void Tensor::operator[](const initializer_list<dtype>& aData) { if (aData.size() == size()) copy(aData.begin(), aData.end(), data); else { printf("%s[size = %d] and input data[size = %d] size are different \n", mName, size(), aData.size()); exit(1); } } void Tensor::printTensor(int floatLength, int floatPrecision, int floor) { cout << " " << mName << " ["; printShape(); cout << "]" << endl; int maximumWidth = floatLength + 2; int indexWidth = floatLength; string gap = " "; // Actual output number of column and row int P_COL, P_ROW; switch (mDimension) { case 1: if (mSize <= maximumWidth) { cout << setw(indexWidth) << "" << " "; for (int x = 0; x < shape(0); x++) { string temp = "[" + to_string(x) + "]"; cout << setw(floatLength) << right << temp << " "; } cout << endl; cout << setw(indexWidth) << "[0]" << " "; for (int x = 0; x < mSize; x++) cout << setw(floatLength) << f_to_s((*this)(x), floatLength, floatPrecision, floor) << " "; cout << endl; } else { cout << setw(indexWidth) << "" << " "; for (int x = 0; x < maximumWidth / 2; x++) { string temp = "[" + to_string(x) + "]"; cout << setw(floatLength) << right << temp << " "; } cout << setw(floatLength) << right << "..." << " "; for (int x = shape(0) - maximumWidth / 2; x < shape(0); x++) { string temp = "[" + to_string(x) + "]"; cout << setw(floatLength) << right << temp << " "; } cout << endl; cout << setw(indexWidth) << "[0]" << " "; for (int x = 0; x < maximumWidth / 2; x++) cout << setw(floatLength) << f_to_s((*this)(x), floatLength, floatPrecision, floor) << " "; cout << setw(floatLength) << right << "..." 
<< " "; for (int x = shape(0) - maximumWidth / 2; x < shape(0); x++) cout << setw(floatLength) << f_to_s((*this)(x), floatLength, floatPrecision, floor) << " "; cout << endl; } break; case 2: if (col() > 10) P_COL = 10; else P_COL = col(); if (row() > 10) P_ROW = 10; else P_ROW = row(); // Print blank cout << setw(indexWidth) << ""; // Print column index for (int col = 0; col < P_COL; col++) { cout << setw(indexWidth) << right << ("[" + to_string(col) + "]") << gap; } cout << endl; // Print data for (int row = 0; row < P_ROW; row++) { cout << setw(indexWidth) << right << ("[" + to_string(row) + "]"); // Print row index for (int col = 0; col < P_COL; col++) cout << setw(floatLength) << right << f_to_s((*this)(col, row), floatLength, floatPrecision, floor) << gap; cout << endl; } break; case 3: if (col() > 10) P_COL = 10; else P_COL = col(); if (row() > 10) P_ROW = 10; else P_ROW = row(); for (int z = 0; z < shape(0); z++) { // Print blank cout << setw(indexWidth) << ("[" + to_string(z) + "]"); // Print column index for (int col = 0; col < P_COL; col++) { cout << setw(indexWidth) << right << ("[" + to_string(col) + "]") << gap; } cout << endl; // Print data for (int row = 0; row < P_ROW; row++) { cout << setw(indexWidth) << right << ("[" + to_string(row) + "]"); // Print row index for (int col = 0; col < P_COL; col++) cout << setw(floatLength) << right << f_to_s((*this)(z, col, row), floatLength, floatPrecision, floor) << gap; cout << endl; } cout << endl; } break; case 4: // NCHW if (col() > 10) P_COL = 10; else P_COL = col(); if (row() > 10) P_ROW = 10; else P_ROW = row(); for (int w = 0; w < shape(0); w++) { for (int z = 0; z < shape(1); z++) { // Print blank cout << setw(indexWidth) << ("[" + to_string(w) + ", " + to_string(z) + "]"); // Print column index for (int col = 0; col < P_COL; col++) { cout << setw(indexWidth) << right << ("[" + to_string(col) + "]") << gap; } cout << endl; // Print data for (int row = 0; row < P_ROW; row++) { cout << setw(indexWidth) << right << ("[" + to_string(row) + "]"); // Print row index for (int col = 0; col < P_COL; col++) cout << setw(floatLength) << right << f_to_s((*this)(w, z, col, row), floatLength, floatPrecision, floor) << gap; cout << endl; } cout << endl; } } break; } } bool Tensor::isSame(Tensor& other) { for (int i = 0; i < mDimension; i++) if (shape(i) != other.shape(i)) return false; return true; } void Tensor::swapDimension(int dim1, int dim2) { hipSetDevice(mDeviceId); if (dim1 >= mDimension || dim1 < 0 || dim2 >= mDimension || dim2 < 0) { cout << "Cannot access " << mName << " " << dim1 << " or " << dim2 << endl; exit(EXIT_FAILURE); } int temp = shape(dim1); mShape[dim1] = shape(dim2); mShape[dim2] = temp; setCumulatedDimension(); CUDA_CHECK(hipMemcpy(devShape, mShape, sizeof(int) * dimension(), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(devCumulatedDimension, cumulatedDimension, sizeof(int) * dimension(), hipMemcpyHostToDevice)); } void Tensor::setShape(const initializer_list<int>& aShape) { mDimension = aShape.size(); mShape = new int[mDimension]; copy(aShape.begin(), aShape.end(), mShape); mSize = 1; initializer_list<int>::iterator iter = aShape.begin(); while (iter != aShape.end()) { mSize *= *iter; iter++; } } void Tensor::reshape(const initializer_list<int>& aShape, bool forceReshape) { int tempSize = 1; initializer_list<int>::iterator iter = aShape.begin(); while (iter != aShape.end()) { tempSize *= *iter; iter++; } if (!forceReshape && tempSize != mSize) { cout << "Size of new shape is different from original tensor size " << 
tempSize << " != " << mSize << ", if you want to change size forcedly you need to set argument [forceReshape = true]." << endl; exit(1); } delete[] mShape; delete[] cumulatedDimension; mDimension = aShape.size(); mShape = new int[mDimension]; copy(aShape.begin(), aShape.end(), mShape); mSize = tempSize; setCumulatedDimension(); } void Tensor::setCumulatedDimension() { cumulatedDimension = new int[mDimension]; cumulatedDimension[mDimension - 1] = 1; for (int i = mDimension - 2; i >= 0; i--) { cumulatedDimension[i] = cumulatedDimension[i + 1] * shape(i + 1); } }
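setCumulatedDimension above is the usual row-major stride construction: the last dimension gets stride 1 and every earlier stride is the product of the later extents, so a multi-index maps to a flat offset via a dot product with the strides. A minimal standalone sketch of that mapping (helper names and the example shape are illustrative):

#include <cstdio>

// Row-major strides as in setCumulatedDimension: stride[last] = 1,
// stride[i] = stride[i + 1] * shape[i + 1].
void make_strides(const int *shape, int dims, int *stride) {
  stride[dims - 1] = 1;
  for (int i = dims - 2; i >= 0; --i)
    stride[i] = stride[i + 1] * shape[i + 1];
}

// Flat offset of a multi-index is the dot product with the strides.
int flat_index(const int *idx, const int *stride, int dims) {
  int off = 0;
  for (int i = 0; i < dims; ++i) off += idx[i] * stride[i];
  return off;
}

int main() {
  int shape[4] = {2, 3, 4, 5};      // e.g. a small NCHW block
  int stride[4];
  make_strides(shape, 4, stride);   // -> {60, 20, 5, 1}
  int idx[4] = {1, 2, 3, 4};
  printf("offset = %d\n", flat_index(idx, stride, 4));  // 1*60 + 2*20 + 3*5 + 4 = 119
  return 0;
}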
bd498d2933b9d79a90c16df6cedd1e96c9ba7b5e.cu
#include "tensor.h" int Tensor::num = 0; __global__ void setDataKernel(Tensor* dev, dtype pValue) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dev->size()) (*dev)(i) = pValue; } void Tensor::operator[](dtype aValue) { for (int i = 0; i < mSize; i++) data[i] = aValue; } void Tensor::operator[](const initializer_list<dtype>& aData) { if (aData.size() == size()) copy(aData.begin(), aData.end(), data); else { printf("%s[size = %d] and input data[size = %d] size are different \n", mName, size(), aData.size()); exit(1); } } void Tensor::printTensor(int floatLength, int floatPrecision, int floor) { cout << " " << mName << " ["; printShape(); cout << "]" << endl; int maximumWidth = floatLength + 2; int indexWidth = floatLength; string gap = " "; // Actual output number of column and row int P_COL, P_ROW; switch (mDimension) { case 1: if (mSize <= maximumWidth) { cout << setw(indexWidth) << "" << " "; for (int x = 0; x < shape(0); x++) { string temp = "[" + to_string(x) + "]"; cout << setw(floatLength) << right << temp << " "; } cout << endl; cout << setw(indexWidth) << "[0]" << " "; for (int x = 0; x < mSize; x++) cout << setw(floatLength) << f_to_s((*this)(x), floatLength, floatPrecision, floor) << " "; cout << endl; } else { cout << setw(indexWidth) << "" << " "; for (int x = 0; x < maximumWidth / 2; x++) { string temp = "[" + to_string(x) + "]"; cout << setw(floatLength) << right << temp << " "; } cout << setw(floatLength) << right << "..." << " "; for (int x = shape(0) - maximumWidth / 2; x < shape(0); x++) { string temp = "[" + to_string(x) + "]"; cout << setw(floatLength) << right << temp << " "; } cout << endl; cout << setw(indexWidth) << "[0]" << " "; for (int x = 0; x < maximumWidth / 2; x++) cout << setw(floatLength) << f_to_s((*this)(x), floatLength, floatPrecision, floor) << " "; cout << setw(floatLength) << right << "..." 
<< " "; for (int x = shape(0) - maximumWidth / 2; x < shape(0); x++) cout << setw(floatLength) << f_to_s((*this)(x), floatLength, floatPrecision, floor) << " "; cout << endl; } break; case 2: if (col() > 10) P_COL = 10; else P_COL = col(); if (row() > 10) P_ROW = 10; else P_ROW = row(); // Print blank cout << setw(indexWidth) << ""; // Print column index for (int col = 0; col < P_COL; col++) { cout << setw(indexWidth) << right << ("[" + to_string(col) + "]") << gap; } cout << endl; // Print data for (int row = 0; row < P_ROW; row++) { cout << setw(indexWidth) << right << ("[" + to_string(row) + "]"); // Print row index for (int col = 0; col < P_COL; col++) cout << setw(floatLength) << right << f_to_s((*this)(col, row), floatLength, floatPrecision, floor) << gap; cout << endl; } break; case 3: if (col() > 10) P_COL = 10; else P_COL = col(); if (row() > 10) P_ROW = 10; else P_ROW = row(); for (int z = 0; z < shape(0); z++) { // Print blank cout << setw(indexWidth) << ("[" + to_string(z) + "]"); // Print column index for (int col = 0; col < P_COL; col++) { cout << setw(indexWidth) << right << ("[" + to_string(col) + "]") << gap; } cout << endl; // Print data for (int row = 0; row < P_ROW; row++) { cout << setw(indexWidth) << right << ("[" + to_string(row) + "]"); // Print row index for (int col = 0; col < P_COL; col++) cout << setw(floatLength) << right << f_to_s((*this)(z, col, row), floatLength, floatPrecision, floor) << gap; cout << endl; } cout << endl; } break; case 4: // NCHW if (col() > 10) P_COL = 10; else P_COL = col(); if (row() > 10) P_ROW = 10; else P_ROW = row(); for (int w = 0; w < shape(0); w++) { for (int z = 0; z < shape(1); z++) { // Print blank cout << setw(indexWidth) << ("[" + to_string(w) + ", " + to_string(z) + "]"); // Print column index for (int col = 0; col < P_COL; col++) { cout << setw(indexWidth) << right << ("[" + to_string(col) + "]") << gap; } cout << endl; // Print data for (int row = 0; row < P_ROW; row++) { cout << setw(indexWidth) << right << ("[" + to_string(row) + "]"); // Print row index for (int col = 0; col < P_COL; col++) cout << setw(floatLength) << right << f_to_s((*this)(w, z, col, row), floatLength, floatPrecision, floor) << gap; cout << endl; } cout << endl; } } break; } } bool Tensor::isSame(Tensor& other) { for (int i = 0; i < mDimension; i++) if (shape(i) != other.shape(i)) return false; return true; } void Tensor::swapDimension(int dim1, int dim2) { cudaSetDevice(mDeviceId); if (dim1 >= mDimension || dim1 < 0 || dim2 >= mDimension || dim2 < 0) { cout << "Cannot access " << mName << " " << dim1 << " or " << dim2 << endl; exit(EXIT_FAILURE); } int temp = shape(dim1); mShape[dim1] = shape(dim2); mShape[dim2] = temp; setCumulatedDimension(); CUDA_CHECK(cudaMemcpy(devShape, mShape, sizeof(int) * dimension(), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(devCumulatedDimension, cumulatedDimension, sizeof(int) * dimension(), cudaMemcpyHostToDevice)); } void Tensor::setShape(const initializer_list<int>& aShape) { mDimension = aShape.size(); mShape = new int[mDimension]; copy(aShape.begin(), aShape.end(), mShape); mSize = 1; initializer_list<int>::iterator iter = aShape.begin(); while (iter != aShape.end()) { mSize *= *iter; iter++; } } void Tensor::reshape(const initializer_list<int>& aShape, bool forceReshape) { int tempSize = 1; initializer_list<int>::iterator iter = aShape.begin(); while (iter != aShape.end()) { tempSize *= *iter; iter++; } if (!forceReshape && tempSize != mSize) { cout << "Size of new shape is different from original tensor size 
" << tempSize << " != " << mSize << ", if you want to change size forcedly you need to set argument [forceReshape = true]." << endl; exit(1); } delete[] mShape; delete[] cumulatedDimension; mDimension = aShape.size(); mShape = new int[mDimension]; copy(aShape.begin(), aShape.end(), mShape); mSize = tempSize; setCumulatedDimension(); } void Tensor::setCumulatedDimension() { cumulatedDimension = new int[mDimension]; cumulatedDimension[mDimension - 1] = 1; for (int i = mDimension - 2; i >= 0; i--) { cumulatedDimension[i] = cumulatedDimension[i + 1] * shape(i + 1); } }
8a02eb8569c2428303ca5d866bfb98e431e072b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <string.h> #include <cutil.h> #include <miscmath.h> #include <amgx_cusparse.h> #include <thrust/copy.h> #include <basic_types.h> #include <util.h> #include <ld_functions.h> #include <logger.h> #include <thrust/logical.h> #include <profile.h> #include <sm_utils.inl> #include <texture.h> #include <typeinfo> #include "solvers/dense_lu_solver.h" #include "solvers/block_common_solver.h" #include "amgx_types/util.h" #include <algorithm> //trick to get nvcc to print a defined value //(__CUDA_ARCH__) @ compile time: // namespace //unnamed namespace { struct X { private: X(void) {} }; template<int N> struct Print_N_atCompile { X array[N]; }; }//end unnamed namespace namespace amgx { namespace dense_lu_solver { enum { WARP_SIZE = 32, CTA_SIZE = 128 }; // // supporting kernels // template< typename T, typename Tv, int WARP_SIZE > __global__ void csr_to_dense_kernel( const int num_rows, const int num_cols, const int block_num_rows, const int block_num_cols, const int *__restrict A_csr_rows, const int *__restrict A_csr_cols, const T *__restrict A_csr_vals, const int *__restrict A_csr_diag, T *__restrict A_dense, const int lda) { // Note: // To handle block CSR, the sparsity pattern csr_rows and csr_cols only store // the typical csr info assuming each block is a scalar. // The values in csr_vals has all entries in the blocks, using row major to // store the block. So we need the number of entries in each block as stride. const int block_mxn = block_num_rows * block_num_cols; // Each lane copies one entry in a block and iterate through row sparsity pattern. // Essentially one warp per row-block. For 4x4, we have 16 working threads per warp. 
const int lane_id = threadIdx.x % WARP_SIZE; // find the (row,col) local to a block const int block_row = lane_id / block_num_cols; const int block_col = lane_id % block_num_cols; // These are wasted threads per warp if ( block_row >= block_num_rows ) { return; } // The first row to consider. One row per warp. int row = (blockIdx.x * blockDim.x + threadIdx.x) / WARP_SIZE; const int row_offset = blockDim.x * gridDim.x / WARP_SIZE; for ( ; row < num_rows ; row += row_offset ) { int dense_row = row * block_num_rows + block_row; // Iterate over each row and copy the elements into col-major A_dense int row_end = A_csr_rows[row + 1]; for (int row_it = A_csr_rows[row]; row_it < row_end ; ++row_it ) { int col = A_csr_cols[row_it]; if ( col >= num_rows ) { continue; } // Skip entries corresponding to halo int dense_col = col * block_num_cols + block_col; A_dense[dense_col * lda + dense_row] = A_csr_vals[block_mxn * row_it + lane_id]; } // copy diagonal block if ( A_csr_diag ) { int diag_it = A_csr_diag[row]; int dense_col = row * block_num_cols + block_col; // diag means row=col A_dense[dense_col * lda + dense_row] = A_csr_vals[block_mxn * diag_it + lane_id]; } } } template< int N, bool ROW_MAJOR, int WARP_SIZE, typename Value_type > static __device__ __forceinline__ Value_type reduce_distributed_vectors( Value_type x, int is_leader, unsigned int active_mask ) { if ( N & (N - 1) ) { #pragma unroll for ( int i = 1 ; i < N ; ++i ) { Value_type other_x = utils::shfl_down( x, ROW_MAJOR ? i : N * i, WARP_SIZE, active_mask ); if ( is_leader ) { x += other_x; } } } else { #pragma unroll for ( int i = 1 ; i < N ; i <<= 1 ) { x += utils::shfl_xor( x, ROW_MAJOR ? i : N * i, WARP_SIZE, active_mask ); } } return x; } template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void b_minus_A_halo_x( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *new_rhs, const int num_owned_rows) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_NxN; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN]; // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. 
// At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } // Determine which NxN block the threads work with. int a_row_it = num_owned_rows; if ( is_active ) { a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; } unsigned int active_mask = utils::ballot(is_active); // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_owned_rows; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = a_row_it; // Load one block of B. Vector_type my_bmAx(0); if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id_mod_NxN_div_N]); } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_bmAx = b[N * a_row_id + lane_id_mod_NxN_mod_N]; } } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } // Determine if the column is halo column int a_col_is_valid = (a_col_id != -1) && (a_col_id >= num_owned_rows); // Count the number of active columns. // int vote = __ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < NxN ; k += N ) { int my_k = k + lane_id_mod_NxN_div_N; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, WARP_SIZE, active_mask); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k, WARP_SIZE, active_mask ); Vector_type my_x(0); if ( uniform_a_col_id != -1 && uniform_a_col_is_valid) { my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]); //printf("loading entry %d, num_rows = %d, my_x = %f\n",uniform_a_col_id,num_owned_rows,my_x); } my_s_mem[lane_id_mod_NxN] = my_x; // Load N blocks of A. #pragma unroll for ( int i = 0 ; i < N ; ++i ) { int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N]; } else { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N]; } } } // Loop over k } // Loop over aColIt Vector_type my_Einv = (lane_id_mod_NxN == 0 || lane_id_mod_NxN == 5 || lane_id_mod_NxN == 10 || lane_id_mod_NxN == 15) ? 1. : 0.; // Reduce bmAx terms. int is_leader = lane_id_mod_NxN_div_N == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_NxN_mod_N == 0; } my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader, active_mask ); // Update the shared terms. 
if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_s_mem[lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_s_mem[lane_id_mod_NxN_mod_N] = my_bmAx; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N]; } else { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_div_N]; } // Reduce bmAx terms. my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader, active_mask ); // Store the results. if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { new_rhs[N * a_row_id + lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { new_rhs[N * a_row_id + lane_id_mod_NxN_mod_N] = my_bmAx; } } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Vector_type, typename Matrix_type, int N> void distributed_rhs_mod_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *new_rhs, const int num_owned_rows, const int row_major, const int has_external_diag ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ROWS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = ::min( 4096, (num_owned_rows + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. int code = 2 * (row_major ? 1 : 0) + (has_external_diag ? 1 : 0); switch ( code ) { case 0: // Column-major, no external diagonal. hipLaunchKernelGGL(( b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; case 1: // Column-major, external diagonal. hipLaunchKernelGGL(( b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; case 2: // Row-major, no external diagonal. hipLaunchKernelGGL(( b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; case 3: // Row-major, external diagonal. 
hipLaunchKernelGGL(( b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I> void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: distributed_rhs_mod( const Vector_d &x, const Vector_d &b, Vector_d &new_rhs) { const Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); switch (A->get_block_dimy()) { case 1: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 1>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 2: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 2>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 3: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 3>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 4: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 4>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 5: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 5>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; } } // copy non zero elements only template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: csr_to_dense( ) { const Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); const int block_size = 256; const int num_warps = block_size / WARP_SIZE; const int grid_size = ::min(4096, (A->get_num_rows() + num_warps - 1) / num_warps); hipStream_t stream = thrust::global_thread_handle::get_stream(); hipLaunchKernelGGL(( csr_to_dense_kernel<Matrix_data, Vector_data, WARP_SIZE>) , dim3(grid_size), dim3(block_size), 0, stream, A->get_num_rows(), A->get_num_cols(), A->get_block_dimx(), A->get_block_dimy(), A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->hasProps(DIAG) ? 
A->diag.raw() : NULL, m_dense_A, m_lda ); cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::cudense_getrf() { int wsize = 0; cusolverStatus_t status1 = cusolverDnXgetrf_bufferSize(m_cuds_handle, m_num_rows, m_num_cols, m_dense_A, m_lda, &wsize); if ( status1 != CUSOLVER_STATUS_SUCCESS) { FatalError( "Failed kernel in DenseLU", AMGX_ERR_INTERNAL); } cudaCheckError(); allocMem(m_trf_wspace, wsize, false); status1 = cusolverDnXgetrf(m_cuds_handle, m_num_rows, m_num_cols, m_dense_A, m_lda, m_trf_wspace, m_ipiv, m_cuds_info); if ( status1 != CUSOLVER_STATUS_SUCCESS) { FatalError( "failed kernel in Dense LU is likely due to invalid input parameters", AMGX_ERR_INTERNAL); } else { int t_info; hipMemcpy(&t_info, m_cuds_info, sizeof(int), hipMemcpyDefault); if (t_info != 0) { FatalError( "Fail to get info from cudense", AMGX_ERR_INTERNAL); } else { // We follow the standard established by Lapack and used in cudense. if (t_info > 0) { FatalError( "Dense LU factorization failed due to a singular matrix", AMGX_ERR_INTERNAL); } else if (t_info < 0) { FatalError( "Invalid input parameter(s) to dense LU", AMGX_ERR_INTERNAL); } } } cudaCheckError(); if (m_trf_wspace) { thrust::global_thread_handle::hipFreeAsync(m_trf_wspace); m_trf_wspace = 0; } } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::cudense_getrs( Vector_d &x ) { //Solve L*X = RHS cusolverStatus_t status = cusolverDnXgetrs(m_cuds_handle, HIPBLAS_OP_N, m_num_rows, 1, m_dense_A, m_lda, m_ipiv, (Matrix_data *)(x.raw()), m_num_rows, m_cuds_info); if (status != CUSOLVER_STATUS_SUCCESS) { FatalError( "cuSolver trsv failed to solve Lx=rhs", AMGX_ERR_INTERNAL); } cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > template< class DataType, class IndexType > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: allocMem(DataType *&ptr, IndexType numEntry, bool initToZero) { if ( ptr != NULL ) { thrust::global_thread_handle::hipFreeAsync(ptr); } cudaCheckError(); size_t sz = numEntry * sizeof(DataType); thrust::global_thread_handle::hipMalloc((void **)&ptr, sz); cudaCheckError(); if (initToZero) { hipMemset(ptr, 0x0, sz); cudaCheckError(); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: DenseLUSolver(AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng) : Solver<Config_d>(cfg, cfg_scope, tmng), m_cuds_handle(0), m_num_rows(0), m_num_cols(0), m_lda(0), m_dense_A(0), m_ipiv(0), m_cuds_info(0), m_trf_wspace(0) { // Allocate a handle for cudense cusolverStatus_t status = hipsolverDnCreate(&m_cuds_handle); if ( status != CUSOLVER_STATUS_SUCCESS ) { FatalError( "Could not create the CUDENSE handle", AMGX_ERR_CUDA_FAILURE ); } // Allocate a handle for cublas hipblasStatus_t cublasStatus = hipblasCreate(&m_cublas_handle); if ( cublasStatus != HIPBLAS_STATUS_SUCCESS ) { FatalError( "Could not create the CUBLAS handle", AMGX_ERR_CUDA_FAILURE ); } // Define the cudense stream. 
status = hipsolverDnSetStream(m_cuds_handle, thrust::global_thread_handle::get_stream()); if ( status != CUSOLVER_STATUS_SUCCESS ) { FatalError( "Could not set the stream for CUDENSE", AMGX_ERR_CUDA_FAILURE ); } // Make sure we don't run more than 1 iteration. this->set_max_iters(1); allocMem(m_cuds_info, sizeof(int), false); // Determine if the scalable coarse solve optimisation is enabled m_enable_exact_solve = ( cfg.getParameter<AlgorithmType>( "algorithm", cfg_scope) == CLASSICAL && cfg.getParameter<int>( "exact_coarse_solve", cfg_scope) == 1); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::~DenseLUSolver() { if (m_cuds_handle) { hipsolverDnDestroy(m_cuds_handle); } if (m_cublas_handle) { hipblasDestroy(m_cublas_handle); } if (m_dense_A) { thrust::global_thread_handle::hipFreeAsync(m_dense_A); } if (m_ipiv) { thrust::global_thread_handle::hipFreeAsync(m_ipiv); } if (m_trf_wspace) { thrust::global_thread_handle::hipFreeAsync(m_trf_wspace); } if (m_cuds_info) { thrust::global_thread_handle::hipFreeAsync(m_cuds_info); } cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::solve_init(Vector_d &, Vector_d &, bool) {} template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::solve_finalize(Vector_d &, Vector_d &) {} // Offset the local row offsets to global row offsets template <class IndexType> __global__ void local_row_offsets_to_global( int num_rows, int offset, IndexType* local_Arows) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i >= num_rows) { return; } local_Arows[i] += offset; } // Offset local packed column indices to global unpacked indices template <class IndexType, class L2GType> __global__ void local_col_indices_to_global( int nnz, int num_rows, int offset, IndexType* local_Acols, L2GType* l2g) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i >= nnz) { return; } if(local_Acols[i] >= num_rows) { local_Acols[i] = l2g[local_Acols[i] - num_rows]; } else { local_Acols[i] += offset; } } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: solver_setup(bool reuse_matrix_structure) { Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); if (!A) { FatalError("DenseLUSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } ViewType oldView = A->currentView(); A->setViewExterior(); if(A->is_matrix_distributed() && m_enable_exact_solve) { int rank = A->manager->global_id(); int nranks = A->manager->get_num_partitions(); MPI_Comm comm = A->manager->getComms()->get_mpi_comm(); int offset, num_rows, nnz; A->getOffsetAndSizeForView(OWNED, &offset, &num_rows); A->getNnzForView(OWNED, &nnz); m_num_rows = A->manager->num_rows_global * A->get_block_dimx(); m_num_cols = A->manager->num_rows_global * A->get_block_dimy(); m_lda = m_num_rows; // col-major // Allocate mem for cudense pivoting sequence. allocMem(m_ipiv, m_num_rows, false); // Allocate memory to store the dense A and initialize to zero. 
allocMem(m_dense_A, m_num_cols * m_num_rows, true); // Much of the data can be reused if we are performing a resetup if (!reuse_matrix_structure) { // Gather the number of non zeros on each rank A->manager->getComms()->all_gather(nnz, nz_all, nranks); // Gather the number of rows on each rank A->manager->getComms()->all_gather(num_rows, row_all, nranks); // Get the number of non zeros on all ranks m_nnz_global = thrust::reduce(nz_all.begin(), nz_all.end()); // Turn the non-zero counts into displacements nz_displs.resize(nranks); thrust::exclusive_scan(nz_all.begin(), nz_all.end(), nz_displs.begin()); // Turn the number of rows into displacements row_displs.resize(nranks); thrust::exclusive_scan(row_all.begin(), row_all.end(), row_displs.begin()); IVector_d local_Acols_d(nnz); IVector_d local_Arows_d(num_rows); thrust::copy(A->col_indices.begin(), A->col_indices.begin() + nnz, local_Acols_d.begin()); thrust::copy(A->row_offsets.begin(), A->row_offsets.begin() + num_rows, local_Arows_d.begin()); // XXX Local to global map is the current limiting factor to enabling this // code for the aggregation based path. It's not clear whether there is // a structure that provides the same inverse mapping with aggregation. // Note that at one point inverse_renumbering was tested for aggregation // but didn't appear to work in all cases. // Convert the local column indices and row offsets to the global index space constexpr int nthreads = 128; int nblocks = nnz / nthreads + 1; hipLaunchKernelGGL(( local_col_indices_to_global), dim3(nblocks), dim3(nthreads), 0, 0, nnz, num_rows, row_displs[rank], local_Acols_d.raw(), A->manager->local_to_global_map.raw()); nblocks = num_rows / nthreads + 1; hipLaunchKernelGGL(( local_row_offsets_to_global), dim3(nblocks), dim3(nthreads), 0, 0, num_rows, nz_displs[rank], local_Arows_d.raw()); // Copy the transformed indices to the host IVector_h local_Acols_h(nnz); IVector_h local_Arows_h(num_rows); thrust::copy(local_Acols_d.begin(), local_Acols_d.end(), local_Acols_h.begin()); thrust::copy(local_Arows_d.begin(), local_Arows_d.end(), local_Arows_h.begin()); // Gather the local matrix structure redundantly to every rank IVector_h Acols_global_h(m_nnz_global); A->manager->getComms()->all_gather_v(local_Acols_h, nnz, Acols_global_h, nz_all, nz_displs); // Note: Copy the local data to global without guard value IVector_h Arows_global_h(m_num_rows + 1); A->manager->getComms()->all_gather_v(local_Arows_h, num_rows, Arows_global_h, row_all, row_displs); // Manually set the guard value on the global matrix Arows_global_h[m_num_rows] = m_nnz_global; Acols_global.resize(m_nnz_global); Arows_global.resize(m_num_rows + 1); thrust::copy(Acols_global_h.begin(), Acols_global_h.end(), Acols_global.begin()); thrust::copy(Arows_global_h.begin(), Arows_global_h.end(), Arows_global.begin()); } // Fetch to the host a copy of the local sparse matrix MVector_h local_Avals_h(nnz); thrust::copy(A->values.begin(), A->values.begin() + nnz, local_Avals_h.begin()); // Gather the matrix values to all ranks MVector_h Avals_global_h(m_nnz_global); A->manager->getComms()->all_gather_v(local_Avals_h, nnz, Avals_global_h, nz_all, nz_displs); allocMem(m_dense_A, m_num_cols * m_lda, true); MVector_d Avals_global(m_nnz_global); thrust::copy(Avals_global_h.begin(), Avals_global_h.end(), Avals_global.begin()); const int block_size = 256; const int num_warps = block_size / WARP_SIZE; const int grid_size = ::min(4096, (A->get_num_rows() + num_warps - 1) / num_warps); hipStream_t stream = 
thrust::global_thread_handle::get_stream(); hipLaunchKernelGGL(( csr_to_dense_kernel<Matrix_data, Vector_data, WARP_SIZE>), dim3(grid_size), dim3(block_size), 0, stream, m_num_rows, m_num_cols, A->get_block_dimx(), A->get_block_dimy(), Arows_global.raw(), Acols_global.raw(), Avals_global.raw(), A->hasProps(DIAG) ? A->diag.raw() : NULL, m_dense_A, m_lda); hipStreamSynchronize(stream); cudaCheckError(); } else { ViewType oldView = A->currentView(); A->setViewExterior(); m_num_rows = A->get_num_rows() * A->get_block_dimx(); // don't use A->get_num_cols() because A is rectangular. // Only the diagonal block owned by this rank is factored. m_num_cols = A->get_num_rows() * A->get_block_dimy(); m_lda = m_num_rows; // col-major // Allocate mem for cudense pivoting sequence. allocMem(m_ipiv, m_num_rows, false); // Allocate memory to store the dense A and initialize to zero. allocMem(m_dense_A, m_num_cols * m_lda, true); csr_to_dense(); // copy sparse A to dense_A } cudense_getrf(); // do LU factor A->setView(oldView); } // There is one subtle point here (for inexact solve): // We only do LU on the diagonal blocks associated with each rank. // Halo is used to update the right-hand-side (RHS) vector. // For multi GPU cases, this is essentially block Jacobi. Since the block size // is the size of each partition, this is OK trade-off between accuracy and runtime. template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > bool DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: solve_iteration(Vector_d &rhs, Vector_d &x, bool xIsZero) { Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); ViewType oldView = A->currentView(); A->setViewExterior(); if(A->is_matrix_distributed() && m_enable_exact_solve) { int offset, num_rows; A->getOffsetAndSizeForView(OWNED, &offset, &num_rows); cusolverStatus_t status; if (A->is_matrix_distributed()) { int rank = A->manager->global_id(); int nranks = A->manager->get_num_partitions(); MPI_Comm comm = A->manager->getComms()->get_mpi_comm(); // Make host copy of the RHS MVector_h rhs_local_h(num_rows); thrust::copy(rhs.begin(), rhs.begin() + num_rows, rhs_local_h.begin()); // Gather the local RHS from all ranks to global vectors on all ranks MVector_h rhs_global_h(m_num_rows); A->manager->getComms()->all_gather_v(rhs_local_h, num_rows, rhs_global_h, row_all, row_displs); //Solve L*X = RHS MVector_d x_global(m_num_rows); thrust::copy(rhs_global_h.begin(), rhs_global_h.end(), x_global.begin()); status = cusolverDnXgetrs(m_cuds_handle, HIPBLAS_OP_N, m_num_rows, 1, m_dense_A, m_lda, m_ipiv, x_global.raw(), m_num_rows, m_cuds_info); // Copy the local portion of the solution back into x thrust::copy(x_global.begin() + row_displs[rank], x_global.begin() + row_displs[rank] + num_rows, x.begin()); } else { //Solve L*X = RHS thrust::copy(rhs.begin(), rhs.begin() + num_rows, x.begin()); status = cusolverDnXgetrs(m_cuds_handle, HIPBLAS_OP_N, m_num_rows, 1, m_dense_A, m_lda, m_ipiv, (Matrix_data *)(x.raw()), m_num_rows, m_cuds_info); } if (status != CUSOLVER_STATUS_SUCCESS) { FatalError("cuSolver trsv failed to solve Lx=rhs", AMGX_ERR_INTERNAL); } } else { Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); ViewType oldView = A->currentView(); A->setViewExterior(); if ((!A->is_matrix_singleGPU()) && (!xIsZero)) { // Modify rhs to include contribution from halo nodes // i.e. 
new_rhs = b - A_halo*x; // Note: dense_lu solver doesn't support latency hiding A->manager->exchange_halo_async(x, x.tag); A->manager->exchange_halo_wait(x, x.tag); Vector_d new_rhs(rhs.size()); distributed_rhs_mod(x, rhs, new_rhs); thrust::copy(new_rhs.begin(), new_rhs.begin() + m_num_rows, x.begin()); cudaCheckError(); } else { x.copy(rhs); } cudense_getrs(x); // triangular solves } //Speculative send of x vector x.dirtybit = 1; A->setView(oldView); return true; // direct solver always converges } #define AMGX_CASE_LINE(CASE) template class DenseLUSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) // AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace dense_lu } // namespace amgx
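// --- Editor's sketch (not part of AMGX): what this .hip/.cu pair illustrates. ---
// The HIP file above was produced by hipify from the CUDA file that follows; the
// most visible transformation is the kernel-launch syntax. A minimal, self-contained
// example of that mapping is shown below. The kernel name and launch helper are
// hypothetical and exist only for illustration.
#include <hip/hip_runtime.h>

__global__ void axpy_kernel(int n, float a, const float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) { y[i] += a * x[i]; }
}

static void launch_axpy(int n, float a, const float *x, float *y, hipStream_t stream)
{
    const int block = 128;
    const int grid  = (n + block - 1) / block;
    // CUDA form:  axpy_kernel<<<grid, block, 0, stream>>>(n, a, x, y);
    // HIP form emitted by hipify (kernel, grid, block, shared-mem bytes, stream, args...):
    hipLaunchKernelGGL((axpy_kernel), dim3(grid), dim3(block), 0, stream, n, a, x, y);
}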
8a02eb8569c2428303ca5d866bfb98e431e072b1.cu
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <string.h> #include <cutil.h> #include <miscmath.h> #include <amgx_cusparse.h> #include <thrust/copy.h> #include <basic_types.h> #include <util.h> #include <ld_functions.h> #include <logger.h> #include <thrust/logical.h> #include <profile.h> #include <sm_utils.inl> #include <texture.h> #include <typeinfo> #include "solvers/dense_lu_solver.h" #include "solvers/block_common_solver.h" #include "amgx_types/util.h" #include <algorithm> //trick to get nvcc to print a defined value //(__CUDA_ARCH__) @ compile time: // namespace //unnamed namespace { struct X { private: X(void) {} }; template<int N> struct Print_N_atCompile { X array[N]; }; }//end unnamed namespace namespace amgx { namespace dense_lu_solver { enum { WARP_SIZE = 32, CTA_SIZE = 128 }; // // supporting kernels // template< typename T, typename Tv, int WARP_SIZE > __global__ void csr_to_dense_kernel( const int num_rows, const int num_cols, const int block_num_rows, const int block_num_cols, const int *__restrict A_csr_rows, const int *__restrict A_csr_cols, const T *__restrict A_csr_vals, const int *__restrict A_csr_diag, T *__restrict A_dense, const int lda) { // Note: // To handle block CSR, the sparsity pattern csr_rows and csr_cols only store // the typical csr info assuming each block is a scalar. // The values in csr_vals has all entries in the blocks, using row major to // store the block. So we need the number of entries in each block as stride. const int block_mxn = block_num_rows * block_num_cols; // Each lane copies one entry in a block and iterate through row sparsity pattern. // Essentially one warp per row-block. For 4x4, we have 16 working threads per warp. const int lane_id = threadIdx.x % WARP_SIZE; // find the (row,col) local to a block const int block_row = lane_id / block_num_cols; const int block_col = lane_id % block_num_cols; // These are wasted threads per warp if ( block_row >= block_num_rows ) { return; } // The first row to consider. One row per warp. 
int row = (blockIdx.x * blockDim.x + threadIdx.x) / WARP_SIZE; const int row_offset = blockDim.x * gridDim.x / WARP_SIZE; for ( ; row < num_rows ; row += row_offset ) { int dense_row = row * block_num_rows + block_row; // Iterate over each row and copy the elements into col-major A_dense int row_end = A_csr_rows[row + 1]; for (int row_it = A_csr_rows[row]; row_it < row_end ; ++row_it ) { int col = A_csr_cols[row_it]; if ( col >= num_rows ) { continue; } // Skip entries corresponding to halo int dense_col = col * block_num_cols + block_col; A_dense[dense_col * lda + dense_row] = A_csr_vals[block_mxn * row_it + lane_id]; } // copy diagonal block if ( A_csr_diag ) { int diag_it = A_csr_diag[row]; int dense_col = row * block_num_cols + block_col; // diag means row=col A_dense[dense_col * lda + dense_row] = A_csr_vals[block_mxn * diag_it + lane_id]; } } } template< int N, bool ROW_MAJOR, int WARP_SIZE, typename Value_type > static __device__ __forceinline__ Value_type reduce_distributed_vectors( Value_type x, int is_leader, unsigned int active_mask ) { if ( N & (N - 1) ) { #pragma unroll for ( int i = 1 ; i < N ; ++i ) { Value_type other_x = utils::shfl_down( x, ROW_MAJOR ? i : N * i, WARP_SIZE, active_mask ); if ( is_leader ) { x += other_x; } } } else { #pragma unroll for ( int i = 1 ; i < N ; i <<= 1 ) { x += utils::shfl_xor( x, ROW_MAJOR ? i : N * i, WARP_SIZE, active_mask ); } } return x; } template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void b_minus_A_halo_x( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *new_rhs, const int num_owned_rows) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_NxN; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN]; // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. // At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } // Determine which NxN block the threads work with. 
int a_row_it = num_owned_rows; if ( is_active ) { a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; } unsigned int active_mask = utils::ballot(is_active); // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_owned_rows; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = a_row_it; // Load one block of B. Vector_type my_bmAx(0); if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id_mod_NxN_div_N]); } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_bmAx = b[N * a_row_id + lane_id_mod_NxN_mod_N]; } } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } // Determine if the column is halo column int a_col_is_valid = (a_col_id != -1) && (a_col_id >= num_owned_rows); // Count the number of active columns. // int vote = __ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < NxN ; k += N ) { int my_k = k + lane_id_mod_NxN_div_N; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, WARP_SIZE, active_mask); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k, WARP_SIZE, active_mask ); Vector_type my_x(0); if ( uniform_a_col_id != -1 && uniform_a_col_is_valid) { my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]); //printf("loading entry %d, num_rows = %d, my_x = %f\n",uniform_a_col_id,num_owned_rows,my_x); } my_s_mem[lane_id_mod_NxN] = my_x; // Load N blocks of A. #pragma unroll for ( int i = 0 ; i < N ; ++i ) { int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N]; } else { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N]; } } } // Loop over k } // Loop over aColIt Vector_type my_Einv = (lane_id_mod_NxN == 0 || lane_id_mod_NxN == 5 || lane_id_mod_NxN == 10 || lane_id_mod_NxN == 15) ? 1. : 0.; // Reduce bmAx terms. int is_leader = lane_id_mod_NxN_div_N == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_NxN_mod_N == 0; } my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader, active_mask ); // Update the shared terms. if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_s_mem[lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_s_mem[lane_id_mod_NxN_mod_N] = my_bmAx; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N]; } else { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_div_N]; } // Reduce bmAx terms. my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader, active_mask ); // Store the results. 
if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { new_rhs[N * a_row_id + lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { new_rhs[N * a_row_id + lane_id_mod_NxN_mod_N] = my_bmAx; } } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Vector_type, typename Matrix_type, int N> void distributed_rhs_mod_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *new_rhs, const int num_owned_rows, const int row_major, const int has_external_diag ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ROWS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = std::min( 4096, (num_owned_rows + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. int code = 2 * (row_major ? 1 : 0) + (has_external_diag ? 1 : 0); switch ( code ) { case 0: // Column-major, no external diagonal. b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, false> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; case 1: // Column-major, external diagonal. b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, true> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; case 2: // Row-major, no external diagonal. b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, false> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; case 3: // Row-major, external diagonal. 
b_minus_A_halo_x<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, true> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, new_rhs, num_owned_rows ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I> void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: distributed_rhs_mod( const Vector_d &x, const Vector_d &b, Vector_d &new_rhs) { const Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); switch (A->get_block_dimy()) { case 1: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 1>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 2: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 2>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 3: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 3>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 4: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 4>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; case 5: distributed_rhs_mod_dispatch<Vector_data, Matrix_data, 5>(A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->diag.raw(), x.raw(), b.raw(), new_rhs.raw(), A->get_num_rows(), A->getBlockFormat() == ROW_MAJOR, A->hasProps(DIAG)); break; } } // copy non zero elements only template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: csr_to_dense( ) { const Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); const int block_size = 256; const int num_warps = block_size / WARP_SIZE; const int grid_size = std::min(4096, (A->get_num_rows() + num_warps - 1) / num_warps); cudaStream_t stream = thrust::global_thread_handle::get_stream(); csr_to_dense_kernel<Matrix_data, Vector_data, WARP_SIZE> <<< grid_size, block_size, 0, stream>>>( A->get_num_rows(), A->get_num_cols(), A->get_block_dimx(), A->get_block_dimy(), A->row_offsets.raw(), A->col_indices.raw(), A->values.raw(), A->hasProps(DIAG) ? 
A->diag.raw() : NULL, m_dense_A, m_lda ); cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::cudense_getrf() { int wsize = 0; cusolverStatus_t status1 = cusolverDnXgetrf_bufferSize(m_cuds_handle, m_num_rows, m_num_cols, m_dense_A, m_lda, &wsize); if ( status1 != CUSOLVER_STATUS_SUCCESS) { FatalError( "Failed kernel in DenseLU", AMGX_ERR_INTERNAL); } cudaCheckError(); allocMem(m_trf_wspace, wsize, false); status1 = cusolverDnXgetrf(m_cuds_handle, m_num_rows, m_num_cols, m_dense_A, m_lda, m_trf_wspace, m_ipiv, m_cuds_info); if ( status1 != CUSOLVER_STATUS_SUCCESS) { FatalError( "failed kernel in Dense LU is likely due to invalid input parameters", AMGX_ERR_INTERNAL); } else { int t_info; cudaMemcpy(&t_info, m_cuds_info, sizeof(int), cudaMemcpyDefault); if (t_info != 0) { FatalError( "Fail to get info from cudense", AMGX_ERR_INTERNAL); } else { // We follow the standard established by Lapack and used in cudense. if (t_info > 0) { FatalError( "Dense LU factorization failed due to a singular matrix", AMGX_ERR_INTERNAL); } else if (t_info < 0) { FatalError( "Invalid input parameter(s) to dense LU", AMGX_ERR_INTERNAL); } } } cudaCheckError(); if (m_trf_wspace) { thrust::global_thread_handle::cudaFreeAsync(m_trf_wspace); m_trf_wspace = 0; } } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::cudense_getrs( Vector_d &x ) { //Solve L*X = RHS cusolverStatus_t status = cusolverDnXgetrs(m_cuds_handle, CUBLAS_OP_N, m_num_rows, 1, m_dense_A, m_lda, m_ipiv, (Matrix_data *)(x.raw()), m_num_rows, m_cuds_info); if (status != CUSOLVER_STATUS_SUCCESS) { FatalError( "cuSolver trsv failed to solve Lx=rhs", AMGX_ERR_INTERNAL); } cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > template< class DataType, class IndexType > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: allocMem(DataType *&ptr, IndexType numEntry, bool initToZero) { if ( ptr != NULL ) { thrust::global_thread_handle::cudaFreeAsync(ptr); } cudaCheckError(); size_t sz = numEntry * sizeof(DataType); thrust::global_thread_handle::cudaMalloc((void **)&ptr, sz); cudaCheckError(); if (initToZero) { cudaMemset(ptr, 0x0, sz); cudaCheckError(); } } //////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: DenseLUSolver(AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng) : Solver<Config_d>(cfg, cfg_scope, tmng), m_cuds_handle(0), m_num_rows(0), m_num_cols(0), m_lda(0), m_dense_A(0), m_ipiv(0), m_cuds_info(0), m_trf_wspace(0) { // Allocate a handle for cudense cusolverStatus_t status = cusolverDnCreate(&m_cuds_handle); if ( status != CUSOLVER_STATUS_SUCCESS ) { FatalError( "Could not create the CUDENSE handle", AMGX_ERR_CUDA_FAILURE ); } // Allocate a handle for cublas cublasStatus_t cublasStatus = cublasCreate(&m_cublas_handle); if ( cublasStatus != CUBLAS_STATUS_SUCCESS ) { FatalError( "Could not create the CUBLAS handle", AMGX_ERR_CUDA_FAILURE ); } // Define the cudense stream. 
status = cusolverDnSetStream(m_cuds_handle, thrust::global_thread_handle::get_stream()); if ( status != CUSOLVER_STATUS_SUCCESS ) { FatalError( "Could not set the stream for CUDENSE", AMGX_ERR_CUDA_FAILURE ); } // Make sure we don't run more than 1 iteration. this->set_max_iters(1); allocMem(m_cuds_info, sizeof(int), false); // Determine if the scalable coarse solve optimisation is enabled m_enable_exact_solve = ( cfg.getParameter<AlgorithmType>( "algorithm", cfg_scope) == CLASSICAL && cfg.getParameter<int>( "exact_coarse_solve", cfg_scope) == 1); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::~DenseLUSolver() { if (m_cuds_handle) { cusolverDnDestroy(m_cuds_handle); } if (m_cublas_handle) { cublasDestroy(m_cublas_handle); } if (m_dense_A) { thrust::global_thread_handle::cudaFreeAsync(m_dense_A); } if (m_ipiv) { thrust::global_thread_handle::cudaFreeAsync(m_ipiv); } if (m_trf_wspace) { thrust::global_thread_handle::cudaFreeAsync(m_trf_wspace); } if (m_cuds_info) { thrust::global_thread_handle::cudaFreeAsync(m_cuds_info); } cudaCheckError(); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::solve_init(Vector_d &, Vector_d &, bool) {} template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >::solve_finalize(Vector_d &, Vector_d &) {} // Offset the local row offsets to global row offsets template <class IndexType> __global__ void local_row_offsets_to_global( int num_rows, int offset, IndexType* local_Arows) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i >= num_rows) { return; } local_Arows[i] += offset; } // Offset local packed column indices to global unpacked indices template <class IndexType, class L2GType> __global__ void local_col_indices_to_global( int nnz, int num_rows, int offset, IndexType* local_Acols, L2GType* l2g) { int i = threadIdx.x + blockIdx.x*blockDim.x; if(i >= nnz) { return; } if(local_Acols[i] >= num_rows) { local_Acols[i] = l2g[local_Acols[i] - num_rows]; } else { local_Acols[i] += offset; } } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: solver_setup(bool reuse_matrix_structure) { Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); if (!A) { FatalError("DenseLUSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } ViewType oldView = A->currentView(); A->setViewExterior(); if(A->is_matrix_distributed() && m_enable_exact_solve) { int rank = A->manager->global_id(); int nranks = A->manager->get_num_partitions(); MPI_Comm comm = A->manager->getComms()->get_mpi_comm(); int offset, num_rows, nnz; A->getOffsetAndSizeForView(OWNED, &offset, &num_rows); A->getNnzForView(OWNED, &nnz); m_num_rows = A->manager->num_rows_global * A->get_block_dimx(); m_num_cols = A->manager->num_rows_global * A->get_block_dimy(); m_lda = m_num_rows; // col-major // Allocate mem for cudense pivoting sequence. allocMem(m_ipiv, m_num_rows, false); // Allocate memory to store the dense A and initialize to zero. 
allocMem(m_dense_A, m_num_cols * m_num_rows, true); // Much of the data can be reused if we are performing a resetup if (!reuse_matrix_structure) { // Gather the number of non zeros on each rank A->manager->getComms()->all_gather(nnz, nz_all, nranks); // Gather the number of rows on each rank A->manager->getComms()->all_gather(num_rows, row_all, nranks); // Get the number of non zeros on all ranks m_nnz_global = thrust::reduce(nz_all.begin(), nz_all.end()); // Turn the non-zero counts into displacements nz_displs.resize(nranks); thrust::exclusive_scan(nz_all.begin(), nz_all.end(), nz_displs.begin()); // Turn the number of rows into displacements row_displs.resize(nranks); thrust::exclusive_scan(row_all.begin(), row_all.end(), row_displs.begin()); IVector_d local_Acols_d(nnz); IVector_d local_Arows_d(num_rows); thrust::copy(A->col_indices.begin(), A->col_indices.begin() + nnz, local_Acols_d.begin()); thrust::copy(A->row_offsets.begin(), A->row_offsets.begin() + num_rows, local_Arows_d.begin()); // XXX Local to global map is the current limiting factor to enabling this // code for the aggregation based path. It's not clear whether there is // a structure that provides the same inverse mapping with aggregation. // Note that at one point inverse_renumbering was tested for aggregation // but didn't appear to work in all cases. // Convert the local column indices and row offsets to the global index space constexpr int nthreads = 128; int nblocks = nnz / nthreads + 1; local_col_indices_to_global<<<nblocks, nthreads>>>(nnz, num_rows, row_displs[rank], local_Acols_d.raw(), A->manager->local_to_global_map.raw()); nblocks = num_rows / nthreads + 1; local_row_offsets_to_global<<<nblocks, nthreads>>>(num_rows, nz_displs[rank], local_Arows_d.raw()); // Copy the transformed indices to the host IVector_h local_Acols_h(nnz); IVector_h local_Arows_h(num_rows); thrust::copy(local_Acols_d.begin(), local_Acols_d.end(), local_Acols_h.begin()); thrust::copy(local_Arows_d.begin(), local_Arows_d.end(), local_Arows_h.begin()); // Gather the local matrix structure redundantly to every rank IVector_h Acols_global_h(m_nnz_global); A->manager->getComms()->all_gather_v(local_Acols_h, nnz, Acols_global_h, nz_all, nz_displs); // Note: Copy the local data to global without guard value IVector_h Arows_global_h(m_num_rows + 1); A->manager->getComms()->all_gather_v(local_Arows_h, num_rows, Arows_global_h, row_all, row_displs); // Manually set the guard value on the global matrix Arows_global_h[m_num_rows] = m_nnz_global; Acols_global.resize(m_nnz_global); Arows_global.resize(m_num_rows + 1); thrust::copy(Acols_global_h.begin(), Acols_global_h.end(), Acols_global.begin()); thrust::copy(Arows_global_h.begin(), Arows_global_h.end(), Arows_global.begin()); } // Fetch to the host a copy of the local sparse matrix MVector_h local_Avals_h(nnz); thrust::copy(A->values.begin(), A->values.begin() + nnz, local_Avals_h.begin()); // Gather the matrix values to all ranks MVector_h Avals_global_h(m_nnz_global); A->manager->getComms()->all_gather_v(local_Avals_h, nnz, Avals_global_h, nz_all, nz_displs); allocMem(m_dense_A, m_num_cols * m_lda, true); MVector_d Avals_global(m_nnz_global); thrust::copy(Avals_global_h.begin(), Avals_global_h.end(), Avals_global.begin()); const int block_size = 256; const int num_warps = block_size / WARP_SIZE; const int grid_size = std::min(4096, (A->get_num_rows() + num_warps - 1) / num_warps); cudaStream_t stream = thrust::global_thread_handle::get_stream(); csr_to_dense_kernel<Matrix_data, Vector_data, 
WARP_SIZE><<<grid_size, block_size, 0, stream>>>( m_num_rows, m_num_cols, A->get_block_dimx(), A->get_block_dimy(), Arows_global.raw(), Acols_global.raw(), Avals_global.raw(), A->hasProps(DIAG) ? A->diag.raw() : NULL, m_dense_A, m_lda); cudaStreamSynchronize(stream); cudaCheckError(); } else { ViewType oldView = A->currentView(); A->setViewExterior(); m_num_rows = A->get_num_rows() * A->get_block_dimx(); // don't use A->get_num_cols() because A is rectangular. // Only the diagonal block owned by this rank is factored. m_num_cols = A->get_num_rows() * A->get_block_dimy(); m_lda = m_num_rows; // col-major // Allocate mem for cudense pivoting sequence. allocMem(m_ipiv, m_num_rows, false); // Allocate memory to store the dense A and initialize to zero. allocMem(m_dense_A, m_num_cols * m_lda, true); csr_to_dense(); // copy sparse A to dense_A } cudense_getrf(); // do LU factor A->setView(oldView); } // There is one subtle point here (for inexact solve): // We only do LU on the diagonal blocks associated with each rank. // Halo is used to update the right-hand-side (RHS) vector. // For multi GPU cases, this is essentially block Jacobi. Since the block size // is the size of each partition, this is OK trade-off between accuracy and runtime. template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > bool DenseLUSolver<TemplateConfig<AMGX_device, V, M, I> >:: solve_iteration(Vector_d &rhs, Vector_d &x, bool xIsZero) { Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); ViewType oldView = A->currentView(); A->setViewExterior(); if(A->is_matrix_distributed() && m_enable_exact_solve) { int offset, num_rows; A->getOffsetAndSizeForView(OWNED, &offset, &num_rows); cusolverStatus_t status; if (A->is_matrix_distributed()) { int rank = A->manager->global_id(); int nranks = A->manager->get_num_partitions(); MPI_Comm comm = A->manager->getComms()->get_mpi_comm(); // Make host copy of the RHS MVector_h rhs_local_h(num_rows); thrust::copy(rhs.begin(), rhs.begin() + num_rows, rhs_local_h.begin()); // Gather the local RHS from all ranks to global vectors on all ranks MVector_h rhs_global_h(m_num_rows); A->manager->getComms()->all_gather_v(rhs_local_h, num_rows, rhs_global_h, row_all, row_displs); //Solve L*X = RHS MVector_d x_global(m_num_rows); thrust::copy(rhs_global_h.begin(), rhs_global_h.end(), x_global.begin()); status = cusolverDnXgetrs(m_cuds_handle, CUBLAS_OP_N, m_num_rows, 1, m_dense_A, m_lda, m_ipiv, x_global.raw(), m_num_rows, m_cuds_info); // Copy the local portion of the solution back into x thrust::copy(x_global.begin() + row_displs[rank], x_global.begin() + row_displs[rank] + num_rows, x.begin()); } else { //Solve L*X = RHS thrust::copy(rhs.begin(), rhs.begin() + num_rows, x.begin()); status = cusolverDnXgetrs(m_cuds_handle, CUBLAS_OP_N, m_num_rows, 1, m_dense_A, m_lda, m_ipiv, (Matrix_data *)(x.raw()), m_num_rows, m_cuds_info); } if (status != CUSOLVER_STATUS_SUCCESS) { FatalError("cuSolver trsv failed to solve Lx=rhs", AMGX_ERR_INTERNAL); } } else { Matrix_d *A = dynamic_cast<Matrix_d *>(Base::m_A); ViewType oldView = A->currentView(); A->setViewExterior(); if ((!A->is_matrix_singleGPU()) && (!xIsZero)) { // Modify rhs to include contribution from halo nodes // i.e. 
new_rhs = b - A_halo*x; // Note: dense_lu solver doesn't support latency hiding A->manager->exchange_halo_async(x, x.tag); A->manager->exchange_halo_wait(x, x.tag); Vector_d new_rhs(rhs.size()); distributed_rhs_mod(x, rhs, new_rhs); thrust::copy(new_rhs.begin(), new_rhs.begin() + m_num_rows, x.begin()); cudaCheckError(); } else { x.copy(rhs); } cudense_getrs(x); // triangular solves } //Speculative send of x vector x.dirtybit = 1; A->setView(oldView); return true; // direct solver always converges } #define AMGX_CASE_LINE(CASE) template class DenseLUSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) // AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // namespace dense_lu } // namespace amgx
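// --- Editor's sketch (not part of AMGX): the getrf/getrs flow used above. ---
// DenseLUSolver factors the dense matrix once (getrf) and then reuses the factors
// for triangular solves (getrs). The X-suffixed calls above resolve to
// precision-specific routines; the standalone sketch below uses the plain
// double-precision cuSOLVER entry points. The handle, device buffers and the
// square size n are assumptions for illustration, not AMGX code.
#include <cusolverDn.h>
#include <cuda_runtime.h>

void dense_lu_solve(cusolverDnHandle_t h, double *d_A, double *d_b, int n)
{
    int lwork = 0;
    cusolverDnDgetrf_bufferSize(h, n, n, d_A, n, &lwork);

    double *d_work = nullptr;
    int *d_ipiv = nullptr, *d_info = nullptr;
    cudaMalloc(&d_work, sizeof(double) * lwork);
    cudaMalloc(&d_ipiv, sizeof(int) * n);
    cudaMalloc(&d_info, sizeof(int));

    // LU factorization with partial pivoting, in place: A = P * L * U.
    cusolverDnDgetrf(h, n, n, d_A, n, d_work, d_ipiv, d_info);
    // Triangular solves with the stored factors: d_b is overwritten by the solution.
    cusolverDnDgetrs(h, CUBLAS_OP_N, n, 1, d_A, n, d_ipiv, d_b, n, d_info);

    cudaFree(d_work);
    cudaFree(d_ipiv);
    cudaFree(d_info);
}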
d2b9b6d0e0c7360fcab54a9e2e44ce8e47bdf2e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/sleep/kern.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./kern.cuh" namespace { static __global__ void kern(uint64_t cycles) { uint64_t start = clock64(); for (;;) { if (clock64() - start > cycles) return; } } } void megdnn::cuda::sleep(hipStream_t stream, uint64_t cycles) { hipLaunchKernelGGL(( kern), dim3(1), dim3(1), 0, stream , cycles); after_kernel_launch(); } // vim: syntax=cpp.doxygen
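// --- Editor's sketch (not part of MegDNN): feeding the busy-wait kernel above. ---
// The kernel spins until clock64() has advanced by the requested number of cycles.
// A caller that wants an approximate wall-clock delay can derive the cycle count
// from the device clock rate, as sketched below. The helper name and the use of
// device 0 are assumptions; boost clocks make the result approximate.
#include <hip/hip_runtime.h>
#include <cstdint>

inline uint64_t ms_to_cycles(double milliseconds)
{
    int clock_khz = 0;
    // hipDeviceAttributeClockRate reports the peak clock in kHz,
    // i.e. clock_khz cycles per millisecond.
    hipDeviceGetAttribute(&clock_khz, hipDeviceAttributeClockRate, 0);
    return static_cast<uint64_t>(milliseconds * static_cast<double>(clock_khz));
}
// Usage sketch: megdnn::cuda::sleep(stream, ms_to_cycles(10.0));  // ~10 ms busy wait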
d2b9b6d0e0c7360fcab54a9e2e44ce8e47bdf2e4.cu
/** * \file dnn/src/cuda/sleep/kern.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "./kern.cuh" namespace { static __global__ void kern(uint64_t cycles) { uint64_t start = clock64(); for (;;) { if (clock64() - start > cycles) return; } } } void megdnn::cuda::sleep(cudaStream_t stream, uint64_t cycles) { kern<<< 1, 1, 0, stream >>>(cycles); after_kernel_launch(); } // vim: syntax=cpp.doxygen
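// --- Editor's sketch (not part of MegDNN): timing the sleep kernel above. ---
// A quick check that the requested cycle count roughly matches wall time, using
// CUDA events on the same stream. It assumes the declaration of
// megdnn::cuda::sleep from kern.cuh is visible; names are illustrative.
#include <cuda_runtime.h>
#include <cstdint>
#include <cstdio>

void time_sleep_kernel(cudaStream_t stream, uint64_t cycles)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, stream);
    megdnn::cuda::sleep(stream, cycles);   // launches kern<<<1, 1, 0, stream>>>(cycles)
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("slept for ~%.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}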
113bbbc22d552dd598f6137903e7fc5d00d3bec0.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2018- Xilinx, Inc (Alessandro Pappalardo) Copyright (c) 2016- Facebook, Inc (Adam Paszke) Copyright (c) 2014- Facebook, Inc (Soumith Chintala) Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) Copyright (c) 2011-2013 NYU (Clement Farabet) Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) Copyright (c) 2006 Idiap Research Institute (Samy Bengio) Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the names of Xilinx, Facebook, Deepmind Technologies, NYU, NEC Laboratories America and IDIAP Research Institute nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <THH.h> #include <THHApply.cuh> #include <common.h> #include <math_generics.cuh> #include "quantized_fused_rnn_kernel.h" template <typename T> struct TensorSigmoidOp { __device__ __forceinline__ void operator()(T* out, T* in) const { T one = (T) 1.0; *out = one / (one + math_generics::exp(- *in)); } __device__ __forceinline__ void operator()(T* v) const { T one = (T) 1.0; *v = one / (one + math_generics::exp(- *v)); } }; template <typename T> struct TensorTanhOp { __device__ __forceinline__ void operator()(T* out, T* in) const { *out = math_generics::tanh(*in); } __device__ __forceinline__ void operator()(T* v) const { *v = math_generics::tanh(*v); } }; template <typename T> struct FixedPointQuantizationOp { __device__ __forceinline__ void operator()(T* out, T* in, T *min_val, T *max_val, T *pre_scale, T *post_scale) const { T clipped_value = math_generics::max(math_generics::min(*in, *max_val), *min_val); T rounded_value = math_generics::round(clipped_value * (*pre_scale)); *out = rounded_value * (*post_scale); } }; template <typename T> struct QuantizedTensorSigmoidOp { __device__ __forceinline__ void operator()(T* out, T* in, T *quantization_bit_width) const { if (*quantization_bit_width == (T) 32.0) { TensorSigmoidOp<T>()(out, in); } else { T one = (T) 1.0; T two = (T) 2.0; T pre_scale = math_generics::pow(two, *quantization_bit_width); T post_scale = one / pre_scale; T min_val = (T) 0.0; T max_val = one - post_scale; TensorSigmoidOp<T>()(out, in); FixedPointQuantizationOp<T>()(out, out, &min_val, &max_val, &pre_scale, &post_scale); } } }; template <typename T> struct QuantizedTensorTanhOp { __device__ __forceinline__ void operator()(T* out, T* in, T *quantization_bit_width) const { if (*quantization_bit_width == (T) 32.0) { TensorTanhOp<T>()(out, in); } else { T one = (T) 1.0; T two = (T) 2.0; T pre_scale = math_generics::pow(two, *quantization_bit_width - one); T post_scale = math_generics::pow(two, - *quantization_bit_width + one); T min_val = (T) -1.0; T max_val = one - post_scale; TensorTanhOp<T>()(out, in); FixedPointQuantizationOp<T>()(out, out, &min_val, &max_val, &pre_scale, &post_scale); } } }; #include "generic/quantized_generic_fused_rnn_kernel.cu" #include <THHGenerateFloatType.h> #include "generic/quantized_generic_fused_rnn_kernel.cu" #include <THHGenerateDoubleType.h>
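// --- Editor's sketch (not part of the kernels above): the quantization math. ---
// Host-side mirror of FixedPointQuantizationOp combined with the sigmoid case, so
// the scale and range choices (pre_scale = 2^b, post_scale = 2^-b, range
// [0, 1 - 2^-b]) are easy to check numerically. Plain C++ doubles; names are
// illustrative only.
#include <cmath>
#include <algorithm>

double quantized_sigmoid(double x, double bit_width)
{
    double s = 1.0 / (1.0 + std::exp(-x));
    if (bit_width == 32.0) { return s; }           // 32 bits means "no quantization"
    double pre_scale  = std::pow(2.0, bit_width);  // 2^b representable steps in [0, 1)
    double post_scale = 1.0 / pre_scale;
    double min_val = 0.0;
    double max_val = 1.0 - post_scale;             // largest representable value
    double clipped = std::max(std::min(s, max_val), min_val);
    return std::round(clipped * pre_scale) * post_scale;
}
// Worked example: with bit_width = 4, sigmoid(0) = 0.5 stays 0.5 (8/16), while
// sigmoid(10) ~= 0.99995 clips to 15/16 = 0.9375.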
113bbbc22d552dd598f6137903e7fc5d00d3bec0.cu
/* Copyright (c) 2018- Xilinx, Inc (Alessandro Pappalardo) Copyright (c) 2016- Facebook, Inc (Adam Paszke) Copyright (c) 2014- Facebook, Inc (Soumith Chintala) Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) Copyright (c) 2011-2013 NYU (Clement Farabet) Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) Copyright (c) 2006 Idiap Research Institute (Samy Bengio) Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the names of Xilinx, Facebook, Deepmind Technologies, NYU, NEC Laboratories America and IDIAP Research Institute nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <THC.h> #include <THCApply.cuh> #include <common.h> #include <math_generics.cuh> #include "quantized_fused_rnn_kernel.h" template <typename T> struct TensorSigmoidOp { __device__ __forceinline__ void operator()(T* out, T* in) const { T one = (T) 1.0; *out = one / (one + math_generics::exp(- *in)); } __device__ __forceinline__ void operator()(T* v) const { T one = (T) 1.0; *v = one / (one + math_generics::exp(- *v)); } }; template <typename T> struct TensorTanhOp { __device__ __forceinline__ void operator()(T* out, T* in) const { *out = math_generics::tanh(*in); } __device__ __forceinline__ void operator()(T* v) const { *v = math_generics::tanh(*v); } }; template <typename T> struct FixedPointQuantizationOp { __device__ __forceinline__ void operator()(T* out, T* in, T *min_val, T *max_val, T *pre_scale, T *post_scale) const { T clipped_value = math_generics::max(math_generics::min(*in, *max_val), *min_val); T rounded_value = math_generics::round(clipped_value * (*pre_scale)); *out = rounded_value * (*post_scale); } }; template <typename T> struct QuantizedTensorSigmoidOp { __device__ __forceinline__ void operator()(T* out, T* in, T *quantization_bit_width) const { if (*quantization_bit_width == (T) 32.0) { TensorSigmoidOp<T>()(out, in); } else { T one = (T) 1.0; T two = (T) 2.0; T pre_scale = math_generics::pow(two, *quantization_bit_width); T post_scale = one / pre_scale; T min_val = (T) 0.0; T max_val = one - post_scale; TensorSigmoidOp<T>()(out, in); FixedPointQuantizationOp<T>()(out, out, &min_val, &max_val, &pre_scale, &post_scale); } } }; template <typename T> struct QuantizedTensorTanhOp { __device__ __forceinline__ void operator()(T* out, T* in, T *quantization_bit_width) const { if (*quantization_bit_width == (T) 32.0) { TensorTanhOp<T>()(out, in); } else { T one = (T) 1.0; T two = (T) 2.0; T pre_scale = math_generics::pow(two, *quantization_bit_width - one); T post_scale = math_generics::pow(two, - *quantization_bit_width + one); T min_val = (T) -1.0; T max_val = one - post_scale; TensorTanhOp<T>()(out, in); FixedPointQuantizationOp<T>()(out, out, &min_val, &max_val, &pre_scale, &post_scale); } } }; #include "generic/quantized_generic_fused_rnn_kernel.cu" #include <THCGenerateFloatType.h> #include "generic/quantized_generic_fused_rnn_kernel.cu" #include <THCGenerateDoubleType.h>
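// --- Editor's sketch: applying an elementwise functor like the ones above. ---
// This is a standalone illustration, not how THC dispatches these functors; the
// kernel, launch configuration and buffer names are assumptions. It shows the
// calling convention the operator()(out, in, bit_width) interface implies.
#include <cuda_runtime.h>

template <typename T, typename Op>
__global__ void apply_pointwise(T *out, const T *in, T bit_width, int n, Op op)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        T v = in[i];
        op(&out[i], &v, &bit_width);   // functor writes the quantized activation
    }
}

// Usage sketch (assumes the functor structs above are visible): quantize a tanh
// activation to 8 bits over n elements.
//   apply_pointwise<<<(n + 255) / 256, 256>>>(d_out, d_in, 8.0f, n,
//                                             QuantizedTensorTanhOp<float>());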
a7204852f31e47a9ff2527ab8697362b8b56af7e.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> namespace at { namespace native { template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t> void norm_kernel_cuda_impl(TensorIterator& iter, Scalar val) { float p; if (val.isIntegral(false)) { p = val.to<int64_t>(); } else if (val.isFloatingPoint()) { p = val.to<acc_t>(); } else { AT_ERROR("norm_kernel_cuda_impl expects norm to be integer or float"); } if (p == static_cast<float>(0)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<acc_t>(), 0); } else if (p == static_cast<float>(1)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<acc_t>(), 0); } else if (p == static_cast<float>(2)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<acc_t>(), 0); } else if (p == static_cast<float>(INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<acc_t>(), std::numeric_limits<acc_t>::min()); } else if (p == static_cast<float>(-INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<acc_t>(), std::numeric_limits<acc_t>::max()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<acc_t>{ acc_t(p) }, 0); } } static void norm_kernel_cuda(TensorIterator& iter, Scalar p) { if (iter.dtype() == kHalf) { return norm_kernel_cuda_impl<at::Half, float>(iter, p); } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::Half, float, float>(iter, p); } else if(iter.dtype() == kBFloat16) { return norm_kernel_cuda_impl<at::BFloat16, float>(iter, p); } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, p); } AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "norm_cuda", [&]() { norm_kernel_cuda_impl<scalar_t>(iter, p); }); } REGISTER_DISPATCH(norm_stub, &norm_kernel_cuda); }} // namespace at::native
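// --- Editor's sketch (not PyTorch code): the norms the branches above select. ---
// A host-side reference for checking results: p = 0 counts non-zero entries,
// p = 1 and p = 2 are the usual L1/L2 norms, +/-infinity take the max/min
// absolute value, and any other p uses (sum |x|^p)^(1/p). Names are illustrative.
#include <cmath>
#include <vector>
#include <limits>
#include <algorithm>

double reference_norm(const std::vector<double> &v, double p)
{
    if (p == 0.0) {
        return static_cast<double>(std::count_if(v.begin(), v.end(),
                                                 [](double x) { return x != 0.0; }));
    }
    if (std::isinf(p)) {
        double m = (p > 0) ? 0.0 : std::numeric_limits<double>::infinity();
        for (double x : v) {
            m = (p > 0) ? std::max(m, std::fabs(x)) : std::min(m, std::fabs(x));
        }
        return m;
    }
    double acc = 0.0;
    for (double x : v) { acc += std::pow(std::fabs(x), p); }
    return std::pow(acc, 1.0 / p);
}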
a7204852f31e47a9ff2527ab8697362b8b56af7e.cu
#include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Reduce.cuh> #include <ATen/native/DispatchStub.h> #include <ATen/native/SharedReduceOps.h> #include <ATen/Dispatch.h> #include <ATen/native/ReduceOps.h> namespace at { namespace native { template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t> void norm_kernel_cuda_impl(TensorIterator& iter, Scalar val) { float p; if (val.isIntegral(false)) { p = val.to<int64_t>(); } else if (val.isFloatingPoint()) { p = val.to<acc_t>(); } else { AT_ERROR("norm_kernel_cuda_impl expects norm to be integer or float"); } if (p == static_cast<float>(0)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormZeroOps<acc_t>(), 0); } else if (p == static_cast<float>(1)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOneOps<acc_t>(), 0); } else if (p == static_cast<float>(2)) { gpu_reduce_kernel<scalar_t, out_t>(iter, NormTwoOps<acc_t>(), 0); } else if (p == static_cast<float>(INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMaxOps<acc_t>(), std::numeric_limits<acc_t>::min()); } else if (p == static_cast<float>(-INFINITY)) { gpu_reduce_kernel<scalar_t, out_t>(iter, AbsMinOps<acc_t>(), std::numeric_limits<acc_t>::max()); } else { gpu_reduce_kernel<scalar_t, out_t>(iter, NormOps<acc_t>{ acc_t(p) }, 0); } } static void norm_kernel_cuda(TensorIterator& iter, Scalar p) { if (iter.dtype() == kHalf) { return norm_kernel_cuda_impl<at::Half, float>(iter, p); } else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::Half, float, float>(iter, p); } else if(iter.dtype() == kBFloat16) { return norm_kernel_cuda_impl<at::BFloat16, float>(iter, p); } else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) { // type promotion that does cast and reduction in a single kernel return norm_kernel_cuda_impl<at::BFloat16, float, float>(iter, p); } AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "norm_cuda", [&]() { norm_kernel_cuda_impl<scalar_t>(iter, p); }); } REGISTER_DISPATCH(norm_stub, &norm_kernel_cuda); }} // namespace at::native
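norm_kernel_cuda_impl above picks a reduction functor from the value of p. As a reference for what those reductions are expected to produce, here is a small host-side sketch of the standard p-norm definitions (plain C++, not the ATen functors themselves): p == 0 counts non-zero elements, p == +/-inf takes the max/min absolute value, and otherwise the result is (sum |x|^p)^(1/p).

#include <cmath>
#include <cstdio>
#include <vector>

// Reference p-norm on the host, for comparison against the GPU reductions.
static float reference_norm(const std::vector<float>& x, float p) {
  if (p == 0.0f) {                       // 0-"norm": count of non-zero entries
    float n = 0.0f;
    for (float v : x) n += (v != 0.0f) ? 1.0f : 0.0f;
    return n;
  }
  if (std::isinf(p)) {                   // +inf: max |x|, -inf: min |x|
    float m = std::fabs(x[0]);
    for (float v : x) m = (p > 0.0f) ? std::fmax(m, std::fabs(v)) : std::fmin(m, std::fabs(v));
    return m;
  }
  float acc = 0.0f;                      // general case: (sum |x|^p)^(1/p)
  for (float v : x) acc += std::pow(std::fabs(v), p);
  return std::pow(acc, 1.0f / p);
}

int main() {
  std::vector<float> x = {3.0f, -4.0f, 0.0f};
  std::printf("L1=%g L2=%g Linf=%g L0=%g\n",
              reference_norm(x, 1.0f), reference_norm(x, 2.0f),
              reference_norm(x, INFINITY), reference_norm(x, 0.0f));
  return 0;
}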
93e3bbe4beccd8da596c7f0a80526ee9b9ecade9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gputests.h" __global__ void kernel_test1_write(char* _ptr, char* end_ptr) { unsigned int i; unsigned long* ptr = (unsigned long*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned long*) end_ptr) { return; } for (i = 0; i < BLOCKSIZE/sizeof(unsigned long); i++) { ptr[i] =(unsigned long) & ptr[i]; } return; } __global__ void kernel_test1_read(char* _ptr, char* end_ptr, MemoryError *local_errors, int *local_count) { unsigned int i; unsigned long* ptr = (unsigned long*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned long*) end_ptr) { return; } for (i = 0; i < BLOCKSIZE/sizeof(unsigned long); i++) { if (ptr[i] != (unsigned long)& ptr[i]) { record_error(local_errors, local_count, &ptr[i], (unsigned long)&ptr[i]); } } return; } int test1(TestInputParams *tip, TestOutputParams *top, bool *term) { unsigned int i; char* end_ptr = tip->ptr + tip->tot_num_blocks* BLOCKSIZE; for (i=0; i < tip->tot_num_blocks; i+= GRIDSIZE) { if(*term == true) break; dim3 grid; grid.x= GRIDSIZE; hipLaunchKernelGGL(( kernel_test1_write), dim3(grid), dim3(1), 0, 0, tip->ptr + i*BLOCKSIZE, end_ptr); SYNC_CUERR; //SHOW_PROGRESS("test1 on writing", i, tot_num_blocks); } for (i=0; i < tip->tot_num_blocks; i+= GRIDSIZE) { if(*term == true) break; dim3 grid; grid.x= GRIDSIZE; hipLaunchKernelGGL(( kernel_test1_read), dim3(grid), dim3(1), 0, 0, tip->ptr + i*BLOCKSIZE, end_ptr, top->err_vector, top->err_count); SYNC_CUERR; //error_checking("test1 on reading", i); //SHOW_PROGRESS("test1 on reading", i, tot_num_blocks); } return hipSuccess; }
93e3bbe4beccd8da596c7f0a80526ee9b9ecade9.cu
#include "gputests.h" __global__ void kernel_test1_write(char* _ptr, char* end_ptr) { unsigned int i; unsigned long* ptr = (unsigned long*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned long*) end_ptr) { return; } for (i = 0; i < BLOCKSIZE/sizeof(unsigned long); i++) { ptr[i] =(unsigned long) & ptr[i]; } return; } __global__ void kernel_test1_read(char* _ptr, char* end_ptr, MemoryError *local_errors, int *local_count) { unsigned int i; unsigned long* ptr = (unsigned long*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned long*) end_ptr) { return; } for (i = 0; i < BLOCKSIZE/sizeof(unsigned long); i++) { if (ptr[i] != (unsigned long)& ptr[i]) { record_error(local_errors, local_count, &ptr[i], (unsigned long)&ptr[i]); } } return; } int test1(TestInputParams *tip, TestOutputParams *top, bool *term) { unsigned int i; char* end_ptr = tip->ptr + tip->tot_num_blocks* BLOCKSIZE; for (i=0; i < tip->tot_num_blocks; i+= GRIDSIZE) { if(*term == true) break; dim3 grid; grid.x= GRIDSIZE; kernel_test1_write<<<grid, 1>>>(tip->ptr + i*BLOCKSIZE, end_ptr); SYNC_CUERR; //SHOW_PROGRESS("test1 on writing", i, tot_num_blocks); } for (i=0; i < tip->tot_num_blocks; i+= GRIDSIZE) { if(*term == true) break; dim3 grid; grid.x= GRIDSIZE; kernel_test1_read<<<grid, 1>>>(tip->ptr + i*BLOCKSIZE, end_ptr, top->err_vector, top->err_count); SYNC_CUERR; //error_checking("test1 on reading", i); //SHOW_PROGRESS("test1 on reading", i, tot_num_blocks); } return cudaSuccess; }
adeedcd727e07b2f79dd7f8fb8b5d1c6e6a872d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include "latte/layers/sigmoid_layer.h" namespace latte { template <typename Dtype> __global__ void SigmoidForward(const int n, const Dtype *in, Dtype *out) { CUDA_KERNEL_LOOP(index, n) { out[index] = 1. / (1. + exp(-in[index])); } } template <typename Dtype> void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { const Dtype *bottom_data = bottom[0]->gpu_data(); Dtype *top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); hipLaunchKernelGGL(( SigmoidForward<Dtype>), dim3(LATTE_GET_BLOCKS(count)), dim3(LATTE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(SigmoidLayer); } // namespace latte
adeedcd727e07b2f79dd7f8fb8b5d1c6e6a872d1.cu
#include <cmath> #include "latte/layers/sigmoid_layer.h" namespace latte { template <typename Dtype> __global__ void SigmoidForward(const int n, const Dtype *in, Dtype *out) { CUDA_KERNEL_LOOP(index, n) { out[index] = 1. / (1. + exp(-in[index])); } } template <typename Dtype> void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { const Dtype *bottom_data = bottom[0]->gpu_data(); Dtype *top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); SigmoidForward<Dtype><<<LATTE_GET_BLOCKS(count), LATTE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(SigmoidLayer); } // namespace latte
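SigmoidForward above leans on the CUDA_KERNEL_LOOP and LATTE_GET_BLOCKS helpers from the latte headers. A standalone CUDA sketch of the same element-wise pattern with the two helpers written out locally; their exact definitions in latte are an assumption here (this mirrors the common Caffe-style grid-stride form):

#include <cstdio>
#include <cuda_runtime.h>

// Caffe-style helpers, written out explicitly for this sketch.
#define KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
constexpr int kThreads = 512;
inline int GetBlocks(int n) { return (n + kThreads - 1) / kThreads; }

__global__ void SigmoidForwardSketch(int n, const float* in, float* out) {
  KERNEL_LOOP(i, n) { out[i] = 1.f / (1.f + expf(-in[i])); }
}

int main() {
  const int n = 1024;
  float *in, *out;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; i++) in[i] = 0.01f * (i - n / 2);
  SigmoidForwardSketch<<<GetBlocks(n), kThreads>>>(n, in, out);
  cudaDeviceSynchronize();
  std::printf("sigmoid(0) = %f\n", out[n / 2]);   // expect ~0.5
  cudaFree(in); cudaFree(out);
  return 0;
}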
cacc85298faffefcf377e7cde7b2f096fc6e9f5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // Tomás Oliveira e Silva, November 2017 // // ACA 2017/2018 // // modify_sector CUDA kernel (each thread deals with one sector) // extern "C" __global__ void modify_sector_cuda_kernel(unsigned int * __restrict__ sector_data,unsigned int * __restrict__ sector_number,unsigned int n_sectors,unsigned int sector_size) { unsigned int x,y,idx,i,a,c,n_words; unsigned int *lo,*hi; lo = sector_data; hi = sector_data + n_sectors * sector_size / 4; // // compute the thread number // x = (unsigned int)threadIdx.x + (unsigned int)blockDim.x * (unsigned int)blockIdx.x; y = (unsigned int)threadIdx.y + (unsigned int)blockDim.y * (unsigned int)blockIdx.y; idx = (unsigned int)blockDim.x * (unsigned int)gridDim.x * y + x; if(idx >= n_sectors) return; // safety precaution // // convert the sector size into number of 4-byte words (it is assumed that sizeof(unsigned int) = 4) // n_words = sector_size / 4u; // // adjust pointers (N.B. the memory layout may not be optimal) // sector_data += n_words * idx; sector_number += idx; // // initialize the linear congruencial pseudo-random number generator // (section 3.2.1.2 of The Art of Computer Programming presents the theory behind the restrictions on a and c) // i = sector_number[0]; // get the sector number a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2); // a must be a multiple of 4 plus 1 c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3); // c must be odd x = 0xACA02017u; // initial state // // modify the sector data // for(i = 0u;i < n_words;i++) { unsigned int *addr; x = a * x + c; // update the pseudo-random generator state addr = &sector_data[i]; if(addr >= lo && addr < hi) *addr ^= x; // modify the sector data } }
cacc85298faffefcf377e7cde7b2f096fc6e9f5c.cu
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // Tomás Oliveira e Silva, November 2017 // // ACA 2017/2018 // // modify_sector CUDA kernel (each thread deals with one sector) // extern "C" __global__ void modify_sector_cuda_kernel(unsigned int * __restrict__ sector_data,unsigned int * __restrict__ sector_number,unsigned int n_sectors,unsigned int sector_size) { unsigned int x,y,idx,i,a,c,n_words; unsigned int *lo,*hi; lo = sector_data; hi = sector_data + n_sectors * sector_size / 4; // // compute the thread number // x = (unsigned int)threadIdx.x + (unsigned int)blockDim.x * (unsigned int)blockIdx.x; y = (unsigned int)threadIdx.y + (unsigned int)blockDim.y * (unsigned int)blockIdx.y; idx = (unsigned int)blockDim.x * (unsigned int)gridDim.x * y + x; if(idx >= n_sectors) return; // safety precaution // // convert the sector size into number of 4-byte words (it is assumed that sizeof(unsigned int) = 4) // n_words = sector_size / 4u; // // adjust pointers (N.B. the memory layout may not be optimal) // sector_data += n_words * idx; sector_number += idx; // // initialize the linear congruencial pseudo-random number generator // (section 3.2.1.2 of The Art of Computer Programming presents the theory behind the restrictions on a and c) // i = sector_number[0]; // get the sector number a = 0xACA00001u ^ ((i & 0x0F0F0F0Fu) << 2); // a must be a multiple of 4 plus 1 c = 0x00ACA001u ^ ((i & 0xF0F0F0F0u) >> 3); // c must be odd x = 0xACA02017u; // initial state // // modify the sector data // for(i = 0u;i < n_words;i++) { unsigned int *addr; x = a * x + c; // update the pseudo-random generator state addr = &sector_data[i]; if(addr >= lo && addr < hi) *addr ^= x; // modify the sector data } }
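The comments in modify_sector_cuda_kernel note that a must be 1 (mod 4) and c must be odd; together with the implicit modulus 2^32 those are the full-period conditions referenced from TAOCP 3.2.1.2. A host-side sketch that derives a and c the same way for one hypothetical sector number and checks those two properties:

#include <cstdio>

// Host-side sketch of the per-sector LCG set-up used in the kernel above.
int main() {
  unsigned int sector = 12345u;                        // hypothetical sector number
  unsigned int a = 0xACA00001u ^ ((sector & 0x0F0F0F0Fu) << 2);
  unsigned int c = 0x00ACA001u ^ ((sector & 0xF0F0F0F0u) >> 3);
  unsigned int x = 0xACA02017u;                        // initial state
  std::printf("a %% 4 = %u (expect 1), c %% 2 = %u (expect 1)\n", a % 4u, c % 2u);
  for (int i = 0; i < 4; i++) {                        // first few states of the generator
    x = a * x + c;
    std::printf("x[%d] = 0x%08X\n", i, x);
  }
  return 0;
}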
c0465610daa2a02d553294eccfaef1bae401c350.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ptr2ind_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int64_t *ptr_data = NULL; hipMalloc(&ptr_data, XSIZE*YSIZE); int64_t *out_data = NULL; hipMalloc(&out_data, XSIZE*YSIZE); int64_t E = 1; int64_t numel = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ptr2ind_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ptr_data,out_data,E,numel); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ptr2ind_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ptr_data,out_data,E,numel); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ptr2ind_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, ptr_data,out_data,E,numel); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c0465610daa2a02d553294eccfaef1bae401c350.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ptr2ind_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int64_t *ptr_data = NULL; cudaMalloc(&ptr_data, XSIZE*YSIZE); int64_t *out_data = NULL; cudaMalloc(&out_data, XSIZE*YSIZE); int64_t E = 1; int64_t numel = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ptr2ind_kernel<<<gridBlock,threadBlock>>>(ptr_data,out_data,E,numel); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ptr2ind_kernel<<<gridBlock,threadBlock>>>(ptr_data,out_data,E,numel); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ptr2ind_kernel<<<gridBlock,threadBlock>>>(ptr_data,out_data,E,numel); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
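The harness above warms up with 10 launches and then times 1000 launches with std::chrono; since kernel launches are asynchronous and the timed loop never synchronizes, the measured interval can end before the launched kernels have finished. A sketch of the same measurement using CUDA events instead, with a placeholder dummyKernel standing in for ptr2ind_kernel (kernel name, sizes and launch shape are illustrative assumptions):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float* p, int n) {        // placeholder workload
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = p[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  const int iters = 1000;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));
  dim3 block(256), grid((n + 255) / 256);
  for (int i = 0; i < 10; i++) dummyKernel<<<grid, block>>>(d, n);   // warm-up
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; i++) dummyKernel<<<grid, block>>>(d, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);            // waits until all timed launches finish
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  std::printf("%d launches: %.3f ms total, %.3f us per launch\n", iters, ms, ms * 1000.0f / iters);
  cudaEventDestroy(start); cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}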
42d3ce304c3143a65a3710cfeeb3c203f1cca1f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "filters.h" #include "kernels.h" #include "png_utils.h" // One pixel per thread, row major __global__ void kernel2(my_png *input, my_png *output, filter *f) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < input->width * input->height) { int sums[3] = { 0, 0, 0 }; int offset = f->dim >> 1; int row = idx / input->width, col = idx % input->width; for (int r = row - offset, i = 0; r <= row + offset; r++, i++) { if (r < 0 || r >= input->height) continue; for (int c = col - offset, j = 0; c <= col + offset; c++, j++) { if (c < 0 || c >= input->width) continue; png_bytep curr_pix = input->pixels[r * input->width + c * 4]; for (int a = 0; a < 3; a++) sums[a] += curr_pix[a] * f->matrix[i * f->dim + j]; } } png_bytep pix = output->pixels[row * input->width + col * 4]; for (int a = 0; a < 3; a++) pix[a] = sums[a]; } } __global__ void kernel2n(my_png *output, int *minp, int *maxp) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < output->width * output->height) { int row = idx / output->width, col = idx % output->width; png_bytep pix = output->pixels[row * output->width + col * 4]; for (int a = 0; a < 3; a++) { if (minp[a] != maxp[a]) pix[a] = ((pix[a] - minp[a]) * 255) / (maxp[a] - minp[a]); } } }
42d3ce304c3143a65a3710cfeeb3c203f1cca1f5.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "filters.h" #include "kernels.h" #include "png_utils.h" // One pixel per thread, row major __global__ void kernel2(my_png *input, my_png *output, filter *f) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < input->width * input->height) { int sums[3] = { 0, 0, 0 }; int offset = f->dim >> 1; int row = idx / input->width, col = idx % input->width; for (int r = row - offset, i = 0; r <= row + offset; r++, i++) { if (r < 0 || r >= input->height) continue; for (int c = col - offset, j = 0; c <= col + offset; c++, j++) { if (c < 0 || c >= input->width) continue; png_bytep curr_pix = input->pixels[r * input->width + c * 4]; for (int a = 0; a < 3; a++) sums[a] += curr_pix[a] * f->matrix[i * f->dim + j]; } } png_bytep pix = output->pixels[row * input->width + col * 4]; for (int a = 0; a < 3; a++) pix[a] = sums[a]; } } __global__ void kernel2n(my_png *output, int *minp, int *maxp) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < output->width * output->height) { int row = idx / output->width, col = idx % output->width; png_bytep pix = output->pixels[row * output->width + col * 4]; for (int a = 0; a < 3; a++) { if (minp[a] != maxp[a]) pix[a] = ((pix[a] - minp[a]) * 255) / (maxp[a] - minp[a]); } } }
b91d8237608ab86f1e8b995d40e5c8a6c9473f39.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/slice_util.h" #include "oneflow/core/kernel/new_kernel_util.h" namespace oneflow { namespace { inline hipblasOperation_t GetCublasOp(char op) { switch (op) { case 'n': case 'N': { return HIPBLAS_OP_N; } case 't': case 'T': { return HIPBLAS_OP_T; } case 'c': case 'C': { return HIPBLAS_OP_C; } default: { UNIMPLEMENTED(); } } return HIPBLAS_OP_N; } template<typename T> struct CudaDataTypeTrait; template<> struct CudaDataTypeTrait<float> { const static hipDataType value = HIP_R_32F; }; template<> struct CudaDataTypeTrait<half> { const static hipDataType value = HIP_R_16F; }; template<typename T> void CublasBatchGemm(hipblasHandle_t handle, char transa, char transb, int64_t m, int64_t n, int64_t k, T alpha, const T* a, int64_t lda, int64_t stridea, const T* b, int64_t ldb, int64_t strideb, T beta, T* c, int64_t ldc, int64_t stridec, int64_t batch_size) { hipblasOperation_t opa = GetCublasOp(transa); hipblasOperation_t opb = GetCublasOp(transb); if (TORCH_HIP_VERSION >= 9010 && GetCudaSmVersion() >= 500) { #if TORCH_HIP_VERSION >= 9010 hipDataType data_type = CudaDataTypeTrait<T>::value; OF_CUBLAS_CHECK(hipblasGemmStridedBatchedEx( handle, opa, opb, m, n, k, reinterpret_cast<const void*>(&alpha), reinterpret_cast<const void*>(a), data_type, lda, stridea, reinterpret_cast<const void*>(b), data_type, ldb, strideb, reinterpret_cast<const void*>(&beta), reinterpret_cast<void*>(c), data_type, ldc, stridec, batch_size, data_type, HIPBLAS_GEMM_DEFAULT)); #else UNIMPLEMENTED(); #endif } else { cublas_gemmStridedBatched<T>(handle, opa, opb, m, n, k, &alpha, a, ldb, stridea, b, ldb, strideb, &beta, c, ldc, stridec, batch_size); } } #if TORCH_HIP_VERSION >= 9010 template<> void CublasBatchGemm<half>(hipblasHandle_t handle, char transa, char transb, int64_t m, int64_t n, int64_t k, half alpha, const half* a, int64_t lda, int64_t stridea, const half* b, int64_t ldb, int64_t strideb, half beta, half* c, int64_t ldc, int64_t stridec, int64_t batch_size) { using comp_t = float; hipblasOperation_t opa = GetCublasOp(transa); hipblasOperation_t opb = GetCublasOp(transb); if (GetCudaSmVersion() >= 500) { float alpha_f = static_cast<comp_t>(alpha); float beta_f = static_cast<comp_t>(beta); #if TORCH_HIP_VERSION >= 11000 hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT; #else hipblasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; #endif hipDataType data_type = CudaDataTypeTrait<half>::value; hipDataType comp_type = CudaDataTypeTrait<comp_t>::value; OF_CUBLAS_CHECK(hipblasGemmStridedBatchedEx( handle, opa, opb, m, n, k, &alpha_f, reinterpret_cast<const void*>(a), data_type, lda, stridea, reinterpret_cast<const void*>(b), data_type, ldb, strideb, &beta_f, reinterpret_cast<void*>(c), data_type, ldc, stridec, batch_size, comp_type, algo)); } else { cublas_gemmStridedBatched<half>(handle, opa, opb, m, n, k, &alpha, a, 
lda, stridea, b, ldb, strideb, &beta, c, ldc, stridec, batch_size); } } template<> void CublasBatchGemm<float16>(hipblasHandle_t handle, char transa, char transb, int64_t m, int64_t n, int64_t k, float16 alpha, const float16* a, int64_t lda, int64_t stridea, const float16* b, int64_t ldb, int64_t strideb, float16 beta, float16* c, int64_t ldc, int64_t stridec, int64_t batch_size) { CublasBatchGemm<half>(handle, transa, transb, m, n, k, static_cast<half>(alpha), reinterpret_cast<const half*>(a), lda, stridea, reinterpret_cast<const half*>(b), ldb, strideb, static_cast<half>(beta), reinterpret_cast<half*>(c), ldc, stridec, batch_size); } #endif // TORCH_HIP_VERSION >= 9010 template<typename T> void BatchedGemm(DeviceCtx* ctx, char opa, char opb, int64_t m, int64_t n, int64_t k, float alpha, const T* a, int64_t lda, int64_t stridea, const T* b, int64_t ldb, int64_t strideb, float beta, T* c, int64_t ldc, int64_t stridec, int64_t batch_size) { // swap m and n, a and b to convert from row-major to col-major CublasBatchGemm<T>(ctx->cublas_pmh_handle(), opb, opa, n, m, k, static_cast<T>(alpha), b, ldb, strideb, a, lda, stridea, static_cast<T>(beta), c, ldc, stridec, batch_size); } SliceParams ConstructSliceParams4Value(int64_t seq_len, int64_t batch_size, int64_t num_heads, int64_t head_size) { // slice (s, b, n, 3, h) to (s, b, n, 1, h) SliceParams params; std::memset(&params, 0, sizeof(SliceParams)); params.ndim = 4; params.dims[0] = seq_len; params.dims[1] = batch_size; params.dims[2] = num_heads; params.dims[3] = 3 * head_size; params.start[0] = 0; params.start[1] = 0; params.start[2] = 0; params.start[3] = 2 * head_size; params.step[0] = 1; params.step[1] = 1; params.step[2] = 1; params.step[3] = 1; params.size[0] = seq_len; params.size[1] = batch_size; params.size[2] = num_heads; params.size[3] = head_size; return params; } template<typename T> void TransposeGpu(DeviceCtx* ctx, const ShapeView& in_shape, const ShapeView& out_shape, const std::vector<int32_t>& perm, const T* in, T* out) { CHECK_EQ(in_shape.NumAxes(), out_shape.NumAxes()); int32_t num_axes = in_shape.NumAxes(); CHECK_EQ(num_axes, perm.size()); for (int i = 0; i < perm.size(); ++i) { CHECK_EQ(in_shape.At(perm[i]), out_shape.At(i)); } int64_t elem_cnt = in_shape.elem_cnt(); NewKernelUtil<DeviceType::kGPU>::Transpose(ctx, num_axes, in_shape, out_shape, perm, elem_cnt, in, out); } template<typename T> class FusedSelfAttentionQueryMulKeyAndValueGpuKernel final : public user_op::OpKernel { public: FusedSelfAttentionQueryMulKeyAndValueGpuKernel() = default; ~FusedSelfAttentionQueryMulKeyAndValueGpuKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0); int64_t seq_len = h_tensor->shape().At(0); int64_t batch_size = h_tensor->shape().At(1); int64_t hidden_size = h_tensor->shape().At(2); int64_t head_size = ctx->Attr<int64_t>("head_size"); int64_t num_heads = hidden_size / (3 * head_size); int64_t ld = batch_size * hidden_size; int64_t stride = 3 * head_size; int64_t k_offset = head_size; // q * k: (sq, b, n, h) x (sk, b, n, h) => (b, n, sq, h) x (b, n, sk, h) // => (b, n, sq, h) x (b, n, h, sk) -> (b, n, sq, sk) float alpha = ctx->Attr<float>("alpha"); user_op::Tensor* qmk_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key", 0); const T* q_dptr = h_tensor->dptr<T>(); const T* k_dptr = h_tensor->dptr<T>() + k_offset; BatchedGemm<T>(ctx->device_ctx(), 'N', 'T', seq_len, seq_len, head_size, alpha, q_dptr, 
ld, stride, k_dptr, ld, stride, 0.0f, qmk_tensor->mut_dptr<T>(), seq_len, seq_len * seq_len, batch_size * num_heads); // slice v user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); user_op::Tensor* v_tensor = ctx->Tensor4ArgNameAndIndex("value", 0); SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size); SliceKernelUtil<DeviceType::kGPU, T>::Forward(ctx->device_ctx(), params, h_tensor->dptr<T>(), tmp_v_tensor->mut_dptr<T>()); // v from (s, b, n, h) transpose to (b, n, s, h) Shape value_shape({seq_len, batch_size, num_heads, head_size}); TransposeGpu<T>(ctx->device_ctx(), value_shape, v_tensor->shape(), {1, 2, 0, 3}, tmp_v_tensor->dptr<T>(), v_tensor->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<typename T> class FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel final : public user_op::OpKernel { public: FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() = default; ~FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* v_grad_tensor = ctx->Tensor4ArgNameAndIndex("value_grad", 0); const user_op::Tensor* qmk_grad_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key_grad", 0); const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0); user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); user_op::Tensor* h_grad_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states_grad", 0); float alpha = ctx->Attr<float>("alpha"); int64_t seq_len = h_grad_tensor->shape().At(0); int64_t batch_size = h_grad_tensor->shape().At(1); int64_t hidden_size = h_grad_tensor->shape().At(2); int64_t num_heads = v_grad_tensor->shape().At(1); int64_t head_size = v_grad_tensor->shape().At(3); int64_t ld = batch_size * hidden_size; int64_t stride = 3 * head_size; CHECK_EQ(hidden_size, num_heads * stride); // transpose from (b, n, s, h) to (s, b, n, h) Shape value_shape({seq_len, batch_size, num_heads, head_size}); TransposeGpu<T>(ctx->device_ctx(), v_grad_tensor->shape(), value_shape, {2, 0, 1, 3}, v_grad_tensor->dptr<T>(), tmp_v_tensor->mut_dptr<T>()); // slice v grad SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size); SliceKernelUtil<DeviceType::kGPU, T>::Backward( ctx->device_ctx(), params, tmp_v_tensor->dptr<T>(), h_grad_tensor->mut_dptr<T>()); // grad_q = grad_qmk * k // (b, n, sq, sk) x (b, n, sk, h) -> (b, n, s, h) <= (s, b, n, h) <= (s, b, n, 3, h) const T* qmk_grad_dptr = qmk_grad_tensor->dptr<T>(); const T* k_dptr = h_tensor->dptr<T>() + head_size; T* grad_q_dptr = h_grad_tensor->mut_dptr<T>(); BatchedGemm<T>(ctx->device_ctx(), 'N', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr, seq_len, seq_len * seq_len, k_dptr, ld, stride, 0.0f, grad_q_dptr, ld, stride, batch_size * num_heads); // grad_k = grad_qmk * q // (b, n, sk, sq) x (b, n, sq, h) -> (b, n, sk, h) <= (s, b, n, h) <= (s, b, n, 3, h) const T* q_dptr = h_tensor->dptr<T>(); T* grad_k_dptr = h_grad_tensor->mut_dptr<T>() + head_size; BatchedGemm<T>(ctx->device_ctx(), 'T', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr, seq_len, seq_len * seq_len, q_dptr, ld, stride, 0.0f, grad_k_dptr, ld, stride, batch_size * num_heads); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; size_t InferTmpBufferSize(user_op::InferContext* ctx) { const Shape* value_shape = ctx->Shape4ArgNameAndIndex("value", 0); DataType 
value_dtype = *ctx->Dtype4ArgNameAndIndex("value", 0); return value_shape->elem_cnt() * GetSizeOfDataType(value_dtype); } size_t InferGradTmpBufferSize(user_op::InferContext* ctx) { const Shape* value_shape = ctx->Shape4ArgNameAndIndex("value_grad", 0); DataType value_dtype = *ctx->Dtype4ArgNameAndIndex("value_grad", 0); return value_shape->elem_cnt() * GetSizeOfDataType(value_dtype); } } // namespace #define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value") \ .SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGpuKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferTmpBufferSize); #define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value_grad") \ .SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferGradTmpBufferSize); REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float) REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float16) REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float) REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float16) } // namespace oneflow
b91d8237608ab86f1e8b995d40e5c8a6c9473f39.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/slice_util.h" #include "oneflow/core/kernel/new_kernel_util.h" namespace oneflow { namespace { inline cublasOperation_t GetCublasOp(char op) { switch (op) { case 'n': case 'N': { return CUBLAS_OP_N; } case 't': case 'T': { return CUBLAS_OP_T; } case 'c': case 'C': { return CUBLAS_OP_C; } default: { UNIMPLEMENTED(); } } return CUBLAS_OP_N; } template<typename T> struct CudaDataTypeTrait; template<> struct CudaDataTypeTrait<float> { const static cudaDataType_t value = CUDA_R_32F; }; template<> struct CudaDataTypeTrait<half> { const static cudaDataType_t value = CUDA_R_16F; }; template<typename T> void CublasBatchGemm(cublasHandle_t handle, char transa, char transb, int64_t m, int64_t n, int64_t k, T alpha, const T* a, int64_t lda, int64_t stridea, const T* b, int64_t ldb, int64_t strideb, T beta, T* c, int64_t ldc, int64_t stridec, int64_t batch_size) { cublasOperation_t opa = GetCublasOp(transa); cublasOperation_t opb = GetCublasOp(transb); if (CUDA_VERSION >= 9010 && GetCudaSmVersion() >= 500) { #if CUDA_VERSION >= 9010 cudaDataType_t data_type = CudaDataTypeTrait<T>::value; OF_CUBLAS_CHECK(cublasGemmStridedBatchedEx( handle, opa, opb, m, n, k, reinterpret_cast<const void*>(&alpha), reinterpret_cast<const void*>(a), data_type, lda, stridea, reinterpret_cast<const void*>(b), data_type, ldb, strideb, reinterpret_cast<const void*>(&beta), reinterpret_cast<void*>(c), data_type, ldc, stridec, batch_size, data_type, CUBLAS_GEMM_DEFAULT)); #else UNIMPLEMENTED(); #endif } else { cublas_gemmStridedBatched<T>(handle, opa, opb, m, n, k, &alpha, a, ldb, stridea, b, ldb, strideb, &beta, c, ldc, stridec, batch_size); } } #if CUDA_VERSION >= 9010 template<> void CublasBatchGemm<half>(cublasHandle_t handle, char transa, char transb, int64_t m, int64_t n, int64_t k, half alpha, const half* a, int64_t lda, int64_t stridea, const half* b, int64_t ldb, int64_t strideb, half beta, half* c, int64_t ldc, int64_t stridec, int64_t batch_size) { using comp_t = float; cublasOperation_t opa = GetCublasOp(transa); cublasOperation_t opb = GetCublasOp(transb); if (GetCudaSmVersion() >= 500) { float alpha_f = static_cast<comp_t>(alpha); float beta_f = static_cast<comp_t>(beta); #if CUDA_VERSION >= 11000 cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT; #else cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; #endif cudaDataType_t data_type = CudaDataTypeTrait<half>::value; cudaDataType_t comp_type = CudaDataTypeTrait<comp_t>::value; OF_CUBLAS_CHECK(cublasGemmStridedBatchedEx( handle, opa, opb, m, n, k, &alpha_f, reinterpret_cast<const void*>(a), data_type, lda, stridea, reinterpret_cast<const void*>(b), data_type, ldb, strideb, &beta_f, reinterpret_cast<void*>(c), data_type, ldc, stridec, batch_size, comp_type, algo)); } else { cublas_gemmStridedBatched<half>(handle, opa, opb, m, n, k, &alpha, a, lda, stridea, b, ldb, strideb, &beta, c, ldc, stridec, batch_size); } } 
template<> void CublasBatchGemm<float16>(cublasHandle_t handle, char transa, char transb, int64_t m, int64_t n, int64_t k, float16 alpha, const float16* a, int64_t lda, int64_t stridea, const float16* b, int64_t ldb, int64_t strideb, float16 beta, float16* c, int64_t ldc, int64_t stridec, int64_t batch_size) { CublasBatchGemm<half>(handle, transa, transb, m, n, k, static_cast<half>(alpha), reinterpret_cast<const half*>(a), lda, stridea, reinterpret_cast<const half*>(b), ldb, strideb, static_cast<half>(beta), reinterpret_cast<half*>(c), ldc, stridec, batch_size); } #endif // CUDA_VERSION >= 9010 template<typename T> void BatchedGemm(DeviceCtx* ctx, char opa, char opb, int64_t m, int64_t n, int64_t k, float alpha, const T* a, int64_t lda, int64_t stridea, const T* b, int64_t ldb, int64_t strideb, float beta, T* c, int64_t ldc, int64_t stridec, int64_t batch_size) { // swap m and n, a and b to convert from row-major to col-major CublasBatchGemm<T>(ctx->cublas_pmh_handle(), opb, opa, n, m, k, static_cast<T>(alpha), b, ldb, strideb, a, lda, stridea, static_cast<T>(beta), c, ldc, stridec, batch_size); } SliceParams ConstructSliceParams4Value(int64_t seq_len, int64_t batch_size, int64_t num_heads, int64_t head_size) { // slice (s, b, n, 3, h) to (s, b, n, 1, h) SliceParams params; std::memset(&params, 0, sizeof(SliceParams)); params.ndim = 4; params.dims[0] = seq_len; params.dims[1] = batch_size; params.dims[2] = num_heads; params.dims[3] = 3 * head_size; params.start[0] = 0; params.start[1] = 0; params.start[2] = 0; params.start[3] = 2 * head_size; params.step[0] = 1; params.step[1] = 1; params.step[2] = 1; params.step[3] = 1; params.size[0] = seq_len; params.size[1] = batch_size; params.size[2] = num_heads; params.size[3] = head_size; return params; } template<typename T> void TransposeGpu(DeviceCtx* ctx, const ShapeView& in_shape, const ShapeView& out_shape, const std::vector<int32_t>& perm, const T* in, T* out) { CHECK_EQ(in_shape.NumAxes(), out_shape.NumAxes()); int32_t num_axes = in_shape.NumAxes(); CHECK_EQ(num_axes, perm.size()); for (int i = 0; i < perm.size(); ++i) { CHECK_EQ(in_shape.At(perm[i]), out_shape.At(i)); } int64_t elem_cnt = in_shape.elem_cnt(); NewKernelUtil<DeviceType::kGPU>::Transpose(ctx, num_axes, in_shape, out_shape, perm, elem_cnt, in, out); } template<typename T> class FusedSelfAttentionQueryMulKeyAndValueGpuKernel final : public user_op::OpKernel { public: FusedSelfAttentionQueryMulKeyAndValueGpuKernel() = default; ~FusedSelfAttentionQueryMulKeyAndValueGpuKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0); int64_t seq_len = h_tensor->shape().At(0); int64_t batch_size = h_tensor->shape().At(1); int64_t hidden_size = h_tensor->shape().At(2); int64_t head_size = ctx->Attr<int64_t>("head_size"); int64_t num_heads = hidden_size / (3 * head_size); int64_t ld = batch_size * hidden_size; int64_t stride = 3 * head_size; int64_t k_offset = head_size; // q * k: (sq, b, n, h) x (sk, b, n, h) => (b, n, sq, h) x (b, n, sk, h) // => (b, n, sq, h) x (b, n, h, sk) -> (b, n, sq, sk) float alpha = ctx->Attr<float>("alpha"); user_op::Tensor* qmk_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key", 0); const T* q_dptr = h_tensor->dptr<T>(); const T* k_dptr = h_tensor->dptr<T>() + k_offset; BatchedGemm<T>(ctx->device_ctx(), 'N', 'T', seq_len, seq_len, head_size, alpha, q_dptr, ld, stride, k_dptr, ld, stride, 0.0f, qmk_tensor->mut_dptr<T>(), seq_len, 
seq_len * seq_len, batch_size * num_heads); // slice v user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); user_op::Tensor* v_tensor = ctx->Tensor4ArgNameAndIndex("value", 0); SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size); SliceKernelUtil<DeviceType::kGPU, T>::Forward(ctx->device_ctx(), params, h_tensor->dptr<T>(), tmp_v_tensor->mut_dptr<T>()); // v from (s, b, n, h) transpose to (b, n, s, h) Shape value_shape({seq_len, batch_size, num_heads, head_size}); TransposeGpu<T>(ctx->device_ctx(), value_shape, v_tensor->shape(), {1, 2, 0, 3}, tmp_v_tensor->dptr<T>(), v_tensor->mut_dptr<T>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<typename T> class FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel final : public user_op::OpKernel { public: FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() = default; ~FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel() override = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* v_grad_tensor = ctx->Tensor4ArgNameAndIndex("value_grad", 0); const user_op::Tensor* qmk_grad_tensor = ctx->Tensor4ArgNameAndIndex("query_mul_key_grad", 0); const user_op::Tensor* h_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states", 0); user_op::Tensor* tmp_v_tensor = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); user_op::Tensor* h_grad_tensor = ctx->Tensor4ArgNameAndIndex("hidden_states_grad", 0); float alpha = ctx->Attr<float>("alpha"); int64_t seq_len = h_grad_tensor->shape().At(0); int64_t batch_size = h_grad_tensor->shape().At(1); int64_t hidden_size = h_grad_tensor->shape().At(2); int64_t num_heads = v_grad_tensor->shape().At(1); int64_t head_size = v_grad_tensor->shape().At(3); int64_t ld = batch_size * hidden_size; int64_t stride = 3 * head_size; CHECK_EQ(hidden_size, num_heads * stride); // transpose from (b, n, s, h) to (s, b, n, h) Shape value_shape({seq_len, batch_size, num_heads, head_size}); TransposeGpu<T>(ctx->device_ctx(), v_grad_tensor->shape(), value_shape, {2, 0, 1, 3}, v_grad_tensor->dptr<T>(), tmp_v_tensor->mut_dptr<T>()); // slice v grad SliceParams params = ConstructSliceParams4Value(seq_len, batch_size, num_heads, head_size); SliceKernelUtil<DeviceType::kGPU, T>::Backward( ctx->device_ctx(), params, tmp_v_tensor->dptr<T>(), h_grad_tensor->mut_dptr<T>()); // grad_q = grad_qmk * k // (b, n, sq, sk) x (b, n, sk, h) -> (b, n, s, h) <= (s, b, n, h) <= (s, b, n, 3, h) const T* qmk_grad_dptr = qmk_grad_tensor->dptr<T>(); const T* k_dptr = h_tensor->dptr<T>() + head_size; T* grad_q_dptr = h_grad_tensor->mut_dptr<T>(); BatchedGemm<T>(ctx->device_ctx(), 'N', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr, seq_len, seq_len * seq_len, k_dptr, ld, stride, 0.0f, grad_q_dptr, ld, stride, batch_size * num_heads); // grad_k = grad_qmk * q // (b, n, sk, sq) x (b, n, sq, h) -> (b, n, sk, h) <= (s, b, n, h) <= (s, b, n, 3, h) const T* q_dptr = h_tensor->dptr<T>(); T* grad_k_dptr = h_grad_tensor->mut_dptr<T>() + head_size; BatchedGemm<T>(ctx->device_ctx(), 'T', 'N', seq_len, head_size, seq_len, alpha, qmk_grad_dptr, seq_len, seq_len * seq_len, q_dptr, ld, stride, 0.0f, grad_k_dptr, ld, stride, batch_size * num_heads); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; size_t InferTmpBufferSize(user_op::InferContext* ctx) { const Shape* value_shape = ctx->Shape4ArgNameAndIndex("value", 0); DataType value_dtype = *ctx->Dtype4ArgNameAndIndex("value", 0); return 
value_shape->elem_cnt() * GetSizeOfDataType(value_dtype); } size_t InferGradTmpBufferSize(user_op::InferContext* ctx) { const Shape* value_shape = ctx->Shape4ArgNameAndIndex("value_grad", 0); DataType value_dtype = *ctx->Dtype4ArgNameAndIndex("value_grad", 0); return value_shape->elem_cnt() * GetSizeOfDataType(value_dtype); } } // namespace #define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value") \ .SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGpuKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferTmpBufferSize); #define REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("fused_self_attention_query_mul_key_and_value_grad") \ .SetCreateFn<FusedSelfAttentionQueryMulKeyAndValueGradGpuKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \ & (user_op::HobDataType("hidden_states", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn(InferGradTmpBufferSize); REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float) REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GPU_KERNEL(float16) REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float) REGISTER_FUSED_SELF_ATTENTION_QUERY_MUL_KEY_AND_VALUE_GRAD_GPU_KERNEL(float16) } // namespace oneflow
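BatchedGemm above uses the standard row-major/column-major trick: to get row-major C = A x B out of a column-major GEMM it swaps the operands and exchanges m and n, relying on C^T = B^T x A^T. A small CPU sketch of that identity with a naive column-major GEMM (plain loops, no cuBLAS):

#include <cstdio>

// Naive column-major GEMM: c(i,j) = sum_p a(i,p) * b(p,j), all column-major.
static void gemm_colmajor(int m, int n, int k, const float* a, int lda,
                          const float* b, int ldb, float* c, int ldc) {
  for (int j = 0; j < n; j++)
    for (int i = 0; i < m; i++) {
      float acc = 0.0f;
      for (int p = 0; p < k; p++) acc += a[i + p * lda] * b[p + j * ldb];
      c[i + j * ldc] = acc;
    }
}

int main() {
  const int m = 2, n = 3, k = 2;
  float A[m * k] = {1, 2, 3, 4};          // row-major 2x2
  float B[k * n] = {1, 0, 1, 0, 1, 1};    // row-major 2x3
  float C[m * n] = {0};
  // Row-major C = A*B computed by a column-major GEMM with the operands
  // swapped and m/n exchanged: the buffers read column-major are A^T, B^T,
  // and the result written is C^T, i.e. exactly row-major C.
  gemm_colmajor(/*m=*/n, /*n=*/m, k, /*a=*/B, /*lda=*/n, /*b=*/A, /*ldb=*/k, C, /*ldc=*/n);
  for (int i = 0; i < m; i++) {
    for (int j = 0; j < n; j++) std::printf("%g ", C[i * n + j]);   // 1 2 3 / 3 4 7
    std::printf("\n");
  }
  return 0;
}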
ed6c119e6ffe00b625826a5756a36419889f415c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _BLUR_CUDA_KERNEL #define _BLUR_CUDA_KERNEL #include "cta_config.h" __global__ void Blur3x3(float* input, float* output, int num_rows, int num_cols, float alpha) { int bidx = blockIdx.x; int tidx = threadIdx.x; int tidy = threadIdx.y; int num_matrix_blocks = (num_rows * num_cols) / (BLOCK_SIZE * BLOCK_SIZE); __shared__ float tmp_buffer[BLOCK_SIZE][BLOCK_SIZE]; for (int block_id = bidx; block_id < num_matrix_blocks; block_id += gridDim.x) { for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) { tmp_buffer[y + tidy][tidx] = input[ (y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks + block_id * NUM_THREADS_Y * NUM_THREADS_X + tidy * NUM_THREADS_X + tidx]; } __syncthreads(); for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) { float sum_val = 0.0f; for (int ky = 0; ky < 3; ++ky) { for (int kx = 0; kx < 3; ++kx) { int row_index = y + tidy + ky - 1; int col_index = tidx + kx - 1; CLAMP(row_index, 0, BLOCK_SIZE); CLAMP(col_index, 0, BLOCK_SIZE); sum_val += tmp_buffer[row_index][col_index]; } } output[ (y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks + block_id * NUM_THREADS_Y * NUM_THREADS_X + tidy * NUM_THREADS_X + tidx] = sum_val * alpha; } __syncthreads(); } return; } #endif
ed6c119e6ffe00b625826a5756a36419889f415c.cu
#ifndef _BLUR_CUDA_KERNEL #define _BLUR_CUDA_KERNEL #include "cta_config.h" __global__ void Blur3x3(float* input, float* output, int num_rows, int num_cols, float alpha) { int bidx = blockIdx.x; int tidx = threadIdx.x; int tidy = threadIdx.y; int num_matrix_blocks = (num_rows * num_cols) / (BLOCK_SIZE * BLOCK_SIZE); __shared__ float tmp_buffer[BLOCK_SIZE][BLOCK_SIZE]; for (int block_id = bidx; block_id < num_matrix_blocks; block_id += gridDim.x) { for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) { tmp_buffer[y + tidy][tidx] = input[ (y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks + block_id * NUM_THREADS_Y * NUM_THREADS_X + tidy * NUM_THREADS_X + tidx]; } __syncthreads(); for (int y = 0; y < BLOCK_SIZE; y += NUM_THREADS_Y) { float sum_val = 0.0f; for (int ky = 0; ky < 3; ++ky) { for (int kx = 0; kx < 3; ++kx) { int row_index = y + tidy + ky - 1; int col_index = tidx + kx - 1; CLAMP(row_index, 0, BLOCK_SIZE); CLAMP(col_index, 0, BLOCK_SIZE); sum_val += tmp_buffer[row_index][col_index]; } } output[ (y / NUM_THREADS_Y) * NUM_THREADS_Y * NUM_THREADS_X * num_matrix_blocks + block_id * NUM_THREADS_Y * NUM_THREADS_X + tidy * NUM_THREADS_X + tidx] = sum_val * alpha; } __syncthreads(); } return; } #endif
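Blur3x3 above sums a 3x3 neighbourhood per element of a BLOCK_SIZE x BLOCK_SIZE tile, clamps indices at the tile border, and scales by alpha. A CPU reference of that per-tile computation; the tile size of 32 and the clamp-into-[0, BLOCK_SIZE-1] behaviour are assumptions standing in for the definitions in cta_config.h:

#include <algorithm>
#include <cstdio>
#include <vector>

// CPU reference for one tile of Blur3x3: clamped 3x3 neighbourhood sum,
// scaled by alpha. kBlock and clamp_idx are stand-ins for BLOCK_SIZE / CLAMP.
constexpr int kBlock = 32;
static inline int clamp_idx(int v) { return std::min(std::max(v, 0), kBlock - 1); }

int main() {
  std::vector<float> in(kBlock * kBlock), out(kBlock * kBlock);
  for (int r = 0; r < kBlock; r++)
    for (int c = 0; c < kBlock; c++) in[r * kBlock + c] = (float)c;  // ramp input
  const float alpha = 1.0f / 9.0f;                                   // box-filter weight
  for (int r = 0; r < kBlock; r++)
    for (int c = 0; c < kBlock; c++) {
      float sum = 0.0f;
      for (int ky = -1; ky <= 1; ky++)
        for (int kx = -1; kx <= 1; kx++)
          sum += in[clamp_idx(r + ky) * kBlock + clamp_idx(c + kx)];
      out[r * kBlock + c] = sum * alpha;
    }
  // Interior elements keep the ramp value; the clamped left edge is pulled down.
  std::printf("out(1,1)=%g out(0,0)=%g\n", out[kBlock + 1], out[0]);  // 1 and 0.333...
  return 0;
}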
436e6a3b66982eec7093d86d316a027a526590ec.hip
// !!! This is a file automatically generated by hipify!!! #include "src/mem_alloc.h" // Allocate host memory. Return true if the allocation succeeds. bool alloc_host(void **ptr, size_t size) { return hipHostMalloc(ptr, size) == hipSuccess; } // Free host memory void free_host(void *ptr) { hipHostFree(ptr); }
436e6a3b66982eec7093d86d316a027a526590ec.cu
#include "src/mem_alloc.h" // Allocate host memory. Return true if the allocation succeeds. bool alloc_host(void **ptr, size_t size) { return cudaMallocHost(ptr, size) == cudaSuccess; } // Free host memory void free_host(void *ptr) { cudaFreeHost(ptr); }
086b47ab31281bc19960b726d0312bd1958b393a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } __global__ void lower_right_copy(int *dst, int *input_itemsets, int *reference, int max_rows, int max_cols, int i, int penalty) { int r, c; r = blockIdx.y*blockDim.y+threadIdx.y+i+1; c = blockIdx.x*blockDim.x+threadIdx.x+i+1; if( r >= max_rows || c >= max_cols) return; if( r == (max_cols - c + i)) { dst[r*max_cols+c] = maximum( input_itemsets[(r-1)*max_cols+c-1]+ reference[r*max_cols+c], input_itemsets[r*max_cols+c-1] - penalty, input_itemsets[(r-1)*max_cols+c] - penalty); } else { dst[r*max_cols+c] = input_itemsets[r*max_cols+c]; } }
086b47ab31281bc19960b726d0312bd1958b393a.cu
#include "includes.h" __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } __global__ void lower_right_copy(int *dst, int *input_itemsets, int *reference, int max_rows, int max_cols, int i, int penalty) { int r, c; r = blockIdx.y*blockDim.y+threadIdx.y+i+1; c = blockIdx.x*blockDim.x+threadIdx.x+i+1; if( r >= max_rows || c >= max_cols) return; if( r == (max_cols - c + i)) { dst[r*max_cols+c] = maximum( input_itemsets[(r-1)*max_cols+c-1]+ reference[r*max_cols+c], input_itemsets[r*max_cols+c-1] - penalty, input_itemsets[(r-1)*max_cols+c] - penalty); } else { dst[r*max_cols+c] = input_itemsets[r*max_cols+c]; } }
c548e36f54dee3288fab8c22ff8913db652619ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2012 The Trustees of Indiana University. All rights reserved. CGL MapReduce Framework on GPUs and CPUs Code Name: Panda 0.1 File: PandaScan.cu Time: 2012-07-01 Developer: Hui Li ([email protected]) This is the source code for Panda, a MapReduce runtime on GPUs and CPUs. */ #ifndef _PANDASCAN_CU_ #define _PANDASCAN_CU_ // includes, kernels #include "Panda.h" // Define this to more rigorously avoid bank conflicts, // even at the lower (root) levels of the tree // Note that due to the higher addressing overhead, performance // is lower with ZERO_BANK_CONFLICTS enabled. It is provided // as an example. //#define ZERO_BANK_CONFLICTS // 16 banks on G80 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS) #endif /////////////////////////////////////////////////////////////////////////////// // Work-efficient compute implementation of scan, one thread per 2 elements // Work-efficient: O(log(n)) steps, and O(n) adds. // Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging // Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements. // // In addition, If ZERO_BANK_CONFLICTS is defined, uses // n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) // shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using // single-element offsets every NUM_BANKS elements, plus additional single-element offsets // after every NUM_BANKS^2 elements. // // Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums // and Their Applications", or Prins and Chatterjee PRAM course notes: // http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf // // This work-efficient version is based on the algorithm presented in Guy Blelloch's // excellent paper "Prefix sums and their applications". // http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html // // Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined) // Con: More instructions to compute bank-conflict-free shared memory addressing, // and slightly more shared memory storage used. // template <bool isNP2> __device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; // compute spacing to avoid bank conflicts bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); // Cache the computational window in shared memory // pad values beyond n with zeros s_data[ai + bankOffsetA] = g_idata[mem_ai]; if (isNP2) // compile-time decision { s_data[bi + bankOffsetB] = (bi < n) ? 
g_idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = g_idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); // write results to global memory g_odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) // compile-time decision { if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB]; } else { g_odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) // compile-time decision { // write this block's total sum to the corresponding index in the blockSums array g_blockSums[blockIndex] = s_data[index]; } // zero the last element in the scan so it will propagate back to the front s_data[index] = 0; } } __device__ unsigned int buildSum(int *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; // build the sum in place up the tree for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } return stride; } __device__ void scanRootToLeaves(int *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; // traverse down the tree building the scan in place for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(int *data, int blockIndex, int *blockSums) { int stride = buildSum(data); // build the sum in place up the tree clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex); scanRootToLeaves(data, stride); // traverse down tree to build the scan } template <bool storeSum, bool isNP2> __global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ int s_data[]; // load data into shared memory loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? 
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); // scan the data in each block prescanBlock<storeSum>(s_data, blockIndex, g_blockSums); // write results to device memory storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); } __global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex) { __shared__ int uni; if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset]; unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x; __syncthreads(); // note two adds per thread g_data[address] += uni; g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni; } inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; } inline int floorPow2(int n) { #ifdef WIN32 // method 2 return 1 << (int)logb((float)n); #else // method 1 // int nf = (int)n; // return 1 << (((*(int*)&nf) >> 23) - 127); int exp; frexp((double)n, &exp); return 1 << (exp - 1); #endif } #define BLOCK_SIZE 256 int** g_scanBlockSums; unsigned int g_numEltsAllocated = 0; unsigned int g_numLevelsAllocated = 0; void preallocBlockSums(unsigned int maxNumElements) { //assert(g_numEltsAllocated == 0); // shouldn't be called g_numEltsAllocated = maxNumElements; unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numElts = maxNumElements; int level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { level++; } numElts = numBlocks; } while (numElts > 1); g_scanBlockSums = (int**) malloc(level * sizeof(int*)); g_numLevelsAllocated = level; numElts = maxNumElements; level = 0; do { unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize))); if (numBlocks > 1) { (hipMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(int))); } numElts = numBlocks; } while (numElts > 1); // CUT_CHECK_ERROR("preallocBlockSums"); } void deallocBlockSums() { for (int i = 0; i < g_numLevelsAllocated; i++) { hipFree(g_scanBlockSums[i]); } // CUT_CHECK_ERROR("deallocBlockSums"); free((void**)g_scanBlockSums); g_scanBlockSums = 0; g_numEltsAllocated = 0; g_numLevelsAllocated = 0; } void saven_initialPrefixSum(unsigned int maxNumElements) { if(g_numEltsAllocated == 0) preallocBlockSums(maxNumElements); else if(g_numEltsAllocated>maxNumElements) { deallocBlockSums(); preallocBlockSums(maxNumElements); } } void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level) { unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize))); unsigned int numThreads; if (numBlocks > 1) numThreads = blockSize; else if (isPowerOfTwo(numElements)) numThreads = numElements / 2; else numThreads = floorPow2(numElements); unsigned int numEltsPerBlock = numThreads * 2; // if this is a non-power-of-2 array, the last block will be non-full // compute the smallest power of 2 able to compute its scan. 
unsigned int numEltsLastBlock = numElements - (numBlocks-1) * numEltsPerBlock; unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2); unsigned int np2LastBlock = 0; unsigned int sharedMemLastBlock = 0; if (numEltsLastBlock != numEltsPerBlock) { np2LastBlock = 1; if(!isPowerOfTwo(numEltsLastBlock)) numThreadsLastBlock = floorPow2(numEltsLastBlock); unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS; sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace); } // padding space is used to avoid shared memory bank conflicts unsigned int extraSpace = numEltsPerBlock / NUM_BANKS; unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace); #ifdef DEBUG if (numBlocks > 1) { assert(g_numEltsAllocated >= numElements); } #endif // setup execution parameters // if NP2, we process the last block separately dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1); dim3 threads(numThreads, 1, 1); // make sure there are no CUDA errors before we start // CUT_CHECK_ERROR("prescanArrayRecursive before kernels"); // execute the scan if (numBlocks > 1) { hipLaunchKernelGGL(( prescan<true, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0); // CUT_CHECK_ERROR("prescanWithBlockSums"); if (np2LastBlock) { hipLaunchKernelGGL(( prescan<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock , 0, outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); // CUT_CHECK_ERROR("prescanNP2WithBlockSums"); } // After scanning all the sub-blocks, we are mostly done. But now we // need to take all of the last values of the sub-blocks and scan those. // This will give us a new value that must be sdded to each block to // get the final results. // recursive (CPU) call prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1); hipLaunchKernelGGL(( uniformAdd), dim3(grid), dim3(threads) , 0, 0, outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0); // CUT_CHECK_ERROR("uniformAdd"); if (np2LastBlock) { hipLaunchKernelGGL(( uniformAdd), dim3(1), dim3(numThreadsLastBlock) , 0, 0, outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock); // CUT_CHECK_ERROR("uniformAdd"); } } else if (isPowerOfTwo(numElements)) { hipLaunchKernelGGL(( prescan<false, false>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numThreads * 2, 0, 0); // CUT_CHECK_ERROR("prescan"); } else { hipLaunchKernelGGL(( prescan<false, true>), dim3(grid), dim3(threads), sharedMemSize , 0, outArray, inArray, 0, numElements, 0, 0); // CUT_CHECK_ERROR("prescanNP2"); } } void prescanArray(int *outArray, int *inArray, int numElements) { prescanArrayRecursive(outArray, inArray, numElements, 0); } int prefexSum( int* d_inArr, int* d_outArr, int numRecords ) { preallocBlockSums(numRecords); prescanArray( d_outArr, d_inArr, numRecords ); deallocBlockSums(); int* h_outLast = ( int* )malloc( sizeof( int ) ); ( hipMemcpy( h_outLast, d_outArr+numRecords-1, sizeof(int), hipMemcpyDeviceToHost) ); int* h_inLast = ( int* )malloc( sizeof( int ) ); ( hipMemcpy( h_inLast, d_inArr+numRecords-1, sizeof(int), hipMemcpyDeviceToHost) ); unsigned int sum = *h_outLast + *h_inLast; free( h_outLast ); free( h_inLast ); return sum; } #endif // _PRESCAN_CU_
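prefexSum above runs an exclusive Blelloch scan on the device and then adds the last input element to the last output element to recover the total. A host-side sketch of exactly that exclusive-scan convention (plain CPU loop, no CUDA):

#include <cstdio>

// Exclusive prefix sum: out[i] = sum of in[0..i-1], with out[0] = 0.
// The total of the array is then out[n-1] + in[n-1], which is what
// prefexSum() reconstructs from the last output and last input element.
int main() {
  const int n = 8;
  int in[n] = {3, 1, 4, 1, 5, 9, 2, 6};
  int out[n];
  int running = 0;
  for (int i = 0; i < n; i++) { out[i] = running; running += in[i]; }
  for (int i = 0; i < n; i++) std::printf("%d ", out[i]);  // 0 3 4 8 9 14 23 25
  std::printf("\ntotal = %d\n", out[n - 1] + in[n - 1]);   // 31
  return 0;
}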
c548e36f54dee3288fab8c22ff8913db652619ba.cu
/* Copyright 2012 The Trustees of Indiana University. All rights reserved. CGL MapReduce Framework on GPUs and CPUs Code Name: Panda 0.1 File: PandaScan.cu Time: 2012-07-01 Developer: Hui Li ([email protected]) This is the source code for Panda, a MapReduce runtime on GPUs and CPUs. */ #ifndef _PANDASCAN_CU_ #define _PANDASCAN_CU_ // includes, kernels #include "Panda.h" // Define this to more rigorously avoid bank conflicts, // even at the lower (root) levels of the tree // Note that due to the higher addressing overhead, performance // is lower with ZERO_BANK_CONFLICTS enabled. It is provided // as an example. //#define ZERO_BANK_CONFLICTS // 16 banks on G80 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS) #endif /////////////////////////////////////////////////////////////////////////////// // Work-efficient compute implementation of scan, one thread per 2 elements // Work-efficient: O(log(n)) steps, and O(n) adds. // Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging // Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements. // // In addition, If ZERO_BANK_CONFLICTS is defined, uses // n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) // shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using // single-element offsets every NUM_BANKS elements, plus additional single-element offsets // after every NUM_BANKS^2 elements. // // Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums // and Their Applications", or Prins and Chatterjee PRAM course notes: // http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf // // This work-efficient version is based on the algorithm presented in Guy Blelloch's // excellent paper "Prefix sums and their applications". // http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html // // Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined) // Con: More instructions to compute bank-conflict-free shared memory addressing, // and slightly more shared memory storage used. // template <bool isNP2> __device__ void loadSharedChunkFromMem(int *s_data, const int *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; // compute spacing to avoid bank conflicts bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); // Cache the computational window in shared memory // pad values beyond n with zeros s_data[ai + bankOffsetA] = g_idata[mem_ai]; if (isNP2) // compile-time decision { s_data[bi + bankOffsetB] = (bi < n) ? 
g_idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = g_idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(int* g_odata, const int* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); // write results to global memory g_odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) // compile-time decision { if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB]; } else { g_odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(int* s_data, int *g_blockSums, int blockIndex) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) // compile-time decision { // write this block's total sum to the corresponding index in the blockSums array g_blockSums[blockIndex] = s_data[index]; } // zero the last element in the scan so it will propagate back to the front s_data[index] = 0; } } __device__ unsigned int buildSum(int *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; // build the sum in place up the tree for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } return stride; } __device__ void scanRootToLeaves(int *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; // traverse down the tree building the scan in place for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); int t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(int *data, int blockIndex, int *blockSums) { int stride = buildSum(data); // build the sum in place up the tree clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex); scanRootToLeaves(data, stride); // traverse down tree to build the scan } template <bool storeSum, bool isNP2> __global__ void prescan(int *g_odata, const int *g_idata, int *g_blockSums, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ int s_data[]; // load data into shared memory loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? 
                                  __mul24(blockIdx.x, (blockDim.x << 1)) : baseIndex,
                                  ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);

    // scan the data in each block
    prescanBlock<storeSum>(s_data, blockIndex, g_blockSums);

    // write results to device memory
    storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB);
}

__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex)
{
    __shared__ int uni;
    if (threadIdx.x == 0)
        uni = uniforms[blockIdx.x + blockOffset];

    unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;

    __syncthreads();

    // note two adds per thread
    g_data[address] += uni;
    g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}

inline bool isPowerOfTwo(int n)
{
    return ((n & (n - 1)) == 0);
}

inline int floorPow2(int n)
{
#ifdef WIN32
    // method 2
    return 1 << (int)logb((float)n);
#else
    // method 1
    // int nf = (int)n;
    // return 1 << (((*(int*)&nf) >> 23) - 127);
    int exp;
    frexp((double)n, &exp);
    return 1 << (exp - 1);
#endif
}

#define BLOCK_SIZE 256

int** g_scanBlockSums;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;

void preallocBlockSums(unsigned int maxNumElements)
{
    //assert(g_numEltsAllocated == 0); // shouldn't be called

    g_numEltsAllocated = maxNumElements;

    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numElts = maxNumElements;

    int level = 0;

    do
    {
        unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            level++;
        }
        numElts = numBlocks;
    } while (numElts > 1);

    g_scanBlockSums = (int**) malloc(level * sizeof(int*));
    g_numLevelsAllocated = level;

    numElts = maxNumElements;
    level = 0;

    do
    {
        unsigned int numBlocks = max(1, (int)ceil((int)numElts / (2.f * blockSize)));
        if (numBlocks > 1)
        {
            (cudaMalloc((void**) &g_scanBlockSums[level++], numBlocks * sizeof(int)));
        }
        numElts = numBlocks;
    } while (numElts > 1);

    // CUT_CHECK_ERROR("preallocBlockSums");
}

void deallocBlockSums()
{
    for (int i = 0; i < g_numLevelsAllocated; i++)
    {
        cudaFree(g_scanBlockSums[i]);
    }

    // CUT_CHECK_ERROR("deallocBlockSums");

    free((void**)g_scanBlockSums);

    g_scanBlockSums = 0;
    g_numEltsAllocated = 0;
    g_numLevelsAllocated = 0;
}

void saven_initialPrefixSum(unsigned int maxNumElements)
{
    if (g_numEltsAllocated == 0)
        preallocBlockSums(maxNumElements);
    else if (g_numEltsAllocated > maxNumElements)
    {
        deallocBlockSums();
        preallocBlockSums(maxNumElements);
    }
}

void prescanArrayRecursive(int *outArray, const int *inArray, int numElements, int level)
{
    unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
    unsigned int numBlocks = max(1, (int)ceil((int)numElements / (2.f * blockSize)));
    unsigned int numThreads;

    if (numBlocks > 1)
        numThreads = blockSize;
    else if (isPowerOfTwo(numElements))
        numThreads = numElements / 2;
    else
        numThreads = floorPow2(numElements);

    unsigned int numEltsPerBlock = numThreads * 2;

    // if this is a non-power-of-2 array, the last block will be non-full
    // compute the smallest power of 2 able to compute its scan.
    unsigned int numEltsLastBlock = numElements - (numBlocks - 1) * numEltsPerBlock;
    unsigned int numThreadsLastBlock = max(1, numEltsLastBlock / 2);
    unsigned int np2LastBlock = 0;
    unsigned int sharedMemLastBlock = 0;

    if (numEltsLastBlock != numEltsPerBlock)
    {
        np2LastBlock = 1;

        if (!isPowerOfTwo(numEltsLastBlock))
            numThreadsLastBlock = floorPow2(numEltsLastBlock);

        unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
        sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
    }

    // padding space is used to avoid shared memory bank conflicts
    unsigned int extraSpace = numEltsPerBlock / NUM_BANKS;
    unsigned int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);

#ifdef DEBUG
    if (numBlocks > 1)
    {
        assert(g_numEltsAllocated >= numElements);
    }
#endif

    // setup execution parameters
    // if NP2, we process the last block separately
    dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
    dim3 threads(numThreads, 1, 1);

    // make sure there are no CUDA errors before we start
    // CUT_CHECK_ERROR("prescanArrayRecursive before kernels");

    // execute the scan
    if (numBlocks > 1)
    {
        prescan<true, false><<< grid, threads, sharedMemSize >>>(outArray, inArray, g_scanBlockSums[level], numThreads * 2, 0, 0);
        // CUT_CHECK_ERROR("prescanWithBlockSums");

        if (np2LastBlock)
        {
            prescan<true, true><<< 1, numThreadsLastBlock, sharedMemLastBlock >>> (outArray, inArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
            // CUT_CHECK_ERROR("prescanNP2WithBlockSums");
        }

        // After scanning all the sub-blocks, we are mostly done. But now we
        // need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
        // get the final results.

        // recursive (CPU) call
        prescanArrayRecursive(g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level + 1);

        uniformAdd<<< grid, threads >>>(outArray, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
        // CUT_CHECK_ERROR("uniformAdd");

        if (np2LastBlock)
        {
            uniformAdd<<< 1, numThreadsLastBlock >>>(outArray, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
            // CUT_CHECK_ERROR("uniformAdd");
        }
    }
    else if (isPowerOfTwo(numElements))
    {
        prescan<false, false><<< grid, threads, sharedMemSize >>>(outArray, inArray, 0, numThreads * 2, 0, 0);
        // CUT_CHECK_ERROR("prescan");
    }
    else
    {
        prescan<false, true><<< grid, threads, sharedMemSize >>>(outArray, inArray, 0, numElements, 0, 0);
        // CUT_CHECK_ERROR("prescanNP2");
    }
}

void prescanArray(int *outArray, int *inArray, int numElements)
{
    prescanArrayRecursive(outArray, inArray, numElements, 0);
}

int prefexSum( int* d_inArr, int* d_outArr, int numRecords )
{
    preallocBlockSums(numRecords);
    prescanArray( d_outArr, d_inArr, numRecords );
    deallocBlockSums();

    int* h_outLast = ( int* )malloc( sizeof( int ) );
    ( cudaMemcpy( h_outLast, d_outArr+numRecords-1, sizeof(int), cudaMemcpyDeviceToHost) );

    int* h_inLast = ( int* )malloc( sizeof( int ) );
    ( cudaMemcpy( h_inLast, d_inArr+numRecords-1, sizeof(int), cudaMemcpyDeviceToHost) );

    unsigned int sum = *h_outLast + *h_inLast;

    free( h_outLast );
    free( h_inLast );

    return sum;
}

#endif // _PRESCAN_CU_
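As a cross-check on the Blelloch-style scan above, a small CPU reference is often useful when validating the device output after copying it back with cudaMemcpy. This routine is illustrative and not part of the original file; the function name is made up.

// Illustrative reference only -- not part of PandaScan.cu.
// prescanArray(out, in, n) produces an exclusive prefix sum: out[0] = 0 and
// out[i] = in[0] + ... + in[i-1]. This host-side loop computes the same thing.
void exclusiveScanReference(const int* in, int* out, int n)
{
    int running = 0;
    for (int i = 0; i < n; i++)
    {
        out[i] = running;   // sum of all elements strictly before i
        running += in[i];
    }
    // 'running' now holds the grand total, matching what prefexSum() returns
    // (out[n-1] + in[n-1]).
}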
780b3e2a7166e9ab99f68649028a7b917d511f69.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gradient2d-256-10-128_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_10(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len 
* __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, 
__reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, 
__reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, 
__reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, 
__reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, 
__reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, 
__reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); 
__CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 238; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = 
blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
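/* Steady-state sweep: each unrolled step loads one new input row, pushes it
 * through all stages, and stores an output row nine rows behind the load
 * (__STORE(__h - 9, ...)).  The body is unrolled by three so that the 3-row
 * window of every stage stays in the explicitly named registers
 * __reg_k_0/1/2, which rotate roles instead of being copied; the trailing
 * __DB_SWITCH(); __syncthreads(); keeps the double-buffered shared row in
 * step from one iteration to the next. */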
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + 
c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); 
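/* Warm-up for blocks that do not start at the top boundary (__c1Id != 0): the
 * first rows loaded here lie in the halo that overlaps the block above, so no
 * boundary special case is needed.  Rows belonging to that overlap are never
 * handed to __STORE (the first store in this branch is __STORE(8, ...)), and
 * the column overlap along c2 is masked by the __writeValidN / __storeValid
 * guards, so each output element is written by exactly one block. */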
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { 
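/* Pipeline drain for the bottom block (this branch and the neighbouring
 * __h + 0 / __h + 2 cases): once the last input row has been loaded, that row
 * is re-fed as the bottom-boundary halo into each remaining stage so the
 * outstanding __STOREs up to the final output row can be flushed. */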
__LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, 
__reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, 
__reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 242; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + 
(((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
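/* kernel0_7 (and kernel0_6 below) repeat the same register-pipelined scheme
 * with one fewer fused time step each (__side0Len = 7, 6, ...), trading halo
 * overhead for temporal reuse; presumably a host-side driver (not shown here)
 * selects whichever kernel0_N matches the number of remaining time steps. */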
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE 
__side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = 
__updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
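/* Illustrative sketch, not produced by the AN5D generator: a naive,
 * single-time-step kernel that spells out the per-point update hidden in the
 * __CALCEXPR macro above, i.e.
 *   out(c1,c2) = b + 1 / sqrt(1e-4 + (b-n)^2 + (b-s)^2 + (b-e)^2 + (b-w)^2)
 * where b is the centre value and n/s/e/w its four neighbours, reading time
 * plane (c0 % 2) of A and writing plane ((c0 + 1) % 2). The kernel name and
 * launch geometry are hypothetical; the generated kernel0_* functions compute
 * the same expression but fuse several time steps and tile the domain. */
__global__ void gradient2d_reference(float *A, int dimsize, int c0)
{
  int c1 = blockIdx.y * blockDim.y + threadIdx.y + 1;  /* interior rows 1..dimsize-2 */
  int c2 = blockIdx.x * blockDim.x + threadIdx.x + 1;  /* interior cols 1..dimsize-2 */
  if (c1 >= dimsize - 1 || c2 >= dimsize - 1) return;
  const float *in  = A + (c0 % 2) * dimsize * dimsize;        /* source time plane      */
  float       *out = A + ((c0 + 1) % 2) * dimsize * dimsize;  /* destination time plane */
  float b = in[c1 * dimsize + c2];
  float n = in[(c1 - 1) * dimsize + c2];
  float s = in[(c1 + 1) * dimsize + c2];
  float w = in[c1 * dimsize + (c2 - 1)];
  float e = in[c1 * dimsize + (c2 + 1)];
  /* centre value plus the inverse root of the regularised squared gradient */
  out[c1 * dimsize + c2] = b + 1.0f / sqrtf(0.0001f
      + (b - n) * (b - n) + (b - s) * (b - s)
      + (b - e) * (b - e) + (b - w) * (b - w));
}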
780b3e2a7166e9ab99f68649028a7b917d511f69.cu
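/* Orientation for the generated kernels below (kernel0_10, kernel0_9 and the
 * lower-degree variants that follow the same pattern): each kernel0_N fuses
 * N time steps per launch (__side0Len = N) over a 128 x (256 - 2*N) tile.
 * A one-dimensional block of __side2LenOl threads streams down the c1
 * dimension, keeping a three-register rotation (__reg_k_0/1/2) per fused step
 * for the c1 neighbours and a double-buffered shared-memory row
 * (__b_sb_double) for the c2 neighbours; the __writeValidK masks shrink the
 * valid region by one halo per fused step, and only __storeValid threads
 * write final results.
 *
 * Hedged host-side sketch: the real driver lives alongside
 * "gradient2d-256-10-128_kernel.hu" and is not shown here. The launch shape
 * below is inferred from the kernel's own indexing (__blockSize ==
 * __side2LenOl, blockIdx.x decomposed into a c1 tile id and a c2 tile id);
 * the helper name launch_kernel0_10 is hypothetical. */
__global__ void kernel0_10(float *A, int dimsize, int timestep, int c0);
static void launch_kernel0_10(float *d_A, int dimsize, int timestep, int c0)
{
  const unsigned side0 = 10, side1 = 128, side2 = 236, halo = 1;
  const unsigned len1 = dimsize - 2, len2 = dimsize - 2;   /* interior extent per axis */
  const unsigned side1Num = (len1 + side1 - 1) / side1;    /* tiles along c1 */
  const unsigned side2Num = (len2 + side2 - 1) / side2;    /* tiles along c2 */
  dim3 block(side2 + 2 * halo * side0, 1, 1);              /* __side2LenOl = 256 threads */
  dim3 grid(side1Num * side2Num, 1, 1);                    /* one block per (c1, c2) tile */
  kernel0_10<<<grid, block>>>(d_A, dimsize, timestep, c0);
}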
#include "gradient2d-256-10-128_kernel.hu" __device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; } __global__ void kernel0_10(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 236; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; float __reg_9_0; float __reg_9_1; float __reg_9_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; 
reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC9(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_9_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_9_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_9_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_9_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_9_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_9_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, 
__reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_9_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_9_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_9_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_9_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(1, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(2, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(3, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); 
__CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(4, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(5, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(6, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(7, __reg_9_0, __reg_9_1, __reg_9_2); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(8, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(9, __reg_9_2, __reg_9_0, __reg_9_1); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); } else { __LOAD(__reg_0_0, 
0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); 
__CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 19); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 20); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(10, __reg_9_0, __reg_9_1, __reg_9_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, 
__reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_0_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 
10, __reg_9_1, __reg_9_2, __reg_9_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, __reg_9_2, __reg_9_0, __reg_9_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_0_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 9, 
__reg_9_2, __reg_9_0, __reg_9_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 8, __reg_9_0, __reg_9_1, __reg_9_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 7, __reg_9_1, __reg_9_2, __reg_9_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 6, __reg_9_2, __reg_9_0, __reg_9_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 5, __reg_9_0, __reg_9_1, __reg_9_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 4, __reg_9_1, __reg_9_2, __reg_9_0); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 3, __reg_9_2, __reg_9_0, __reg_9_1); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 2, __reg_9_0, __reg_9_1, __reg_9_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_0_1); __STORE(__h - 1, __reg_9_1, __reg_9_2, __reg_9_0); __STORE(__h + 0, __reg_9_2, __reg_9_0, __reg_0_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, 
__reg_9_0, __reg_9_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __CALC9(__reg_9_0, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h - 10, __reg_9_1, __reg_9_2, __reg_9_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __CALC9(__reg_9_1, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 10, __reg_9_2, __reg_9_0, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __CALC9(__reg_9_2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 10, __reg_9_0, __reg_9_1, __reg_9_2); __h++; } } __global__ void kernel0_9(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 238; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + 
__local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; float __reg_8_0; float __reg_8_1; float __reg_8_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC8(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_8_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_8_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_8_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_8_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_8_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_8_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_8_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_8_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); 
__CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_8_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(1, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(2, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(3, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(4, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(5, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(6, __reg_8_2, __reg_8_0, __reg_8_1); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, 
__reg_7_0); __STORE(7, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(8, __reg_8_1, __reg_8_2, __reg_8_0); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, 
__reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 17); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 18); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(9, __reg_8_2, __reg_8_0, __reg_8_1); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); 
__CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_0_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_0_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 8, __reg_8_1, __reg_8_2, __reg_8_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 7, __reg_8_2, __reg_8_0, __reg_8_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 6, __reg_8_0, __reg_8_1, __reg_8_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 5, __reg_8_1, __reg_8_2, __reg_8_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, 
__reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 4, __reg_8_2, __reg_8_0, __reg_8_1); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 3, __reg_8_0, __reg_8_1, __reg_8_2); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 2, __reg_8_1, __reg_8_2, __reg_8_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_0_2); __STORE(__h - 1, __reg_8_2, __reg_8_0, __reg_8_1); __STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_0_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __CALC8(__reg_8_2, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 9, __reg_8_0, __reg_8_1, __reg_8_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __CALC8(__reg_8_0, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, 
__reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __CALC8(__reg_8_1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h - 9, __reg_8_2, __reg_8_0, __reg_8_1); __h++; } } __global__ void kernel0_8(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 240; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; float __reg_7_0; float __reg_7_1; float __reg_7_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + 
c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC7(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_7_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_7_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_7_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_7_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_7_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_7_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_7_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, 
__reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_7_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(1, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(2, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(3, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(4, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(5, __reg_7_1, __reg_7_2, __reg_7_0); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(6, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(7, __reg_7_0, __reg_7_1, __reg_7_2); __LOAD(__reg_0_1, 16); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, 
__reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 15); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 16); 
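// Pipeline primed: row 16 has just been loaded; the chain below pushes it through
// the remaining stages and emits output row 8. In the loops that follow (for
// __h >= 17) the kernel stores row (__h - 8) for every row __h it loads, i.e. one
// output row per input row once all 8 fused time steps of this kernel are in flight.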
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(8, __reg_7_1, __reg_7_2, __reg_7_0); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_0_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { 
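// Drain case: exactly one input row (the tile's last, at index __h) is still to be
// loaded. The block below loads it, runs it through every stage, then flushes the
// pipeline by re-using the last loaded register as the lower neighbour of each
// remaining stage, storing the buffered output rows (__h - 8) up to (__h - 1).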
__LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, __reg_7_0, __reg_7_1); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_0_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 7, __reg_7_0, __reg_7_1, __reg_7_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 6, __reg_7_1, __reg_7_2, __reg_7_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 5, __reg_7_2, 
__reg_7_0, __reg_7_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 4, __reg_7_0, __reg_7_1, __reg_7_2); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 3, __reg_7_1, __reg_7_2, __reg_7_0); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 2, __reg_7_2, __reg_7_0, __reg_7_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_0_0); __STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2); __STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_0_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC7(__reg_7_1, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 8, __reg_7_2, __reg_7_0, __reg_7_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __CALC7(__reg_7_2, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h - 8, __reg_7_0, __reg_7_1, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, 
__reg_5_1, __reg_5_2); __CALC7(__reg_7_0, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 8, __reg_7_1, __reg_7_2, __reg_7_0); __h++; } } __global__ void kernel0_7(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 242; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; float __reg_6_0; float __reg_6_1; float __reg_6_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + 
(((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC6(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_6_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_6_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_6_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_6_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_6_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_6_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_6_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(1, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, 
__reg_5_0, __reg_5_1); __STORE(2, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(3, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(4, __reg_6_0, __reg_6_1, __reg_6_2); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(5, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(6, __reg_6_2, __reg_6_0, __reg_6_1); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, 
__reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 13); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 14); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(7, __reg_6_0, __reg_6_1, __reg_6_2); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, 
__reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_0_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_0_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 6, __reg_6_2, __reg_6_0, __reg_6_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 5, __reg_6_0, __reg_6_1, __reg_6_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 4, __reg_6_1, __reg_6_2, __reg_6_0); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); 
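// Tail of the flush for this tile: rows (__h - 3) through (__h + 0) are still
// buffered. In the statements below, __reg_0_1 (the last row loaded, __h + 1)
// keeps serving as the lower neighbour while each remaining stage is retired
// and its pending output row is stored.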
__CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 3, __reg_6_2, __reg_6_0, __reg_6_1); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_0_1); __STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_0); __STORE(__h + 0, __reg_6_2, __reg_6_0, __reg_0_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __CALC6(__reg_6_0, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h - 7, __reg_6_1, __reg_6_2, __reg_6_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __CALC6(__reg_6_1, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 7, __reg_6_2, __reg_6_0, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __CALC6(__reg_6_2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2); __h++; } } __global__ void kernel0_6(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE 
__side2Len = 244; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; float __reg_5_0; float __reg_5_1; float __reg_5_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC5(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_5_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_5_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_5_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_5_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_5_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_5_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(1, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(2, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(3, __reg_5_2, __reg_5_0, __reg_5_1); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(4, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, 
__reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(5, __reg_5_1, __reg_5_2, __reg_5_0); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 11); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 12); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(6, __reg_5_2, __reg_5_0, __reg_5_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, 
__reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_0_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_0_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 5, __reg_5_1, __reg_5_2, __reg_5_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 4, __reg_5_2, __reg_5_0, __reg_5_1); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, 
__reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_0_2); __STORE(__h - 1, __reg_5_2, __reg_5_0, __reg_5_1); __STORE(__h + 0, __reg_5_0, __reg_5_1, __reg_0_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __CALC5(__reg_5_2, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 6, __reg_5_0, __reg_5_1, __reg_5_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __CALC5(__reg_5_0, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 6, __reg_5_1, __reg_5_2, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __CALC5(__reg_5_1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h - 6, __reg_5_2, __reg_5_0, __reg_5_1); __h++; } } __global__ void kernel0_5(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 246; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / 
__side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; float __reg_4_0; float __reg_4_1; float __reg_4_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC4(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_4_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_4_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_4_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_4_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_4_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(1, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(2, __reg_4_1, __reg_4_2, __reg_4_0); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(3, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(4, __reg_4_0, __reg_4_1, __reg_4_2); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); 
__CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 9); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 10); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(5, __reg_4_1, __reg_4_2, __reg_4_0); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_0_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_0_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __STORE(__h - 1, 
__reg_4_0, __reg_4_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_0); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 2, __reg_4_2, __reg_4_0, __reg_4_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_0_0); __STORE(__h - 1, __reg_4_0, __reg_4_1, __reg_4_2); __STORE(__h + 0, __reg_4_1, __reg_4_2, __reg_0_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC4(__reg_4_1, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 5, __reg_4_2, __reg_4_0, __reg_4_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __CALC4(__reg_4_2, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h - 5, __reg_4_0, __reg_4_1, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __CALC4(__reg_4_0, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 5, __reg_4_1, __reg_4_2, __reg_4_0); __h++; } } __global__ void kernel0_4(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define 
__c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 248; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; float __reg_3_0; float __reg_3_1; float __reg_3_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC3(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_3_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_3_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_3_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_3_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(1, __reg_3_0, __reg_3_1, __reg_3_2); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(2, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(3, __reg_3_2, __reg_3_0, __reg_3_1); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 7); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 8); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(4, __reg_3_0, __reg_3_1, __reg_3_2); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, 
__reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_0_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __LOAD(__reg_0_1, __h + 1); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 3, __reg_3_2, __reg_3_0, __reg_3_1); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 2, __reg_3_0, __reg_3_1, __reg_3_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_0_1); __STORE(__h - 1, __reg_3_1, __reg_3_2, __reg_3_0); __STORE(__h + 0, __reg_3_2, __reg_3_0, __reg_0_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __CALC3(__reg_3_0, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, 
__reg_1_1, __reg_1_2, __reg_1_0); __CALC3(__reg_3_1, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 4, __reg_3_2, __reg_3_0, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __CALC3(__reg_3_2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 4, __reg_3_0, __reg_3_1, __reg_3_2); __h++; } } __global__ void kernel0_3(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 250; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; float __reg_2_0; float __reg_2_1; float __reg_2_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __CALC2(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_2_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_2_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(1, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_1, __reg_2_2, __reg_2_0); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __LOAD(__reg_0_2, 5); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, 6); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_2, __reg_2_0, __reg_2_1); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_0_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_0_0); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_0_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_0_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_1, __h + 0); 
__CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, __h + 1); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 2, __reg_2_1, __reg_2_2, __reg_2_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 1, __reg_2_2, __reg_2_0, __reg_2_1); __STORE(__h + 0, __reg_2_0, __reg_2_1, __reg_0_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_1, __reg_1_2, __reg_1_0); __STORE(__h - 3, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_0, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 3, __reg_2_1, __reg_2_2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __CALC2(__reg_2_1, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_2_2, __reg_2_0, __reg_2_1); __h++; } } __global__ void kernel0_2(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 252; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; float __reg_1_0; float __reg_1_1; float __reg_1_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = 
__updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2); else out = reg1; } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_1_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(1, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_0, 3); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, 4); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_1, __reg_1_2, __reg_1_0); __DB_SWITCH(); __syncthreads(); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_0_1); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __STORE(__h - 1, __reg_1_0, __reg_1_1, __reg_0_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_2, __h + 0); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 1, __reg_1_0, __reg_1_1, 
__reg_1_2); __STORE(__h + 0, __reg_1_1, __reg_1_2, __reg_0_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_1, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 2, __reg_1_2, __reg_1_0, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_2, __reg_0_1, __reg_0_2, __reg_0_0); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_0, __reg_0_2, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_1, __reg_1_2, __reg_1_0); __h++; } } __global__ void kernel0_1(float *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 254; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; float __reg_0_0; float __reg_0_1; float __reg_0_2; __shared__ float __b_sb_double[__blockSize * 2]; float *__b_sb = __b_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR(__rn0, __a, __b, __c) do { __rn0 = ((__REGREF(__b, 0)) + (1.0f / sqrt((((0.0001f + (((__REGREF(__b, 0)) - (__REGREF(__a, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__a, 0))))) + (((__REGREF(__b, 0)) - (__REGREF(__c, 0))) * ((__REGREF(__b, 0)) - (__REGREF(__c, 0))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))) * ((__REGREF(__b, 0)) - (__SBREF(__b_sb, 1))))) + (((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))) * 
((__REGREF(__b, 0)) - (__SBREF(__b_sb, -1))))))); } while (0) #define __DB_SWITCH() do { __b_sb = &__b_sb_double[(__b_sb == __b_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c) do { __DB_SWITCH(); __b_sb[__tid] = b; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2) do { __CALCSETUP(reg0, reg1, reg2); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __STORE(1, __reg_0_0, __reg_0_1, __reg_0_2); } __b_sb = __b_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h + 0, __reg_0_2, __reg_0_0, __reg_0_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 1, __reg_0_1, __reg_0_2, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 1, __reg_0_2, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 1, __reg_0_0, __reg_0_1, __reg_0_2); __h++; } }
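// ---------------------------------------------------------------------------
// Editor's sketch (not part of the generated file): the kernel0_* variants
// above are AN5D temporally-blocked schedules of a single point update that
// is encoded in the __CALCEXPR macro.  For readability, a minimal, untiled
// reference form of that update is sketched here.  It assumes the same
// double-buffered layout A[(t % 2) * dimsize * dimsize + c1 * dimsize + c2]
// used by the __LOAD/__DEST macros; the launch geometry (one thread per
// interior point) is an illustrative assumption, not the tuned schedule
// of the generated kernels.
__global__ void stencil_reference_sketch(float *A, int dimsize, int c0)
{
  const int c1 = blockIdx.y * blockDim.y + threadIdx.y + 1;  // interior rows    [1, dimsize - 1)
  const int c2 = blockIdx.x * blockDim.x + threadIdx.x + 1;  // interior columns [1, dimsize - 1)
  if (c1 >= dimsize - 1 || c2 >= dimsize - 1) return;

  const size_t plane = (size_t)dimsize * dimsize;
  const float *in  = A + (c0 % 2) * plane;        // read buffer for this time step
  float       *out = A + ((c0 + 1) % 2) * plane;  // write buffer (same indexing as __DEST)

  const float b     = in[(size_t)c1 * dimsize + c2];  // centre value (__REGREF(__b, 0))
  const float north = in[(size_t)(c1 - 1) * dimsize + c2];
  const float south = in[(size_t)(c1 + 1) * dimsize + c2];
  const float west  = in[(size_t)c1 * dimsize + (c2 - 1)];
  const float east  = in[(size_t)c1 * dimsize + (c2 + 1)];

  // Same expression as __CALCEXPR: centre plus the reciprocal square root of
  // 0.0001f plus the sum of squared differences with the four neighbours.
  out[(size_t)c1 * dimsize + c2] =
      b + 1.0f / sqrtf(0.0001f + (b - north) * (b - north) + (b - south) * (b - south) +
                       (b - east) * (b - east) + (b - west) * (b - west));
}
// A hypothetical launch for one time step t of the same problem could be:
//   dim3 block(16, 16);
//   dim3 grid((dimsize - 2 + 15) / 16, (dimsize - 2 + 15) / 16);
//   stencil_reference_sketch<<<grid, block>>>(A, dimsize, t);
// ---------------------------------------------------------------------------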
4a52d7363b6079998d8d1ec2a560ef2b67ccb8a3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <stdint.h>
#include <include/cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/select_impl.cuh"

template <typename T>
__global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output) {
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
    output[pos] = cond[pos] ? input_x[pos] : input_y[pos];
  }
  return;
}

template <typename T>
void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output,
               hipStream_t cuda_stream) {
  hipLaunchKernelGGL(( Select), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, cond, input_x, input_y, output);
  return;
}

template void CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
                               float* output, hipStream_t cuda_stream);
template void CalSelect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y,
                             int* output, hipStream_t cuda_stream);
template void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
                              half* output, hipStream_t cuda_stream);
template void CalSelect<int64_t>(const size_t size, const bool* cond, const int64_t* input_X, const int64_t* input_y,
                                 int64_t* output, hipStream_t cuda_stream);
4a52d7363b6079998d8d1ec2a560ef2b67ccb8a3.cu
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdio.h>
#include <stdint.h>
#include <include/cuda_runtime.h>
#include "backend/kernel_compiler/gpu/cuda_impl/select_impl.cuh"

template <typename T>
__global__ void Select(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output) {
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
    output[pos] = cond[pos] ? input_x[pos] : input_y[pos];
  }
  return;
}

template <typename T>
void CalSelect(const size_t size, const bool* cond, const T* input_x, const T* input_y, T* output,
               cudaStream_t cuda_stream) {
  Select<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, cond, input_x, input_y, output);
  return;
}

template void CalSelect<float>(const size_t size, const bool* cond, const float* input_X, const float* input_y,
                               float* output, cudaStream_t cuda_stream);
template void CalSelect<int>(const size_t size, const bool* cond, const int* input_X, const int* input_y,
                             int* output, cudaStream_t cuda_stream);
template void CalSelect<half>(const size_t size, const bool* cond, const half* input_X, const half* input_y,
                              half* output, cudaStream_t cuda_stream);
template void CalSelect<int64_t>(const size_t size, const bool* cond, const int64_t* input_X, const int64_t* input_y,
                                 int64_t* output, cudaStream_t cuda_stream);
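// The .hip/.cu pair above shows the core rewrite hipify performs on this file: the
// triple-chevron launch in CalSelect becomes a hipLaunchKernelGGL call with explicit
// dim3 arguments, while the kernel body is left untouched. A minimal, self-contained
// illustration of the same mapping (Fill and LaunchFill are hypothetical names, not
// part of the sources above):
#include <hip/hip_runtime.h>

__global__ void Fill(float *out, float value, size_t n) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

void LaunchFill(float *out, float value, size_t n, hipStream_t stream) {
  const unsigned threads = 256;
  const unsigned blocks = static_cast<unsigned>((n + threads - 1) / threads);
  // CUDA form:      Fill<<<blocks, threads, 0, stream>>>(out, value, n);
  // HIP form after hipify:
  hipLaunchKernelGGL(Fill, dim3(blocks), dim3(threads), 0, stream, out, value, n);
}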
fa41302315b5c12a5185f708766456397dcb8cef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" #include <iostream> //#include <boost/graph/buffer_concepts.hpp> #include "templated_extract.cuh" namespace pcl { namespace device { namespace kinfuLS { struct SlicePointsExtractor { enum { MAX_LOCAL_POINTS = 1, MIN_X_MARGIN = 0, MIN_Y_MARGIN = 0, MIN_Z_MARGIN = 0, }; // returns the number of points extracted __device__ __forceinline__ int filter(const FullScan6& parent, const pcl::gpu::kinfuLS::tsdf_buffer& buffer, int x, int y, int z) { int W; float F = parent.fetch_with_rolling_buffer (x, y, z, W); bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ; int local_count = 0; if (in_black_zone) { int W; float F = parent.fetch_with_rolling_buffer (x, y, z, W); if (W != 0 && F != 1.f && F < 0.98 /*&& F != 0.0f && F > -1.0f*/) { float4 p; p.x = x; p.y = y; p.z = z; p.w = F; points[local_count++] = p; } } return local_count; } __device__ __forceinline__ bool isFull(const FullScan6& parent, unsigned int i) { return (i >= parent.output_xyz.size); } __device__ void store(const FullScan6& parent, int offset_storage, int l) { float x = points[l].x; float y = points[l].y; float z = points[l].z; float i = points[l].w; parent.store_point_intensity (x, y, z, i, parent.output_xyz.data, parent.output_intensity.data, offset_storage); } int knowledge_limit; bool edges_only; int3 minBounds,maxBounds; float4 points[MAX_LOCAL_POINTS]; }; struct IncompletePointsExtractor { enum { MAX_LOCAL_POINTS = 1, MIN_X_MARGIN = 1, MIN_Y_MARGIN = 1, MIN_Z_MARGIN = 1, }; __device__ IncompletePointsExtractor(int kl,bool eo): knowledge_limit(kl), edges_only(eo) {} // returns the number of points extracted __device__ __forceinline__ int filter(const FullScan6& parent, const pcl::gpu::kinfuLS::tsdf_buffer& buffer, 
int x, int y, int z) { int W; float F = parent.fetch_with_rolling_buffer (x, y, z, W); if (W >= knowledge_limit && F > 0.0) { bool found_unk = false; bool found_occ = false; float3 unk_tot = make_float3(0.0,0.0,0.0); #pragma unroll for (int i = 0; i < TOTAL_BEARINGS18; i++) { int i26 = getBearingIdById18(i); int3 b = getBearingById26(i26); const float nm = getNormOfBearing(b); int Wv; float Fv = parent.fetch_with_rolling_buffer (x + b.x,y + b.y,z + b.z,Wv); float weight = max(knowledge_limit - Wv,-knowledge_limit); if (Wv >= knowledge_limit && Fv < 0.0) weight = knowledge_limit; unk_tot = make_float3(unk_tot.x + weight * (float(b.x) / nm), unk_tot.y + weight * (float(b.y) / nm), unk_tot.z + weight * (float(b.z) / nm)); if (Wv < knowledge_limit) { if (isBearingId6(i26)) // restrict to 6-neighborhood for unknown found_unk = true; } if (Wv >= knowledge_limit) if (Fv < 0.0) found_occ = true; } if ((found_occ == edges_only) && found_unk) { #pragma unroll for (int i = 0; i < TOTAL_BEARINGS26; i++) if (!isBearingId18(i)) { int3 b = getBearingById26(i); const float nm = getNormOfBearing(b); int Wv; const float Fv = parent.fetch_with_rolling_buffer (x + b.x,y + b.y,z + b.z,Wv); float weight = max(knowledge_limit - Wv,-knowledge_limit); if (Wv >= knowledge_limit && Fv < 0.0) weight = knowledge_limit; unk_tot = make_float3(unk_tot.x + weight * (float(b.x) / nm), unk_tot.y + weight * (float(b.y) / nm), unk_tot.z + weight * (float(b.z) / nm)); } float3 p; p.x = x; p.y = y; p.z = z; float3 n; n.x = -unk_tot.x; n.y = -unk_tot.y; n.z = -unk_tot.z; points[0] = p; normals[0] = normalized(n); return 1; } } return 0; } enum { TOTAL_BEARINGS26 = 26 }; __device__ __forceinline__ int3 getBearingById26(int id) { int3 result; int act_id = id < 13 ? id : (id + 1); // avoid (0,0,0) result.x = act_id % 3 - 1; result.y = (act_id / 3) % 3 - 1; result.z = (act_id / 9) % 3 - 1; return result; } __device__ __forceinline__ float getNormOfBearing(int3 b) { int sn = abs(b.x) + abs(b.y) + abs(b.z); if (sn == 1) return 1.0; if (sn == 2) return 1.414; if (sn == 3) return 1.732; return 1.0; } enum { TOTAL_BEARINGS18 = 18 }; __device__ __forceinline__ int getBearingIdById18(int id) { const int ids[TOTAL_BEARINGS18] = {1,3,4,5,7,9,10,11,12,13,14,15,16,18,20,21,22,24}; return ids[id]; } __device__ __forceinline__ bool isBearingId18(int id) { const int ids[TOTAL_BEARINGS18] = {1,3,4,5,7,9,10,11,12,13,14,15,16,18,20,21,22,24}; #pragma unroll for (int i = 0; i < TOTAL_BEARINGS18; i++) if (ids[i] == id) return true; return false; } enum { TOTAL_BEARINGS6 = 6 }; __device__ __forceinline__ int getBearingIdById6(int id) { const int ids[TOTAL_BEARINGS6] = {4,21,12,13,10,15}; return ids[id]; } __device__ __forceinline__ bool isBearingId6(int id) { const int ids[TOTAL_BEARINGS6] = {4,21,12,13,10,15}; #pragma unroll for (int i = 0; i < TOTAL_BEARINGS6; i++) if (ids[i] == id) return true; return false; } __device__ __forceinline__ bool isFull(const FullScan6& parent, unsigned int i) { return (i >= parent.output_xyz.size); } __device__ void store(const FullScan6& parent, int offset_storage, int l) { float x = points[l].x; float y = points[l].y; float z = points[l].z; float nx = normals[l].x; float ny = normals[l].y; float nz = normals[l].z; parent.store_point_normals (x, y, z, nx, ny, nz, parent.output_xyz.data, parent.output_normals.data, offset_storage); } int knowledge_limit; bool edges_only; float3 points[MAX_LOCAL_POINTS]; float3 normals[MAX_LOCAL_POINTS]; private: IncompletePointsExtractor() {} }; __global__ void extractSliceKernel (const 
FullScan6 fs, int3 minBounds, int3 maxBounds) { SlicePointsExtractor extractor; extractor.maxBounds = maxBounds; extractor.minBounds = minBounds; fs.templatedExtract (extractor); } __global__ void extractIncompletePointsKernel (const FullScan6 fs,bool edges_only) { IncompletePointsExtractor extractor(5,edges_only); fs.templatedExtract (extractor); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, const int shiftX, const int shiftY, const int shiftZ, PtrSz<PointType> output_xyz, PtrSz<float> output_intensities, PtrStep<int> last_data_transfer_matrix, int & data_transfer_finished) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / buffer->voxels_size.x; fs.cell_size.y = volume_size.y / buffer->voxels_size.y; fs.cell_size.z = volume_size.z / buffer->voxels_size.z; fs.output_xyz = output_xyz; fs.output_intensity = output_intensities; fs.data_transfer_completion_matrix = last_data_transfer_matrix; fs.rolling_buffer = *buffer; dim3 block (FullScan6::CTA_SIZE_X, FullScan6::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); //Compute slice bounds int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int newZ = buffer->origin_GRID.z + shiftZ; int3 minBounds, maxBounds; //X if (newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if (minBounds.x > maxBounds.x) std::swap (minBounds.x, maxBounds.x); //Y if (newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap (minBounds.y, maxBounds.y); //Z if (newZ >= 0) { minBounds.z = buffer->origin_GRID.z; maxBounds.z = newZ; } else { minBounds.z = newZ + buffer->voxels_size.z; maxBounds.z = buffer->origin_GRID.z + buffer->voxels_size.z; } if (minBounds.z > maxBounds.z) std::swap(minBounds.z, maxBounds.z); if (minBounds.x >= 0 && minBounds.x < buffer->origin_GRID.x) { minBounds.x += buffer->voxels_size.x - buffer->origin_GRID.x; maxBounds.x += buffer->voxels_size.x - buffer->origin_GRID.x; } else { minBounds.x -= buffer->origin_GRID.x; maxBounds.x -= buffer->origin_GRID.x; } if (minBounds.y >= 0 && minBounds.y < buffer->origin_GRID.y) { minBounds.y += buffer->voxels_size.y - buffer->origin_GRID.y; maxBounds.y += buffer->voxels_size.y - buffer->origin_GRID.y; } else { minBounds.y -= buffer->origin_GRID.y; maxBounds.y -= buffer->origin_GRID.y; } if (minBounds.z >= 0 && minBounds.z < buffer->origin_GRID.z) { minBounds.z += buffer->voxels_size.z - buffer->origin_GRID.z; maxBounds.z += buffer->voxels_size.z - buffer->origin_GRID.z; } else { minBounds.z -= buffer->origin_GRID.z; maxBounds.z -= buffer->origin_GRID.z; } fs.init_globals(); // Extraction call hipLaunchKernelGGL(( extractSliceKernel), dim3(grid), dim3(block), 0, 0, fs, minBounds, maxBounds); cudaSafeCall ( hipGetLastError () ); cudaSafeCall ( hipDeviceSynchronize () ); int size = fs.get_result_size(data_transfer_finished); return min ((int)size, int(output_xyz.size)); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void getDataTransferCompletionMatrixSize(size_t & 
height, size_t & width) { dim3 block (FullScan6::CTA_SIZE_X, FullScan6::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); width = grid.x; height = grid.y; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t extractIncompletePointsAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,const bool edges_only, PtrSz<PointType> output_xyz, PtrSz<float4> output_normals, PtrStep<int> last_data_transfer_matrix, int & data_transfer_finished) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / buffer->voxels_size.x; fs.cell_size.y = volume_size.y / buffer->voxels_size.y; fs.cell_size.z = volume_size.z / buffer->voxels_size.z; fs.output_xyz = output_xyz; fs.output_normals = output_normals; fs.data_transfer_completion_matrix = last_data_transfer_matrix; fs.rolling_buffer = *buffer; dim3 block (FullScan6::CTA_SIZE_X, FullScan6::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); fs.init_globals(); // Extraction call hipLaunchKernelGGL(( extractIncompletePointsKernel), dim3(grid), dim3(block), 0, 0, fs, edges_only); cudaSafeCall ( hipGetLastError () ); cudaSafeCall ( hipDeviceSynchronize () ); int size = fs.get_result_size(data_transfer_finished); return min (size, int(output_xyz.size)); } } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { namespace kinfuLS { template<typename NormalType> struct ExtractNormals { float3 cell_size; PtrStep<short2> volume; PtrSz<PointType> points; mutable NormalType* output; __device__ __forceinline__ float readTsdf (int x, int y, int z) const { return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]); } __device__ __forceinline__ float3 fetchPoint (int idx) const { PointType p = points.data[idx]; return make_float3 (p.x, p.y, p.z); } __device__ __forceinline__ void storeNormal (int idx, float3 normal) const { NormalType n; n.x = normal.x; n.y = normal.y; n.z = normal.z; output[idx] = n; } __device__ __forceinline__ int3 getVoxel (const float3& point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ void operator () () const { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= points.size) return; const float qnan = numeric_limits<float>::quiet_NaN (); float3 n = make_float3 (qnan, qnan, qnan); float3 point = fetchPoint (idx); int3 g = getVoxel (point); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2) { float3 t; t = point; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t); t = point; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t); n.x = (Fx1 - Fx2); t = point; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t); t = point; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t); n.y = (Fy1 - Fy2); t = point; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t); t = point; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t); n.z = (Fz1 - Fz2); n = normalized (n); } storeNormal (idx, n); } __device__ __forceinline__ float interpolateTrilineary (const float3& point) const { int3 g = getVoxel (point); /* //OLD CODE float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * 
cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; if (point.x < vx) g.x--; if (point.y < vy) g.y--; if (point.z < vz) g.z--; //float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; //float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; //float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float a = point.x/ cell_size.x - (g.x + 0.5f); float b = point.y/ cell_size.y - (g.y + 0.5f); float c = point.z/ cell_size.z - (g.z + 0.5f); */ //NEW CODE float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; }; float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; }; float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; }; float res = (1 - a) * ( (1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1) * c ) ) + a * ( (1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1) * c ) ); return res; } }; template<typename NormalType> __global__ void extractNormalsKernel (const ExtractNormals<NormalType> en) { en (); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template<typename NormalType> void extractNormals (const PtrStep<short2>& volume, const float3& volume_size, const PtrSz<PointType>& points, NormalType* output) { ExtractNormals<NormalType> en; en.volume = volume; en.cell_size.x = volume_size.x / VOLUME_X; en.cell_size.y = volume_size.y / VOLUME_Y; en.cell_size.z = volume_size.z / VOLUME_Z; en.points = points; en.output = output; dim3 block (256); dim3 grid (divUp (points.size, block.x)); hipLaunchKernelGGL(( extractNormalsKernel), dim3(grid), dim3(block), 0, 0, en); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } template void extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output); template void extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output); } } }
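// getBearingById26 above decodes a neighbour id in [0, 26) into an offset in
// {-1, 0, 1}^3, skipping (0, 0, 0) by bumping ids >= 13 up by one. A host-side sketch
// of the same decoding (print_bearings is a hypothetical helper, shown only to make
// the index arithmetic explicit):
#include <cstdio>

static void print_bearings() {
  for (int id = 0; id < 26; ++id) {
    int act_id = id < 13 ? id : id + 1;   // skip the centre voxel
    int bx = act_id % 3 - 1;
    int by = (act_id / 3) % 3 - 1;
    int bz = (act_id / 9) % 3 - 1;
    std::printf("id %2d -> (%2d, %2d, %2d)\n", id, bx, by, bz);
  }
}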
fa41302315b5c12a5185f708766456397dcb8cef.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" #include <iostream> //#include <boost/graph/buffer_concepts.hpp> #include "templated_extract.cuh" namespace pcl { namespace device { namespace kinfuLS { struct SlicePointsExtractor { enum { MAX_LOCAL_POINTS = 1, MIN_X_MARGIN = 0, MIN_Y_MARGIN = 0, MIN_Z_MARGIN = 0, }; // returns the number of points extracted __device__ __forceinline__ int filter(const FullScan6& parent, const pcl::gpu::kinfuLS::tsdf_buffer& buffer, int x, int y, int z) { int W; float F = parent.fetch_with_rolling_buffer (x, y, z, W); bool in_black_zone = ( (x >= minBounds.x && x <= maxBounds.x) || (y >= minBounds.y && y <= maxBounds.y) || ( z >= minBounds.z && z <= maxBounds.z) ) ; int local_count = 0; if (in_black_zone) { int W; float F = parent.fetch_with_rolling_buffer (x, y, z, W); if (W != 0 && F != 1.f && F < 0.98 /*&& F != 0.0f && F > -1.0f*/) { float4 p; p.x = x; p.y = y; p.z = z; p.w = F; points[local_count++] = p; } } return local_count; } __device__ __forceinline__ bool isFull(const FullScan6& parent, unsigned int i) { return (i >= parent.output_xyz.size); } __device__ void store(const FullScan6& parent, int offset_storage, int l) { float x = points[l].x; float y = points[l].y; float z = points[l].z; float i = points[l].w; parent.store_point_intensity (x, y, z, i, parent.output_xyz.data, parent.output_intensity.data, offset_storage); } int knowledge_limit; bool edges_only; int3 minBounds,maxBounds; float4 points[MAX_LOCAL_POINTS]; }; struct IncompletePointsExtractor { enum { MAX_LOCAL_POINTS = 1, MIN_X_MARGIN = 1, MIN_Y_MARGIN = 1, MIN_Z_MARGIN = 1, }; __device__ IncompletePointsExtractor(int kl,bool eo): knowledge_limit(kl), edges_only(eo) {} // returns the number of points extracted __device__ __forceinline__ int filter(const FullScan6& parent, const pcl::gpu::kinfuLS::tsdf_buffer& buffer, int x, int y, int z) { int W; float F = parent.fetch_with_rolling_buffer (x, y, z, W); 
if (W >= knowledge_limit && F > 0.0) { bool found_unk = false; bool found_occ = false; float3 unk_tot = make_float3(0.0,0.0,0.0); #pragma unroll for (int i = 0; i < TOTAL_BEARINGS18; i++) { int i26 = getBearingIdById18(i); int3 b = getBearingById26(i26); const float nm = getNormOfBearing(b); int Wv; float Fv = parent.fetch_with_rolling_buffer (x + b.x,y + b.y,z + b.z,Wv); float weight = max(knowledge_limit - Wv,-knowledge_limit); if (Wv >= knowledge_limit && Fv < 0.0) weight = knowledge_limit; unk_tot = make_float3(unk_tot.x + weight * (float(b.x) / nm), unk_tot.y + weight * (float(b.y) / nm), unk_tot.z + weight * (float(b.z) / nm)); if (Wv < knowledge_limit) { if (isBearingId6(i26)) // restrict to 6-neighborhood for unknown found_unk = true; } if (Wv >= knowledge_limit) if (Fv < 0.0) found_occ = true; } if ((found_occ == edges_only) && found_unk) { #pragma unroll for (int i = 0; i < TOTAL_BEARINGS26; i++) if (!isBearingId18(i)) { int3 b = getBearingById26(i); const float nm = getNormOfBearing(b); int Wv; const float Fv = parent.fetch_with_rolling_buffer (x + b.x,y + b.y,z + b.z,Wv); float weight = max(knowledge_limit - Wv,-knowledge_limit); if (Wv >= knowledge_limit && Fv < 0.0) weight = knowledge_limit; unk_tot = make_float3(unk_tot.x + weight * (float(b.x) / nm), unk_tot.y + weight * (float(b.y) / nm), unk_tot.z + weight * (float(b.z) / nm)); } float3 p; p.x = x; p.y = y; p.z = z; float3 n; n.x = -unk_tot.x; n.y = -unk_tot.y; n.z = -unk_tot.z; points[0] = p; normals[0] = normalized(n); return 1; } } return 0; } enum { TOTAL_BEARINGS26 = 26 }; __device__ __forceinline__ int3 getBearingById26(int id) { int3 result; int act_id = id < 13 ? id : (id + 1); // avoid (0,0,0) result.x = act_id % 3 - 1; result.y = (act_id / 3) % 3 - 1; result.z = (act_id / 9) % 3 - 1; return result; } __device__ __forceinline__ float getNormOfBearing(int3 b) { int sn = abs(b.x) + abs(b.y) + abs(b.z); if (sn == 1) return 1.0; if (sn == 2) return 1.414; if (sn == 3) return 1.732; return 1.0; } enum { TOTAL_BEARINGS18 = 18 }; __device__ __forceinline__ int getBearingIdById18(int id) { const int ids[TOTAL_BEARINGS18] = {1,3,4,5,7,9,10,11,12,13,14,15,16,18,20,21,22,24}; return ids[id]; } __device__ __forceinline__ bool isBearingId18(int id) { const int ids[TOTAL_BEARINGS18] = {1,3,4,5,7,9,10,11,12,13,14,15,16,18,20,21,22,24}; #pragma unroll for (int i = 0; i < TOTAL_BEARINGS18; i++) if (ids[i] == id) return true; return false; } enum { TOTAL_BEARINGS6 = 6 }; __device__ __forceinline__ int getBearingIdById6(int id) { const int ids[TOTAL_BEARINGS6] = {4,21,12,13,10,15}; return ids[id]; } __device__ __forceinline__ bool isBearingId6(int id) { const int ids[TOTAL_BEARINGS6] = {4,21,12,13,10,15}; #pragma unroll for (int i = 0; i < TOTAL_BEARINGS6; i++) if (ids[i] == id) return true; return false; } __device__ __forceinline__ bool isFull(const FullScan6& parent, unsigned int i) { return (i >= parent.output_xyz.size); } __device__ void store(const FullScan6& parent, int offset_storage, int l) { float x = points[l].x; float y = points[l].y; float z = points[l].z; float nx = normals[l].x; float ny = normals[l].y; float nz = normals[l].z; parent.store_point_normals (x, y, z, nx, ny, nz, parent.output_xyz.data, parent.output_normals.data, offset_storage); } int knowledge_limit; bool edges_only; float3 points[MAX_LOCAL_POINTS]; float3 normals[MAX_LOCAL_POINTS]; private: IncompletePointsExtractor() {} }; __global__ void extractSliceKernel (const FullScan6 fs, int3 minBounds, int3 maxBounds) { SlicePointsExtractor extractor; 
extractor.maxBounds = maxBounds; extractor.minBounds = minBounds; fs.templatedExtract (extractor); } __global__ void extractIncompletePointsKernel (const FullScan6 fs,bool edges_only) { IncompletePointsExtractor extractor(5,edges_only); fs.templatedExtract (extractor); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t extractSliceAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, const int shiftX, const int shiftY, const int shiftZ, PtrSz<PointType> output_xyz, PtrSz<float> output_intensities, PtrStep<int> last_data_transfer_matrix, int & data_transfer_finished) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / buffer->voxels_size.x; fs.cell_size.y = volume_size.y / buffer->voxels_size.y; fs.cell_size.z = volume_size.z / buffer->voxels_size.z; fs.output_xyz = output_xyz; fs.output_intensity = output_intensities; fs.data_transfer_completion_matrix = last_data_transfer_matrix; fs.rolling_buffer = *buffer; dim3 block (FullScan6::CTA_SIZE_X, FullScan6::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); //Compute slice bounds int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int newZ = buffer->origin_GRID.z + shiftZ; int3 minBounds, maxBounds; //X if (newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if (minBounds.x > maxBounds.x) std::swap (minBounds.x, maxBounds.x); //Y if (newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap (minBounds.y, maxBounds.y); //Z if (newZ >= 0) { minBounds.z = buffer->origin_GRID.z; maxBounds.z = newZ; } else { minBounds.z = newZ + buffer->voxels_size.z; maxBounds.z = buffer->origin_GRID.z + buffer->voxels_size.z; } if (minBounds.z > maxBounds.z) std::swap(minBounds.z, maxBounds.z); if (minBounds.x >= 0 && minBounds.x < buffer->origin_GRID.x) { minBounds.x += buffer->voxels_size.x - buffer->origin_GRID.x; maxBounds.x += buffer->voxels_size.x - buffer->origin_GRID.x; } else { minBounds.x -= buffer->origin_GRID.x; maxBounds.x -= buffer->origin_GRID.x; } if (minBounds.y >= 0 && minBounds.y < buffer->origin_GRID.y) { minBounds.y += buffer->voxels_size.y - buffer->origin_GRID.y; maxBounds.y += buffer->voxels_size.y - buffer->origin_GRID.y; } else { minBounds.y -= buffer->origin_GRID.y; maxBounds.y -= buffer->origin_GRID.y; } if (minBounds.z >= 0 && minBounds.z < buffer->origin_GRID.z) { minBounds.z += buffer->voxels_size.z - buffer->origin_GRID.z; maxBounds.z += buffer->voxels_size.z - buffer->origin_GRID.z; } else { minBounds.z -= buffer->origin_GRID.z; maxBounds.z -= buffer->origin_GRID.z; } fs.init_globals(); // Extraction call extractSliceKernel<<<grid, block>>>(fs, minBounds, maxBounds); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall ( cudaDeviceSynchronize () ); int size = fs.get_result_size(data_transfer_finished); return min ((int)size, int(output_xyz.size)); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void getDataTransferCompletionMatrixSize(size_t & height, size_t & width) { dim3 block (FullScan6::CTA_SIZE_X, FullScan6::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, 
block.x), divUp (VOLUME_Y, block.y)); width = grid.x; height = grid.y; } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// size_t extractIncompletePointsAsCloud (const PtrStep<short2>& volume, const float3& volume_size, const pcl::gpu::kinfuLS::tsdf_buffer* buffer,const bool edges_only, PtrSz<PointType> output_xyz, PtrSz<float4> output_normals, PtrStep<int> last_data_transfer_matrix, int & data_transfer_finished) { FullScan6 fs; fs.volume = volume; fs.cell_size.x = volume_size.x / buffer->voxels_size.x; fs.cell_size.y = volume_size.y / buffer->voxels_size.y; fs.cell_size.z = volume_size.z / buffer->voxels_size.z; fs.output_xyz = output_xyz; fs.output_normals = output_normals; fs.data_transfer_completion_matrix = last_data_transfer_matrix; fs.rolling_buffer = *buffer; dim3 block (FullScan6::CTA_SIZE_X, FullScan6::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); fs.init_globals(); // Extraction call extractIncompletePointsKernel<<<grid, block>>>(fs, edges_only); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall ( cudaDeviceSynchronize () ); int size = fs.get_result_size(data_transfer_finished); return min (size, int(output_xyz.size)); } } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { namespace kinfuLS { template<typename NormalType> struct ExtractNormals { float3 cell_size; PtrStep<short2> volume; PtrSz<PointType> points; mutable NormalType* output; __device__ __forceinline__ float readTsdf (int x, int y, int z) const { return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]); } __device__ __forceinline__ float3 fetchPoint (int idx) const { PointType p = points.data[idx]; return make_float3 (p.x, p.y, p.z); } __device__ __forceinline__ void storeNormal (int idx, float3 normal) const { NormalType n; n.x = normal.x; n.y = normal.y; n.z = normal.z; output[idx] = n; } __device__ __forceinline__ int3 getVoxel (const float3& point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ void operator () () const { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= points.size) return; const float qnan = numeric_limits<float>::quiet_NaN (); float3 n = make_float3 (qnan, qnan, qnan); float3 point = fetchPoint (idx); int3 g = getVoxel (point); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2) { float3 t; t = point; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t); t = point; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t); n.x = (Fx1 - Fx2); t = point; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t); t = point; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t); n.y = (Fy1 - Fy2); t = point; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t); t = point; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t); n.z = (Fz1 - Fz2); n = normalized (n); } storeNormal (idx, n); } __device__ __forceinline__ float interpolateTrilineary (const float3& point) const { int3 g = getVoxel (point); /* //OLD CODE float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; if (point.x < vx) g.x--; if (point.y < vy) g.y--; if (point.z < vz) g.z--; //float a = (point.x - (g.x + 
0.5f) * cell_size.x) / cell_size.x; //float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; //float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float a = point.x/ cell_size.x - (g.x + 0.5f); float b = point.y/ cell_size.y - (g.y + 0.5f); float c = point.z/ cell_size.z - (g.z + 0.5f); */ //NEW CODE float a = point.x/ cell_size.x - (g.x + 0.5f); if (a<0) { g.x--; a+=1.0f; }; float b = point.y/ cell_size.y - (g.y + 0.5f); if (b<0) { g.y--; b+=1.0f; }; float c = point.z/ cell_size.z - (g.z + 0.5f); if (c<0) { g.z--; c+=1.0f; }; float res = (1 - a) * ( (1 - b) * ( readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1) * c ) ) + a * ( (1 - b) * ( readTsdf (g.x + 1, g.y + 0, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1) * c ) + b * ( readTsdf (g.x + 1, g.y + 1, g.z + 0) * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1) * c ) ); return res; } }; template<typename NormalType> __global__ void extractNormalsKernel (const ExtractNormals<NormalType> en) { en (); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template<typename NormalType> void extractNormals (const PtrStep<short2>& volume, const float3& volume_size, const PtrSz<PointType>& points, NormalType* output) { ExtractNormals<NormalType> en; en.volume = volume; en.cell_size.x = volume_size.x / VOLUME_X; en.cell_size.y = volume_size.y / VOLUME_Y; en.cell_size.z = volume_size.z / VOLUME_Z; en.points = points; en.output = output; dim3 block (256); dim3 grid (divUp (points.size, block.x)); extractNormalsKernel<<<grid, block>>>(en); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } template void extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output); template void extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output); } } }
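// interpolateTrilineary above is plain trilinear interpolation of the TSDF: after the
// fractional cell coordinates (a, b, c) in [0, 1) are fixed up, each of the eight
// corner samples is weighted by a product of (1 - t) or t per axis. The helper below
// (trilinear_sketch, hypothetical) spells out exactly the weighting used in the code,
// with v_xyz standing for readTsdf(g.x + x, g.y + y, g.z + z):
static inline float trilinear_sketch(float v000, float v001, float v010, float v011,
                                     float v100, float v101, float v110, float v111,
                                     float a, float b, float c) {
  return (1 - a) * ((1 - b) * (v000 * (1 - c) + v001 * c) +
                    b       * (v010 * (1 - c) + v011 * c)) +
         a       * ((1 - b) * (v100 * (1 - c) + v101 * c) +
                    b       * (v110 * (1 - c) + v111 * c));
}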
57f4649905a8a47c9ea0033cbde3dae505a027e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2015-2017 Illia Olenchenko #include <math.h> #include <iostream> #include "vector" #include "../lib/alglib/src/ap.h" #include "../lib/alglib/src/alglibmisc.h" #include "../lib/alglib/src/alglibinternal.h" #include "../lib/alglib/src/linalg.h" #include "../lib/alglib/src/statistics.h" #include "../lib/alglib/src/dataanalysis.h" #include "../lib/alglib/src/specialfunctions.h" #include "../lib/alglib/src/solvers.h" #include "../lib/alglib/src/optimization.h" #include "../lib/alglib/src/diffequations.h" #include "../lib/alglib/src/fasttransforms.h" #include "../lib/alglib/src/integration.h" #include "../lib/alglib/src/interpolation.h" #include "../utils/out.h" #include "../utils/functions.h" #include "../utils/init.h" #include "../utils/transform.h" #include "../utils/richardson.h" #include "../utils/tools.h" #include <string> #include <ctime> #include <mkl.h> #include <stdio.h> using namespace std; using namespace alglib; using namespace alglib_impl; /** * CUDA functions */ /** * N is for number of points of SLAU * should be N % 2 == 0 for correct split * @type int */ #ifndef N #define N 40 #endif #ifndef GPU #define GPU 2 #endif __device__ int barrier = N - 2; __device__ int blocks = N - 2; __global__ void myshab(double *temp, int n_row, int n_col, int plus, double *all) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int index = row * n_col + col; if (index >= n_row * n_col) return; int lindex = index + N + 1 + 2 * (int) (index / (N - 2)) + (N * plus); temp[index] = -4 * all[lindex] + all[lindex - N] + all[lindex + N] + all[lindex - 1] + all[lindex + 1]; } // B, Shablon, Tau, firstAppr, iteration number __global__ void mykernel(double *a, double *b, double *c, double *d, int n_row, int n_col, int plus, int i, double *all ) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int index = row * n_col + col; if (index >= n_row * n_col) return; int lindex = index + N + 1 + 2 * (int) (index / (N - 2)) + (N * plus); d[index] = (-a[index] + b[index]) * c[i] + d[index]; all[lindex] = d[index]; } int main() { /** * t0 is for documenting whole processing time * @type double */ double t0 = dsecnd(); // multi GPUs vector<int> cudas(GPU); for (size_t i = 0; i < GPU; i++) { cudas[i] = i; } // inner Ns int n_inner = (N - 2) * (N - 2); int n_splitted_inner = n_inner / GPU; /* * Getting inputs A and B */ vector<vector<double> > A(n_inner, vector<double>(n_inner, 0)); readMatr(A); vector<vector<double> > B(N, vector<double>(N, 0)); vector<double> Tau(1, 0); vector<vector<double> > firstAppr(N, vector<double>(N, 0)); vector<vector<double> > tempAppr(N, vector<double>(N, 0)); firstApprSet(firstAppr); readVector(B); alglib::real_2d_array matrix; matrix.setcontent(n_inner, n_inner, arrToRealArr(A)); double eps = 1e-5; /* *creating another parts *wr - real parts of the eigenvalues *wi - imaginary parts of the eigenvalues *vl - left eigenvector *vr - right eigenvector */ alglib::real_1d_array wr; alglib::real_1d_array wi; alglib::real_2d_array vl; alglib::real_2d_array vr; /* * eigenvalue computation */ alglib::smatrixevd(matrix, (int)sqrt(N - 2), 0, true, wr, vl); double AlphaMax = findMaxRealArr(wr); double AlphaMin = findMinRealArr(wr); Tau[0] = 2. / (AlphaMax + AlphaMin); double ksi = AlphaMin / AlphaMax; // std::cout << ksi << "ksi" << '\n'; // is it important to calculate n*n alphas? double ro0 = (1. - ksi) / (1. + ksi); double ro1 = (1. - sqrt(ksi)) / (1.
+ sqrt(ksi)); int maxIter = findMaxIter(eps, ksi); maxIter = maxIter * 2; vector<double> optTau(1, 1); vector<double> duo(0); decToDuo(duo, maxIter); calculateOptTau(optTau, duo); for (int i = 1; i < maxIter + 1; ++i) Tau.push_back(nextTau(Tau, ro0, maxIter, optTau)); /* *main loop here */ double **temp = new double*[(int)GPU]; double *all = new double[N * N]; double **b = new double*[(int)GPU]; double **fa = new double*[(int)GPU]; double *taum = new double[maxIter + 1]; for (size_t i = 0; i < GPU; i++) { temp[i] = new double[(int)n_splitted_inner]; b[i] = new double[(int)n_splitted_inner]; fa[i] = new double[(int)n_splitted_inner]; } double *d_a[GPU], *d_b[GPU], *d_c[GPU], *d_d[GPU], *d_g[GPU]; int size = sizeof(double); for (size_t i = 0; i < GPU; i++) { hipSetDevice(cudas[i]); hipDeviceEnablePeerAccess(cudas[i], 0); } for (size_t i = 0; i < GPU; i++) { hipSetDevice(cudas[i]); hipMalloc((void **)&d_a[i], size * (n_splitted_inner)); hipMalloc((void **)&d_b[i], size * (n_splitted_inner)); hipMalloc((void **)&d_d[i], size * (n_splitted_inner)); hipMalloc((void **)&d_g[i], size * (N * N)); hipMalloc((void **)&d_c[i], size * (maxIter + 1)); } for (size_t i = 0; i < GPU; i++) { int plus = i * ((int)(N / 2) - 1); for (int j = 1; j < (int)(N / 2); j++) { for (int k = 1; k < N - 1; k++) { temp[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = 0; b[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = B[j + plus][k]; fa[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = firstAppr[j + plus][k]; } } } for (int j = 0; j < N ; j++) { for (int k = 0; k < N; k++) { all[j * N + k] = firstAppr[j][k]; } } // outVector(all, N * N); for (int i = 0; i < maxIter + 1; i++) { taum[i] = Tau[i]; } for (int i = 0; i < GPU; i++) { hipSetDevice(cudas[i]); hipMemcpy(d_a[i], b[i], size * (n_splitted_inner), hipMemcpyHostToDevice); hipMemcpy(d_d[i], fa[i], size * (n_splitted_inner), hipMemcpyHostToDevice); hipMemcpy(d_c[i], taum, size * (maxIter + 1), hipMemcpyHostToDevice); hipMemcpy(d_g[i], all, size * (N * N), hipMemcpyHostToDevice); hipMemcpy(d_b[i], temp[i], size * (n_splitted_inner), hipMemcpyHostToDevice); } double timeChecker = dsecnd(); // char aster; dim3 threadsPerBlock(16, 16); int n_row = (int)((N - 2) / 2); int n_col = (N - 2); dim3 numBlocks(max(n_row / threadsPerBlock.x, 1), max(n_col / threadsPerBlock.y, 1)); for (int j = 1; j < maxIter + 1; ++j) { for (size_t i = 0; i < GPU; i++) { hipSetDevice(cudas[i]); int plus = i * ((int)(N / 2) - 1); hipLaunchKernelGGL(( myshab) , dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_b[i], n_row, n_col, plus, d_g[i]); hipLaunchKernelGGL(( mykernel) , dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_a[i], d_b[i], d_c[i], d_d[i], n_row, n_col, plus, j, d_g[i]); } for (size_t i = 0; i < GPU; i++) { int index = N * ((int)(N / 2) + (i == 0 ? -1 : 0)); // copy all to other. hipMemcpy( &d_g[i == 0 ? 
1 : 0][index], &d_g[i][index], size * N, hipMemcpyDefault); } } for (size_t i = 0; i < GPU; i++) { hipMemcpy(fa[i], d_d[i], size * (n_splitted_inner), hipMemcpyDeviceToHost); } hipDeviceSynchronize(); double tMain = dsecnd() - timeChecker; for (size_t i = 0; i < GPU; i++) { int plus = i * ((int)(N / 2) - 1); for (int j = 1; j < (int)(N / 2); j++) { for (int k = 1; k < N - 1; k++) { firstAppr[j + plus][k] = fa[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)]; } } } for (size_t i = 0; i < GPU; i++) { hipFree(d_a[i]); hipFree(d_b[i]); hipFree(d_c[i]); hipFree(d_d[i]); hipFree(d_g[i]); } /* * outing */ // firstApprSet(tempAppr); cout << "The N is : " << N << endl; // cout <<"The A(shorted) Is:" <<endl; // outMatr(A); // cout <<"The B(shorted) Is:" <<endl; // outMatr(B); // cout <<"The duo(shorted) Is:" <<endl; // outVector(duo); // cout <<"The opt(shorted) Is:" <<endl; // outVector(optTau); // cout <<"The first appr Is:" <<endl; // outMatr(tempAppr); // cout <<"The last approximation Is:" <<endl; // outMatr(firstAppr); // cout <<"The Max alpha Is:" <<endl; // cout <<AlphaMax <<endl; // cout <<"The Min alpha Is:" <<endl; // cout <<AlphaMin <<endl; // cout <<"The Tau is:" <<endl; // outVector(Tau); // cout <<"The ksi is:" <<endl; // cout <<ksi <<endl; // cout <<"The ro0 is:" <<endl; // cout <<ro0 <<endl; // cout <<"The ro1 is:" <<endl; // cout <<ro1 <<endl; // cout <<"The maxIter is:" <<endl; // cout <<maxIter <<endl; cout <<"The time is:" <<endl; cout << dsecnd() - t0 <<" s" <<endl; cout <<"The time of main is:" <<endl; cout << tMain <<" s" <<endl; // cout <<"The 1 1 is:" <<endl; // cout << firstAppr[1][1] <<endl; // cout <<"The 2 2 is:" <<endl; // cout << firstAppr[2][2] <<endl; // cout <<"The N - 2 N - 2 is:" <<endl; // cout << firstAppr[firstAppr.size() - 2][firstAppr.size() - 2] <<endl; // cout <<"The N - 3 N - 3 is:" <<endl; // cout << firstAppr[firstAppr.size() - 3][firstAppr.size() - 3] <<endl; return 0; }
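// The solver above is set up as a Chebyshev-ordered Richardson iteration: Tau[0], ksi,
// ro0 and ro1 are the standard closed-form parameters derived from the extreme
// eigenvalues returned by alglib::smatrixevd, and calculateOptTau appears to reorder
// the tau sequence for numerical stability. The helper below
// (richardson_params_sketch, hypothetical) just restates those scalars for reference:
#include <math.h>

static void richardson_params_sketch(double alpha_min, double alpha_max) {
  double tau0 = 2.0 / (alpha_max + alpha_min);            // Tau[0] in main()
  double ksi  = alpha_min / alpha_max;                    // spectrum ratio
  double rho0 = (1.0 - ksi) / (1.0 + ksi);                // ro0: stationary Richardson factor
  double rho1 = (1.0 - sqrt(ksi)) / (1.0 + sqrt(ksi));    // ro1: Chebyshev-accelerated factor
  (void)tau0; (void)rho0; (void)rho1;                     // values are only for inspection
}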
57f4649905a8a47c9ea0033cbde3dae505a027e5.cu
// Copyright 2015-2017 Illia Olenchenko #include <math.h> #include <iostream> #include "vector" #include "../lib/alglib/src/ap.h" #include "../lib/alglib/src/alglibmisc.h" #include "../lib/alglib/src/alglibinternal.h" #include "../lib/alglib/src/linalg.h" #include "../lib/alglib/src/statistics.h" #include "../lib/alglib/src/dataanalysis.h" #include "../lib/alglib/src/specialfunctions.h" #include "../lib/alglib/src/solvers.h" #include "../lib/alglib/src/optimization.h" #include "../lib/alglib/src/diffequations.h" #include "../lib/alglib/src/fasttransforms.h" #include "../lib/alglib/src/integration.h" #include "../lib/alglib/src/interpolation.h" #include "../utils/out.h" #include "../utils/functions.h" #include "../utils/init.h" #include "../utils/transform.h" #include "../utils/richardson.h" #include "../utils/tools.h" #include <string> #include <ctime> #include <mkl.h> #include <stdio.h> using namespace std; using namespace alglib; using namespace alglib_impl; /** * CUDA functions */ /** * N is for number of points of SLAU * should be N % 2 == 0 for correct split * @type int */ #ifndef N #define N 40 #endif #ifndef GPU #define GPU 2 #endif __device__ int barrier = N - 2; __device__ int blocks = N - 2; __global__ void myshab(double *temp, int n_row, int n_col, int plus, double *all) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int index = row * n_col + col; if (index >= n_row * n_col) return; int lindex = index + N + 1 + 2 * (int) (index / (N - 2)) + (N * plus); temp[index] = -4 * all[lindex] + all[lindex - N] + all[lindex + N] + all[lindex - 1] + all[lindex + 1]; } // B, Shablon, Tau, firstAppr, iteration number __global__ void mykernel(double *a, double *b, double *c, double *d, int n_row, int n_col, int plus, int i, double *all ) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int index = row * n_col + col; if (index >= n_row * n_col) return; int lindex = index + N + 1 + 2 * (int) (index / (N - 2)) + (N * plus); d[index] = (-a[index] + b[index]) * c[i] + d[index]; all[lindex] = d[index]; } int main() { /** * t0 is for documenting whole processing time * @type double */ double t0 = dsecnd(); // multi GPUs vector<int> cudas(GPU); for (size_t i = 0; i < GPU; i++) { cudas[i] = i; } // inner Ns int n_inner = (N - 2) * (N - 2); int n_splitted_inner = n_inner / GPU; /* * Getting inputs A and B */ vector<vector<double> > A(n_inner, vector<double>(n_inner, 0)); readMatr(A); vector<vector<double> > B(N, vector<double>(N, 0)); vector<double> Tau(1, 0); vector<vector<double> > firstAppr(N, vector<double>(N, 0)); vector<vector<double> > tempAppr(N, vector<double>(N, 0)); firstApprSet(firstAppr); readVector(B); alglib::real_2d_array matrix; matrix.setcontent(n_inner, n_inner, arrToRealArr(A)); double eps = 1e-5; /* *creating another parts *wr - real parts of the eigenvalues *wi - imaginary parts of the eigenvalues *vl - left eigenvector *vr - right eigenvector */ alglib::real_1d_array wr; alglib::real_1d_array wi; alglib::real_2d_array vl; alglib::real_2d_array vr; /* * eigenvalue computation */ alglib::smatrixevd(matrix, (int)sqrt(N - 2), 0, true, wr, vl); double AlphaMax = findMaxRealArr(wr); double AlphaMin = findMinRealArr(wr); Tau[0] = 2. / (AlphaMax + AlphaMin); double ksi = AlphaMin / AlphaMax; // std::cout << ksi << "ksi" << '\n'; // is it important to calculate n*n alphas? double ro0 = (1. - ksi) / (1. + ksi); double ro1 = (1. - sqrt(ksi)) / (1.
+ sqrt(ksi)); int maxIter = findMaxIter(eps, ksi); maxIter = maxIter * 2; vector<double> optTau(1, 1); vector<double> duo(0); decToDuo(duo, maxIter); calculateOptTau(optTau, duo); for (int i = 1; i < maxIter + 1; ++i) Tau.push_back(nextTau(Tau, ro0, maxIter, optTau)); /* *main loop here */ double **temp = new double*[(int)GPU]; double *all = new double[N * N]; double **b = new double*[(int)GPU]; double **fa = new double*[(int)GPU]; double *taum = new double[maxIter + 1]; for (size_t i = 0; i < GPU; i++) { temp[i] = new double[(int)n_splitted_inner]; b[i] = new double[(int)n_splitted_inner]; fa[i] = new double[(int)n_splitted_inner]; } double *d_a[GPU], *d_b[GPU], *d_c[GPU], *d_d[GPU], *d_g[GPU]; int size = sizeof(double); for (size_t i = 0; i < GPU; i++) { cudaSetDevice(cudas[i]); cudaDeviceEnablePeerAccess(cudas[i], 0); } for (size_t i = 0; i < GPU; i++) { cudaSetDevice(cudas[i]); cudaMalloc((void **)&d_a[i], size * (n_splitted_inner)); cudaMalloc((void **)&d_b[i], size * (n_splitted_inner)); cudaMalloc((void **)&d_d[i], size * (n_splitted_inner)); cudaMalloc((void **)&d_g[i], size * (N * N)); cudaMalloc((void **)&d_c[i], size * (maxIter + 1)); } for (size_t i = 0; i < GPU; i++) { int plus = i * ((int)(N / 2) - 1); for (int j = 1; j < (int)(N / 2); j++) { for (int k = 1; k < N - 1; k++) { temp[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = 0; b[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = B[j + plus][k]; fa[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)] = firstAppr[j + plus][k]; } } } for (int j = 0; j < N ; j++) { for (int k = 0; k < N; k++) { all[j * N + k] = firstAppr[j][k]; } } // outVector(all, N * N); for (int i = 0; i < maxIter + 1; i++) { taum[i] = Tau[i]; } for (int i = 0; i < GPU; i++) { cudaSetDevice(cudas[i]); cudaMemcpy(d_a[i], b[i], size * (n_splitted_inner), cudaMemcpyHostToDevice); cudaMemcpy(d_d[i], fa[i], size * (n_splitted_inner), cudaMemcpyHostToDevice); cudaMemcpy(d_c[i], taum, size * (maxIter + 1), cudaMemcpyHostToDevice); cudaMemcpy(d_g[i], all, size * (N * N), cudaMemcpyHostToDevice); cudaMemcpy(d_b[i], temp[i], size * (n_splitted_inner), cudaMemcpyHostToDevice); } double timeChecker = dsecnd(); // char aster; dim3 threadsPerBlock(16, 16); int n_row = (int)((N - 2) / 2); int n_col = (N - 2); dim3 numBlocks(max(n_row / threadsPerBlock.x, 1), max(n_col / threadsPerBlock.y, 1)); for (int j = 1; j < maxIter + 1; ++j) { for (size_t i = 0; i < GPU; i++) { cudaSetDevice(cudas[i]); int plus = i * ((int)(N / 2) - 1); myshab <<<numBlocks, threadsPerBlock>>>(d_b[i], n_row, n_col, plus, d_g[i]); mykernel <<<numBlocks, threadsPerBlock>>>(d_a[i], d_b[i], d_c[i], d_d[i], n_row, n_col, plus, j, d_g[i]); } for (size_t i = 0; i < GPU; i++) { int index = N * ((int)(N / 2) + (i == 0 ? -1 : 0)); // copy all to other. cudaMemcpy( &d_g[i == 0 ? 
1 : 0][index], &d_g[i][index], size * N, cudaMemcpyDefault); } } for (size_t i = 0; i < GPU; i++) { cudaMemcpy(fa[i], d_d[i], size * (n_splitted_inner), cudaMemcpyDeviceToHost); } cudaDeviceSynchronize(); double tMain = dsecnd() - timeChecker; for (size_t i = 0; i < GPU; i++) { int plus = i * ((int)(N / 2) - 1); for (int j = 1; j < (int)(N / 2); j++) { for (int k = 1; k < N - 1; k++) { firstAppr[j + plus][k] = fa[i][(j - 1) * (int)((N - 2) / 2) + (k - 1)]; } } } for (size_t i = 0; i < GPU; i++) { cudaFree(d_a[i]); cudaFree(d_b[i]); cudaFree(d_c[i]); cudaFree(d_d[i]); cudaFree(d_g[i]); } /* * outing */ // firstApprSet(tempAppr); cout << "The N is : " << N << endl; // cout <<"The A(shorted) Is:" <<endl; // outMatr(A); // cout <<"The B(shorted) Is:" <<endl; // outMatr(B); // cout <<"The duo(shorted) Is:" <<endl; // outVector(duo); // cout <<"The opt(shorted) Is:" <<endl; // outVector(optTau); // cout <<"The first appr Is:" <<endl; // outMatr(tempAppr); // cout <<"The last approximation Is:" <<endl; // outMatr(firstAppr); // cout <<"The Max alpha Is:" <<endl; // cout <<AlphaMax <<endl; // cout <<"The Min alpha Is:" <<endl; // cout <<AlphaMin <<endl; // cout <<"The Tau is:" <<endl; // outVector(Tau); // cout <<"The ksi is:" <<endl; // cout <<ksi <<endl; // cout <<"The ro0 is:" <<endl; // cout <<ro0 <<endl; // cout <<"The ro1 is:" <<endl; // cout <<ro1 <<endl; // cout <<"The maxIter is:" <<endl; // cout <<maxIter <<endl; cout <<"The time is:" <<endl; cout << dsecnd() - t0 <<" s" <<endl; cout <<"The time of main is:" <<endl; cout << tMain <<" s" <<endl; // cout <<"The 1 1 is:" <<endl; // cout << firstAppr[1][1] <<endl; // cout <<"The 2 2 is:" <<endl; // cout << firstAppr[2][2] <<endl; // cout <<"The N - 2 N - 2 is:" <<endl; // cout << firstAppr[firstAppr.size() - 2][firstAppr.size() - 2] <<endl; // cout <<"The N - 3 N - 3 is:" <<endl; // cout << firstAppr[firstAppr.size() - 3][firstAppr.size() - 3] <<endl; return 0; }
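// Each sweep above exchanges one boundary row of the N x N grid between the two device
// buffers with cudaMemcpy(..., cudaMemcpyDefault), letting unified virtual addressing
// pick the copy path. A sketch of that exchange together with the usual guarded
// peer-access setup (enable_p2p and exchange_halo_rows are hypothetical names; note
// that the conventional form enables access to the *other* device, whereas the listing
// above passes the device's own index):
#include <cuda_runtime.h>

static void enable_p2p(int dev0, int dev1) {
  int can = 0;
  cudaDeviceCanAccessPeer(&can, dev0, dev1);
  if (can) { cudaSetDevice(dev0); cudaDeviceEnablePeerAccess(dev1, 0); }
  cudaDeviceCanAccessPeer(&can, dev1, dev0);
  if (can) { cudaSetDevice(dev1); cudaDeviceEnablePeerAccess(dev0, 0); }
}

static void exchange_halo_rows(double *d_g0, double *d_g1, int n, int row_from_0, int row_from_1) {
  // the row owned by GPU 0 goes into GPU 1's copy of the grid, and vice versa
  cudaMemcpy(d_g1 + row_from_0 * n, d_g0 + row_from_0 * n, n * sizeof(double), cudaMemcpyDefault);
  cudaMemcpy(d_g0 + row_from_1 * n, d_g1 + row_from_1 * n, n * sizeof(double), cudaMemcpyDefault);
}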
327aade11d1c5f88b7c9aa77e3e7c9ab914ce654.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand_kernel.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include "device_launch_parameters.h" #include <opencv2\core.hpp> #include <opencv2\highgui.hpp> #include <stdio.h> #include <assert.h> #include <iostream> #include <fstream> #define BLOCK_SIZE 16 #define WARP_SIZE 32 #define PI 3.1415926f texture<uchar, hipTextureType2D, hipReadModeElementType> leftTex; texture<uchar, hipTextureType2D, hipReadModeElementType> rightTex; __global__ void rand_init(hiprandState_t *d_states, int height, int width) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; hiprand_init(1234ULL, offset, 0, &d_states[offset]); } __global__ void wrap_phase_shift(uchar* src, float* dst, int height, int width, float diffT) { int imgSize = height*width; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; float sqrt3 = sqrtf(3.0f); float I1 = static_cast<float>(src[offset]); float I2 = static_cast<float>(src[imgSize + offset]); float I3 = static_cast<float>(src[2 * imgSize + offset]); float maxI = fmaxf(fmaxf(I1, I2), I3); float I1_I2 = fabs(I1 - I2); float I2_I3 = fabs(I2 - I3); float I1_I3 = fabs(I1 - I3); if ((I1_I2 < diffT) && (I2_I3 < diffT) && (I1_I3 < diffT)) { dst[offset] = -4.0f; return; } float phiVal = atan2f(sqrt3*(I1 - I3), (2 * I2 - I1 - I3)); if (phiVal < 0) phiVal += 2 * PI; dst[offset] = phiVal; } __global__ void mean_filter(uchar* d_dst1, uchar* d_dst2, uchar* src1, uchar *src2, int height, int width, int win1, int win2) { int winSize = (2 * win1 + 1)*(2 * win2 + 1); int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; float sum1 = 0; float sum2 = 0; for (int h = -win1; h <= win1; h++) { int y0 = y + h; if (y0 < 0) y0 = 0; if (y0 >= height) y0 = height - 1; for (int w = -win2; w <= win2; w++) { //sum1 += tex2D(leftTex, x + w, y + h); //sum2 += tex2D(rightTex, x + w, y + h); int x0 = x + w; if (x0 < 0) x0 = 0; if (x0 >= width - 1) x0 = width - 1; sum1 += src1[y0*width + x0]; sum2 += src2[y0*width + x0]; } } d_dst1[offset] = static_cast<uchar>(sum1 / winSize + 0.5f); d_dst2[offset] = static_cast<uchar>(sum2 / winSize + 0.5f); } //census transform __global__ void census_transform64(uchar* d_leftMean, uchar* d_rightMean, uint64_t* d_leftCen, uint64_t* d_rightCen, int height, int width, int win1, int win2) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; uint64_t leftRes = 0; uint64_t rightRes = 0; int count = 0; for (int h = -win1; h <= win1; h++) { for (int w = -win2; w <= win2; w++) { if (h == 0 && w == 0) continue; uchar leftTemp = tex2D(leftTex, x + w, y + h); uchar rightTemp = tex2D(rightTex, x + w, y + h); if (d_leftMean[offset] > leftTemp) { leftRes = leftRes | (1 << count); } if (d_rightMean[offset] > rightTemp) { rightRes = rightRes | (1 << count); } count++; } } d_leftCen[offset] = leftRes; d_rightCen[offset] = rightRes; } __global__ void census_transform32(uint32_t *d_leftCen, uint32_t *d_rightCen, uchar *d_leftMean, uchar *d_rightMean, int height, int width, int win1, int win2) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; uint32_t leftRes = 0; uint32_t rightRes = 0; int count = 0; for (int h = -win1; h <= win1; h++) { for (int w = -win2; 
w <= win2; w++) { if (h == 0 && w == 0) continue; uchar leftTemp = tex2D(leftTex, x + w, y + h); uchar rightTemp = tex2D(rightTex, x + w, y + h); if (d_leftMean[offset] > leftTemp) { leftRes = leftRes | (1 << count); } if (d_rightMean[offset] > rightTemp) { rightRes = rightRes | (1 << count); } count++; if (count == 32) { d_leftCen[y*width * 2 + 2 * x] = leftRes; d_rightCen[y*width * 2 + 2 * x] = rightRes; leftRes = 0; rightRes = 0; count = 0; } } } d_leftCen[y*width * 2 + 2 * x + 1] = leftRes; d_rightCen[y*width * 2 + 2 * x + 1] = rightRes; } __device__ int hamming_distance(uint64_t c1, uint64_t c2) { return __popcll(c1^c2); } __device__ int hamming_distance(uint32_t c1, uint32_t c2) { return __popcll(c1^c2); } __device__ void search_best_disp(uint32_t* leftCen, uint32_t* rightCen, float* leftPhi, float* rightPhi, int width, int height, int x, int y, int minDisp, int maxDisp, int &bestDx, int &bestScore) { for (int dx = minDisp; dx <= maxDisp; dx++) { int cxR = x - dx; if (cxR < 0) continue; if (leftPhi) { float phiT = 0.25f; float phiL = leftPhi[y*width + x]; float phiR = rightPhi[y*width + cxR]; float dPhi = abs(phiL - phiR); if (dPhi > phiT) continue; if (phiL < 0) continue; if (phiR < 0) continue; } uint32_t c1 = leftCen[y*width * 2 + 2 * x]; uint32_t c2 = rightCen[y*width * 2 + 2 * cxR]; int d1 = hamming_distance(c1, c2); c1 = leftCen[y*width * 2 + 2 * x + 1]; c2 = rightCen[y*width * 2 + 2 * cxR + 1]; int d2 = hamming_distance(c1, c2); int d = d1 + d2; if (d < bestScore) { bestScore = d; bestDx = dx; } } } __global__ void disp_image_init(int* d_dispImg, int* d_scoreImg, float* d_leftPhi, float* d_rightPhi, uint32_t* d_leftCen, uint32_t* d_rightCen, int height, int width, int minDisp, int maxDisp, int dispRange, int randTimes, hiprandState_t *d_states) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; d_scoreImg[offset] = 1000; d_dispImg[offset] = minDisp; int bestScore = 1000; int bestDisp = minDisp; for (int t = 0; t < randTimes; t++) { int d = hiprand(d_states + offset) % dispRange + minDisp; //if (x == 128 && y == 128) printf("%d ", d); int minD = d; int maxD = d; int score = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, d, score); if (score < bestScore) { bestScore = score; bestDisp = d; } } d_scoreImg[offset] = bestScore; d_dispImg[offset] = bestDisp; } __global__ void left_to_right(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int y = blockIdx.x*blockDim.x + threadIdx.x; for (int x = 1; x < width; x++) { int offset = y*width + x; int x0 = x - 1, y0 = y; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void right_to_left(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int y = blockIdx.x*blockDim.x + threadIdx.x; for (int x = width - 1 - 1; x >= 0; x--) { int offset = y*width + x; int x0 = x + 1, y0 = y; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; 
int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void up_to_down(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int x = blockIdx.x*blockDim.x + threadIdx.x; for (int y = 1; y < height; y++) { int offset = y*width + x; int x0 = x, y0 = y - 1; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void down_to_up(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int x = blockIdx.x*blockDim.x + threadIdx.x; for (int y = height - 1 - 1; y >= 0; y--) { int offset = y*width + x; int x0 = x, y0 = y + 1; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void median_filter(int *d_src, int *d_dst, int height, int width) { int idx = blockIdx.x*blockDim.x + threadIdx.x; int row = idx / width; int col = idx % width; const int n = 3; int win[n*n]; int half = n / 2; if (row >= half && col >= half && row < height - half && col < width - half) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { win[i*n + j] = d_src[(row - half + i)*width + col - half + j]; } } for (int i = 0; i < (n*n) / 2 + 1; i++) { int minIdx = i; for (int j = i + 1; j < n*n; j++) { if (win[j] < win[minIdx]) { minIdx = j; } } const int temp = win[i]; win[i] = win[minIdx]; win[minIdx] = temp; } d_dst[idx] = win[(n*n) / 2]; } else { d_dst[idx] = d_src[idx]; } } __global__ void median_filter2(int *d_src, int *d_dst, int height, int width) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = y*width + x; const int winSize = 3; const int halfSize = winSize / 2; int win[winSize*winSize]; // first step: horizontal median filter if (x >= winSize && y >= winSize && x < width - winSize && y < height - winSize) { int i = 0; int j = 0; int temp = 0; for (int x2 = x - winSize; x2 <= x + winSize; x2++) { temp = d_src[y*width + x2]; i = j - 1; while (i >= 0 && win[i] > temp) { win[i + 1] = win[i]; i--; } win[i + 1] = temp; j++; } d_dst[y*width + x] = win[winSize]; } else { d_dst[y*width + x] = d_src[y*width + x]; } __syncthreads(); // second step: vertical median filter if (x >= winSize && y >= winSize && x < width - winSize && y < height - winSize) { int i = 0; int j = 0; int temp = 0; for (int y2 = y - winSize; y2 <= y + winSize; y2++) { temp = d_dst[y2*width + x]; i = j - 1; while (i >= 0 && win[i] > temp) { win[i + 1] = win[i]; i--; } win[i + 
1] = temp; j++; } d_dst[y*width + x] = win[winSize]; } else { d_dst[y*width + x] = d_src[y*width + x]; } } __device__ int FindRoot(int *d_labelImg, int label) { while (d_labelImg[label] != label) { label = d_labelImg[label]; } return label; } __device__ void Union(int *d_dispImg, int *d_labelImg, int address0, int address1, int *sChanged) { if (fabsf(d_dispImg[address0] - d_dispImg[address1]) <= 2) { int root0 = FindRoot(d_labelImg, address0); int root1 = FindRoot(d_labelImg, address1); if (root0 < root1) { atomicMin(d_labelImg + root1, root0); sChanged[0] = 1; } else if (root1 < root0) { atomicMin(d_labelImg + root0, root1); sChanged[0] = 1; } } } __global__ void block_label(int *d_dispImg, int *d_labelImg, int height, int width) { __shared__ int sSegs[BLOCK_SIZE*BLOCK_SIZE]; __shared__ int sLabels[BLOCK_SIZE*BLOCK_SIZE]; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*width + x; int l_x = x % BLOCK_SIZE; int l_y = y % BLOCK_SIZE; int l_idx = l_y*BLOCK_SIZE + l_x; sSegs[l_idx] = d_dispImg[idx]; __shared__ int sChanged[1]; __syncthreads(); int label = l_idx; //int n_l_x[8], n_l_y[8]; //n_l_x[0] = l_x - 1; n_l_y[0] = l_y - 1; //n_l_x[1] = l_x; n_l_y[1] = l_y - 1; //n_l_x[2] = l_x + 1; n_l_y[2] = l_y - 1; //n_l_x[3] = l_x - 1; n_l_y[3] = l_y; //n_l_x[4] = l_x + 1; n_l_y[4] = l_y; //n_l_x[5] = l_x - 1; n_l_y[5] = l_y + 1; //n_l_x[6] = l_x; n_l_y[6] = l_y + 1; //n_l_x[7] = l_x + 1; n_l_y[7] = l_y + 1; const int neighArea = 4; int n_l_x[neighArea], n_l_y[neighArea]; n_l_x[0] = l_x - 1; n_l_y[0] = l_y; n_l_x[1] = l_x + 1; n_l_y[1] = l_y; n_l_x[2] = l_x; n_l_y[2] = l_y - 1; n_l_x[3] = l_x; n_l_y[3] = l_y + 1; while (1) { sLabels[l_idx] = label; if (threadIdx.x == 0 && threadIdx.y == 0) sChanged[0] = 0; __syncthreads(); int newLabel = label; for (int i = 0; i < neighArea; i++) { if (n_l_x[i] >= 0 && n_l_x[i] < BLOCK_SIZE && n_l_y[i] >= 0 && n_l_y[i] < BLOCK_SIZE) { int n_l_idx = n_l_y[i] * BLOCK_SIZE + n_l_x[i]; /*if (sSegs[l_idx] == 255 && sSegs[n_l_idx] == 255) { newLabel = static_cast<int>(fminf(newLabel, sLabels[n_l_idx])); }*/ if (fabsf(sSegs[l_idx] - sSegs[n_l_idx]) <= 1) { newLabel = static_cast<int>(fminf(newLabel, sLabels[n_l_idx])); } } } __syncthreads(); if (newLabel < label) { atomicMin(sLabels + label, newLabel); sChanged[0] = 1; } __syncthreads(); if (sChanged[0] == 0) break; label = FindRoot(sLabels, label); __syncthreads(); } /*if (d_dispImg[idx] == 0) { d_labelImg[idx] = 0; } else { d_labelImg[idx] = (blockIdx.y*blockDim.y + label / BLOCK_SIZE)*width + blockIdx.x*blockDim.x + label % BLOCK_SIZE; }*/ d_labelImg[idx] = (blockIdx.y*blockDim.y + label / BLOCK_SIZE)*width + blockIdx.x*blockDim.x + label % BLOCK_SIZE; //d_labelImg[idx] = label; } __global__ void block_merge(int *d_dispImg, int *d_labelImg, int height, int width) { dim3 subBlockIdx(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); dim3 subBlockDim(BLOCK_SIZE, BLOCK_SIZE); int rep = subBlockDim.x / blockDim.z; __shared__ int sChanged[1]; while (1) { if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { sChanged[0] = 0; } __syncthreads(); for (int i = 0; i < rep; i++) { int x = subBlockIdx.x*subBlockDim.x + i*blockDim.z + threadIdx.z; int y = (subBlockIdx.y + 1)*subBlockDim.y - 1; if (y + 1 < height) { int address0 = y*width + x; int address1 = (y + 1)*width + x; Union(d_dispImg, d_labelImg, address0, address1, sChanged); } } for (int i = 0; i < rep; i++) { int x = (subBlockIdx.x + 1)*subBlockDim.x - 1; int y = 
subBlockIdx.y*subBlockDim.y + i*blockDim.z + threadIdx.z; if (x + 1 < width) { int address0 = y*width + x; int address1 = y*width + x + 1; Union(d_dispImg, d_labelImg, address0, address1, sChanged); } } __syncthreads(); if (sChanged[0] == 0) break; __syncthreads(); } } __global__ void calcu_area(int *d_dispImg, int *d_labelImg, int *d_areaImg, int height, int width) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*BLOCK_SIZE*width + x; int currLabel = FindRoot(d_labelImg, idx); int nextLabel; int count = 1; for (int i = 1; i < BLOCK_SIZE; i++) { idx = (y*BLOCK_SIZE + i)*width + x; nextLabel = FindRoot(d_labelImg, idx); if (currLabel != nextLabel) { atomicAdd(d_areaImg + currLabel, count); currLabel = nextLabel; count = 1; } else { count++; } if (i == BLOCK_SIZE - 1) { atomicAdd(d_areaImg + currLabel, count); } } __syncthreads(); } __global__ void remove_small_segments(int *d_dispImg, int *d_labelImg, int *d_areaImg, int height, int width, int speckleSize, int minDisp) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*width + x; int label = FindRoot(d_labelImg, idx); if (d_areaImg[label] < speckleSize) { d_dispImg[idx] = minDisp; } } // src0---half black / half white // src1---half white / half black int SegBW2(IplImage* src0, IplImage* bina) { if (src0 == NULL) return -1; int w, h, ws8; w = src0->width; h = src0->height; ws8 = src0->widthStep; float T = 0; for (int r = 0; r < h; r++) { for (int c = 0; c < w; c++) { unsigned char* pRow = (unsigned char*)src0->imageData + r*ws8; T = T + float(pRow[c]); } } T /= w*h; for (int r = 0; r < h; r++) { unsigned char* pRow1 = (unsigned char*)src0->imageData + r*ws8; unsigned char* pRowB = (unsigned char*)bina->imageData + r*ws8; for (int c = 0; c < w; c++) { //int diff = pRow1[c] - pRow2[c]; if (pRow1[c]>int(T)) { pRowB[c] = 255; } else { pRowB[c] = 0; } } } return 0; } // unwrapping for a planar target using a binary pattern // nFringe----the number of fringes int UnwrappingPhase(IplImage* bina, float *rtmPhi, float *absPhi, int nFringes) { int w = bina->width; int h = bina->height; int w2 = w / 4; float pi = 3.1415926f; for (int r = 0; r<h; r++) { unsigned char* binaRow = (unsigned char*)bina->imageData + r*w; float *rtmPhiRow = rtmPhi + r*w; float *absPhiRow = absPhi + r*w; for (int c = w2; c < w - w2; c++) { if (rtmPhiRow[c] <= -4) { continue; } if (binaRow[c] - binaRow[c - 1] > 0) { r = r; int k = 0; k = nFringes / 2; for (int c2 = c; c2<w; c2++) { if (rtmPhiRow[c2] <= -4) { continue; } if ((rtmPhiRow[c2] - rtmPhiRow[c2 - 1])<-pi) { k += 1; } absPhiRow[c2] = k * 2 * pi + rtmPhiRow[c2]; } k = nFringes / 2; for (int c2 = c; c2 >= 0; c2--) { if (rtmPhiRow[c2] <= -4) { continue; } if ((rtmPhiRow[c2] - rtmPhiRow[c2 + 1])>pi) { k -= 1; } absPhiRow[c2] = k * 2 * pi + rtmPhiRow[c2]; //kMap[r*w + c2] = k; } //kMap[r*w + c] = 1; break; } } } //OutData2Txt(kMap, w, h, w, "d:/kMap.txt"); //delete[] kMap; return 0; } //compute the absolute phase map for the reference image void RefImgAbsPhase(IplImage* binaImages, int nBinaImages, float *absPhi, float* retPhiRef, float nFringes) { float pi = 3.1415926f; int w = binaImages->width; int h = binaImages->height; //float* rtmPhi =new float[w*h]; float *column = new float[w*h]; //memset(retPhiRef, 0, sizeof(float)*w*h); int diffT = 18; IplImage* bina = cvCreateImage(cvSize(w, h), 8, 1); //SegBW(binaImages[0], binaImages[1], bina); //cvSaveImage("d:/bw.bmp", bina); SegBW2(binaImages, bina); 
cvSaveImage("bw2.bmp", bina); //WrapPhaseShift(fringeImages, nFringeImages, retPhiRef, diffT); UnwrappingPhase(bina, retPhiRef, absPhi, nFringes); //OutData2Txt(retPhiRef, w, h, w, "rtmPhiRef.txt"); //OutData2Txt(absPhi, w, h, w, "absPhiRef.txt"); delete[] column; cvReleaseImage(&bina); return; } // unwrapping using the reference image int Unwrapping_RefImg(int w, int h, float *rtmPhi, float *h_rightPhi, float *refPhi, float *absPhi, int *disp, int minDisp) { float *phi0 = new float[w*h]; float *rphi0 = new float[w*h]; float pi = 3.14159f; for (int r = 0; r < h; r++) { for (int c = 0; c<w; c++) { if ((r == 179) && (c == 108)) { r = r; } int idx = r*w + c; if (rtmPhi[r*w + c] <= -4) continue; int dx = int(disp[idx] - 0.5f); if (dx <= minDisp) { //absPhi[idx] = 0; continue; } int xRef = c - dx; if (xRef < 0) continue; float roughPhi = refPhi[r*w + xRef]; phi0[idx] = roughPhi; rphi0[idx] = h_rightPhi[r*w + xRef]; int k = int((roughPhi - rtmPhi[idx]) / (2 * pi) + 0.5f); absPhi[idx] = 2 * k*pi + rtmPhi[idx]; } } //OutData2Txt(phi0, w, h, w, "phi0.txt"); //OutData2Txt(rphi0, w, h, w, "rphi0.txt"); delete[] phi0; delete[] rphi0; return 0; } int main(int argc, char* argv[]) { IplImage* leftImg = cvLoadImage("tsetImage\\david\\speckle.bmp", 0); IplImage* rightImg = cvLoadImage("tsetImage\\ref\\ref.bmp", 0); IplImage* objFringeImg[3]; IplImage* refFringeImg[3]; objFringeImg[0] = cvLoadImage("tsetImage\\david\\objFringe0.bmp", 0); objFringeImg[1] = cvLoadImage("tsetImage\\david\\objFringe1.bmp", 0); objFringeImg[2] = cvLoadImage("tsetImage\\david\\objFringe2.bmp", 0); refFringeImg[0] = cvLoadImage("tsetImage\\ref\\refFringe0.bmp", 0); refFringeImg[1] = cvLoadImage("tsetImage\\ref\\refFringe1.bmp", 0); refFringeImg[2] = cvLoadImage("tsetImage\\ref\\refFringe2.bmp", 0); IplImage* binaImg = cvLoadImage("tsetImage\\cam_00.bmp", 0); const unsigned int height = 480; const unsigned int width = 640; const unsigned int imgSize = height*width; uchar* d_objFriImg, *d_refFriImg; if (hipSuccess != hipMalloc((void **)&d_objFriImg, 3 * imgSize * sizeof(uchar))) std::cout << "device malloc object fringe image error" << std::endl; if (hipSuccess != hipMalloc((void **)&d_refFriImg, 3 * imgSize * sizeof(uchar))) std::cout << "device malloc reference fringe image error" << std::endl; float* d_leftPhi, *d_rightPhi; if (hipSuccess != hipMalloc((void **)&d_leftPhi, imgSize * sizeof(float))) std::cout << "device malloc left phase image error" << std::endl; if (hipSuccess != hipMemset(d_leftPhi, 0.0f, imgSize * sizeof(float))) std::cout << "device memset left phase image error" << std::endl; if (hipSuccess != hipMalloc((void **)&d_rightPhi, imgSize * sizeof(float))) std::cout << "device malloc right phase image error" << std::endl; if (hipSuccess != hipMemset(d_rightPhi, 0.0f, imgSize * sizeof(float))) std::cout << "device memset right phase image error" << std::endl; if (hipSuccess != hipMemcpy(d_objFriImg, objFringeImg[0]->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy object fringe image 0 from host to devcie error" << std::endl; if (hipSuccess != hipMemcpy(d_objFriImg + imgSize, objFringeImg[1]->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy object fringe image 1 from host to device error" << std::endl; if (hipSuccess != hipMemcpy(d_objFriImg + 2 * imgSize, objFringeImg[2]->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy object fringe image 2 from host to device error" << std::endl; if (hipSuccess != hipMemcpy(d_refFriImg, 
refFringeImg[0]->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy reference fringe image 0 from host to devcie error" << std::endl; if (hipSuccess != hipMemcpy(d_refFriImg + imgSize, refFringeImg[1]->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy reference fringe image 1 from host to device error" << std::endl; if (hipSuccess != hipMemcpy(d_refFriImg + 2 * imgSize, refFringeImg[2]->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy reference fringe image 2 from host to devcie error" << std::endl; uchar* d_leftImg, *d_rightImg; if (hipSuccess != hipMalloc((void **)&d_leftImg, imgSize * sizeof(uchar))) std::cout << "device malloc left image error" << std::endl; if (hipSuccess != hipMalloc((void **)&d_rightImg, imgSize * sizeof(uchar))) std::cout << "device malloc right image error" << std::endl; if (hipSuccess != hipMemcpy(d_leftImg, leftImg->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy left image from host to device error" << std::endl; if (hipSuccess != hipMemcpy(d_rightImg, rightImg->imageData, imgSize * sizeof(uchar), hipMemcpyHostToDevice)) std::cout << "copy right image from host to device error" << std::endl; uint32_t* d_leftCen, *d_rightCen; if (hipSuccess != hipMalloc((void **)&d_leftCen, 2 * imgSize * sizeof(uint32_t))) std::cout << "device malloc left census error" << std::endl; if (hipSuccess != hipMalloc((void **)&d_rightCen, 2 * imgSize * sizeof(uint32_t))) std::cout << "device malloc right census error" << std::endl; uchar* d_leftMean, *d_rightMean; if (hipSuccess != hipMalloc((void **)&d_leftMean, imgSize * sizeof(uchar))) std::cout << "device malloc left mean error" << std::endl; if (hipSuccess != hipMalloc((void **)&d_rightMean, imgSize * sizeof(uchar))) std::cout << "device malloc right mean error" << std::endl; int* d_dispImg, *d_scoreImg; if (hipSuccess != hipMalloc((void **)&d_dispImg, imgSize * sizeof(int))) std::cout << "device malloc disparity image error" << std::endl; if (hipSuccess != hipMalloc((void **)&d_scoreImg, imgSize * sizeof(int))) std::cout << "device malloc score image error" << std::endl; int *d_postImg; if (hipSuccess != hipMalloc((void **)&d_postImg, imgSize * sizeof(int))) std::cout << "device malloc post-processing image error" << std::endl; int *d_labelImg; if (hipSuccess != hipMalloc((void **)&d_labelImg, imgSize * sizeof(int))) std::cout << "device malloc label image error" << std::endl; int *d_areaImg; if (hipSuccess != hipMalloc((void **)&d_areaImg, imgSize * sizeof(int))) std::cout << "device malloc area image error" << std::endl; leftTex.addressMode[0] = hipAddressModeClamp; leftTex.addressMode[1] = hipAddressModeClamp; rightTex.addressMode[0] = hipAddressModeClamp; rightTex.addressMode[1] = hipAddressModeClamp; hipChannelFormatDesc desc = hipCreateChannelDesc<uchar>(); if (hipSuccess != hipBindTexture2D(NULL, &leftTex, d_leftImg, &desc, width, height, width * sizeof(uchar))) std::cout << "bind left texture error" << std::endl; if (hipSuccess != hipBindTexture2D(NULL, &rightTex, d_rightImg, &desc, width, height, width * sizeof(uchar))) std::cout << "bind right texture error" << std::endl; hiprandState_t *d_states; hipMalloc((void **)&d_states, height*width * sizeof(hiprandState_t)); int *d_doneImg; hipMalloc((void **)&d_doneImg, height*width * sizeof(int)); hipMemset(d_doneImg, 0, imgSize * sizeof(int)); int *segList_x, *segList_y; hipMalloc((void **)&segList_x, imgSize * sizeof(int)); hipMalloc((void **)&segList_y, 
imgSize * sizeof(int)); int minDisp = -50; int maxDisp = 165; int dispRange = maxDisp - minDisp; int randTimes = 3; dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE); dim3 gridSize((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y); dim3 blockSize1(BLOCK_SIZE, BLOCK_SIZE); dim3 gridSize1(width / blockSize1.x, height / blockSize1.y); dim3 blockSize2(4, 4, BLOCK_SIZE); dim3 gridSize2(width / (2 * BLOCK_SIZE), height / (2 * BLOCK_SIZE)); dim3 blockSize3(BLOCK_SIZE, 1); dim3 gridSize3(width / BLOCK_SIZE, height / BLOCK_SIZE); dim3 blockSize4(BLOCK_SIZE, BLOCK_SIZE); dim3 gridSize4(width / BLOCK_SIZE, height / BLOCK_SIZE); rand_init << <gridSize, blockSize >> > (d_states, height, width); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, NULL); int speckleSize = 100; int diffT = 18; wrap_phase_shift << <gridSize, blockSize >> > (d_objFriImg, d_leftPhi, height, width, diffT); wrap_phase_shift << <gridSize, blockSize >> > (d_refFriImg, d_rightPhi, height, width, diffT); int win1 = 3, win2 = 4; mean_filter << <gridSize, blockSize >> > (d_leftMean, d_rightMean, d_leftImg, d_rightImg, height, width, win1, win2); census_transform32 << <gridSize, blockSize >> > (d_leftCen, d_rightCen, d_leftMean, d_rightMean, height, width, win1, win2); //uint32_t *h_leftCen = new uint32_t[2 * imgSize](); //uint32_t *h_rightCen = new uint32_t[2 * imgSize](); //hipMemcpy(h_leftCen, d_leftCen, 2 * imgSize * sizeof(uint32_t), hipMemcpyDeviceToHost); //hipMemcpy(h_rightCen, d_rightCen, 2 * imgSize * sizeof(uint32_t), hipMemcpyDeviceToHost); //float *h_leftPhi = new float[imgSize](); //float *h_rightPhi = new float[imgSize](); //hipMemcpy(h_leftPhi, d_leftPhi, imgSize * sizeof(float), hipMemcpyDeviceToHost); //hipMemcpy(h_rightPhi, d_rightPhi, imgSize * sizeof(float), hipMemcpyDeviceToHost); //uchar *h_leftMean = new uchar[imgSize](); //uchar *h_rightMean = new uchar[imgSize](); //hipMemcpy(h_leftMean, d_leftMean, imgSize * sizeof(uchar), hipMemcpyDeviceToHost); //hipMemcpy(h_rightMean, d_rightMean, imgSize * sizeof(uchar), hipMemcpyDeviceToHost); disp_image_init << <gridSize, blockSize >> > (d_dispImg, d_scoreImg, d_leftPhi, d_rightPhi, d_leftCen, d_rightCen, height, width, minDisp, maxDisp, dispRange, randTimes, d_states); left_to_right << <height / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); up_to_down << <width / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); right_to_left << <height / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); down_to_up << <width / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); median_filter2 << <gridSize, blockSize >> > (d_dispImg, d_postImg, height, width); block_label << <gridSize1, blockSize1 >> > (d_postImg, d_labelImg, height, width); block_merge << <gridSize2, blockSize2 >> > (d_postImg, d_labelImg, height, width); calcu_area << <gridSize3, blockSize3 >> > (d_postImg, d_labelImg, d_areaImg, height, width); remove_small_segments << <gridSize4, blockSize4 >> > (d_postImg, d_labelImg, d_areaImg, height, width, speckleSize, minDisp); hipEventRecord(stop, NULL); hipEventSynchronize(stop); float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); printf("GPU processing time : %.4f (ms)\n", 
msecTotal); float *h_leftPhi = new float[imgSize](); float *h_rightPhi = new float[imgSize](); float *h_absPhiRef = new float[imgSize](); int* h_dispImg = new int[height*width](); if (hipMemcpy(h_leftPhi, d_leftPhi, imgSize * sizeof(float), hipMemcpyDeviceToHost)) std::cout << "copy left phase image from device to host error" << std::endl; if (hipMemcpy(h_rightPhi, d_rightPhi, imgSize * sizeof(float), hipMemcpyDeviceToHost)) std::cout << "copy right phase image from device to host error" << std::endl; if (hipMemcpy(h_dispImg, d_postImg, height*width * sizeof(int), hipMemcpyDeviceToHost)) std::cout << "copy disparity image from device to host error" << std::endl; int nBinaImg = 1; float n_finges = 25.6f; RefImgAbsPhase(binaImg, nBinaImg, h_absPhiRef, h_rightPhi, n_finges); float *h_absPhi = new float[imgSize](); Unwrapping_RefImg(width, height, h_leftPhi, h_rightPhi, h_absPhiRef, h_absPhi, h_dispImg, minDisp); cv::Mat h_dispMat(height, width, CV_8UC1, cv::Scalar(0)); for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { h_dispMat.at<uchar>(row, col) = static_cast<uchar>(h_dispImg[row*width + col]); //std::cout << h_dispImg[row*width + col] << " "; } //std::cout << std::endl; } cv::Mat absPhiMat(height, width, CV_32FC1, cv::Scalar(0)); for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { absPhiMat.at<float>(row, col) = h_absPhi[row*width + col]; } } cv::normalize(absPhiMat, absPhiMat, 255, 0, cv::NORM_MINMAX); absPhiMat.convertTo(absPhiMat, CV_8U); IplImage absPhiImage(absPhiMat); cvShowImage("absPhi", &absPhiImage); IplImage dispShowImg(h_dispMat); //IplImage leftPhiImg(leftPhiMat); //IplImage leftMeanImg(leftMeanMat); cvShowImage("dispImg", &dispShowImg); cvSaveImage("dispImg.bmp", &dispShowImg); //cvShowImage("leftPhiImg", &leftPhiImg); //cvShowImage("leftMeanImg", &leftMeanImg); cvWaitKey(0); }
327aade11d1c5f88b7c9aa77e3e7c9ab914ce654.cu
#include "cuda.h" #include "curand_kernel.h" #include "cuda_runtime.h" #include "cuda_runtime_api.h" #include "device_launch_parameters.h" #include <opencv2\core.hpp> #include <opencv2\highgui.hpp> #include <stdio.h> #include <assert.h> #include <iostream> #include <fstream> #define BLOCK_SIZE 16 #define WARP_SIZE 32 #define PI 3.1415926f texture<uchar, cudaTextureType2D, cudaReadModeElementType> leftTex; texture<uchar, cudaTextureType2D, cudaReadModeElementType> rightTex; __global__ void rand_init(curandState *d_states, int height, int width) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; curand_init(1234ULL, offset, 0, &d_states[offset]); } __global__ void wrap_phase_shift(uchar* src, float* dst, int height, int width, float diffT) { int imgSize = height*width; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; float sqrt3 = sqrtf(3.0f); float I1 = static_cast<float>(src[offset]); float I2 = static_cast<float>(src[imgSize + offset]); float I3 = static_cast<float>(src[2 * imgSize + offset]); float maxI = fmaxf(fmaxf(I1, I2), I3); float I1_I2 = fabs(I1 - I2); float I2_I3 = fabs(I2 - I3); float I1_I3 = fabs(I1 - I3); if ((I1_I2 < diffT) && (I2_I3 < diffT) && (I1_I3 < diffT)) { dst[offset] = -4.0f; return; } float phiVal = atan2f(sqrt3*(I1 - I3), (2 * I2 - I1 - I3)); if (phiVal < 0) phiVal += 2 * PI; dst[offset] = phiVal; } __global__ void mean_filter(uchar* d_dst1, uchar* d_dst2, uchar* src1, uchar *src2, int height, int width, int win1, int win2) { int winSize = (2 * win1 + 1)*(2 * win2 + 1); int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; float sum1 = 0; float sum2 = 0; for (int h = -win1; h <= win1; h++) { int y0 = y + h; if (y0 < 0) y0 = 0; if (y0 >= height) y0 = height - 1; for (int w = -win2; w <= win2; w++) { //sum1 += tex2D(leftTex, x + w, y + h); //sum2 += tex2D(rightTex, x + w, y + h); int x0 = x + w; if (x0 < 0) x0 = 0; if (x0 >= width - 1) x0 = width - 1; sum1 += src1[y0*width + x0]; sum2 += src2[y0*width + x0]; } } d_dst1[offset] = static_cast<uchar>(sum1 / winSize + 0.5f); d_dst2[offset] = static_cast<uchar>(sum2 / winSize + 0.5f); } //census transform __global__ void census_transform64(uchar* d_leftMean, uchar* d_rightMean, uint64_t* d_leftCen, uint64_t* d_rightCen, int height, int width, int win1, int win2) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; uint64_t leftRes = 0; uint64_t rightRes = 0; int count = 0; for (int h = -win1; h <= win1; h++) { for (int w = -win2; w <= win2; w++) { if (h == 0 && w == 0) continue; uchar leftTemp = tex2D(leftTex, x + w, y + h); uchar rightTemp = tex2D(rightTex, x + w, y + h); if (d_leftMean[offset] > leftTemp) { leftRes = leftRes | (1 << count); } if (d_rightMean[offset] > rightTemp) { rightRes = rightRes | (1 << count); } count++; } } d_leftCen[offset] = leftRes; d_rightCen[offset] = rightRes; } __global__ void census_transform32(uint32_t *d_leftCen, uint32_t *d_rightCen, uchar *d_leftMean, uchar *d_rightMean, int height, int width, int win1, int win2) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; uint32_t leftRes = 0; uint32_t rightRes = 0; int count = 0; for (int h = -win1; h <= win1; h++) { for (int w = -win2; w <= win2; w++) { if (h == 0 && w == 0) continue; uchar leftTemp = tex2D(leftTex, x + 
w, y + h); uchar rightTemp = tex2D(rightTex, x + w, y + h); if (d_leftMean[offset] > leftTemp) { leftRes = leftRes | (1 << count); } if (d_rightMean[offset] > rightTemp) { rightRes = rightRes | (1 << count); } count++; if (count == 32) { d_leftCen[y*width * 2 + 2 * x] = leftRes; d_rightCen[y*width * 2 + 2 * x] = rightRes; leftRes = 0; rightRes = 0; count = 0; } } } d_leftCen[y*width * 2 + 2 * x + 1] = leftRes; d_rightCen[y*width * 2 + 2 * x + 1] = rightRes; } __device__ int hamming_distance(uint64_t c1, uint64_t c2) { return __popcll(c1^c2); } __device__ int hamming_distance(uint32_t c1, uint32_t c2) { return __popcll(c1^c2); } __device__ void search_best_disp(uint32_t* leftCen, uint32_t* rightCen, float* leftPhi, float* rightPhi, int width, int height, int x, int y, int minDisp, int maxDisp, int &bestDx, int &bestScore) { for (int dx = minDisp; dx <= maxDisp; dx++) { int cxR = x - dx; if (cxR < 0) continue; if (leftPhi) { float phiT = 0.25f; float phiL = leftPhi[y*width + x]; float phiR = rightPhi[y*width + cxR]; float dPhi = abs(phiL - phiR); if (dPhi > phiT) continue; if (phiL < 0) continue; if (phiR < 0) continue; } uint32_t c1 = leftCen[y*width * 2 + 2 * x]; uint32_t c2 = rightCen[y*width * 2 + 2 * cxR]; int d1 = hamming_distance(c1, c2); c1 = leftCen[y*width * 2 + 2 * x + 1]; c2 = rightCen[y*width * 2 + 2 * cxR + 1]; int d2 = hamming_distance(c1, c2); int d = d1 + d2; if (d < bestScore) { bestScore = d; bestDx = dx; } } } __global__ void disp_image_init(int* d_dispImg, int* d_scoreImg, float* d_leftPhi, float* d_rightPhi, uint32_t* d_leftCen, uint32_t* d_rightCen, int height, int width, int minDisp, int maxDisp, int dispRange, int randTimes, curandState *d_states) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = x + y*width; d_scoreImg[offset] = 1000; d_dispImg[offset] = minDisp; int bestScore = 1000; int bestDisp = minDisp; for (int t = 0; t < randTimes; t++) { int d = curand(d_states + offset) % dispRange + minDisp; //if (x == 128 && y == 128) printf("%d ", d); int minD = d; int maxD = d; int score = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, d, score); if (score < bestScore) { bestScore = score; bestDisp = d; } } d_scoreImg[offset] = bestScore; d_dispImg[offset] = bestDisp; } __global__ void left_to_right(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int y = blockIdx.x*blockDim.x + threadIdx.x; for (int x = 1; x < width; x++) { int offset = y*width + x; int x0 = x - 1, y0 = y; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void right_to_left(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int y = blockIdx.x*blockDim.x + threadIdx.x; for (int x = width - 1 - 1; x >= 0; x--) { int offset = y*width + x; int x0 = x + 1, y0 = y; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = 
minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void up_to_down(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int x = blockIdx.x*blockDim.x + threadIdx.x; for (int y = 1; y < height; y++) { int offset = y*width + x; int x0 = x, y0 = y - 1; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void down_to_up(int* d_dispImg, int* d_scoreImg, uint32_t* d_leftCen, uint32_t* d_rightCen, float* d_leftPhi, float* d_rightPhi, int height, int width, int minDisp, int maxDisp) { int x = blockIdx.x*blockDim.x + threadIdx.x; for (int y = height - 1 - 1; y >= 0; y--) { int offset = y*width + x; int x0 = x, y0 = y + 1; int d0 = d_dispImg[y0*width + x0]; if (d0 <= minDisp) continue; int score = d_scoreImg[offset]; int minD = d0 - 1; int maxD = d0 + 1; int bestD = minDisp; int bestScore = 1000; search_best_disp(d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, width, height, x, y, minD, maxD, bestD, bestScore); if (bestScore < score) { if (bestD < maxDisp) { d_dispImg[offset] = bestD; d_scoreImg[offset] = bestScore; } } } } __global__ void median_filter(int *d_src, int *d_dst, int height, int width) { int idx = blockIdx.x*blockDim.x + threadIdx.x; int row = idx / width; int col = idx % width; const int n = 3; int win[n*n]; int half = n / 2; if (row >= half && col >= half && row < height - half && col < width - half) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { win[i*n + j] = d_src[(row - half + i)*width + col - half + j]; } } for (int i = 0; i < (n*n) / 2 + 1; i++) { int minIdx = i; for (int j = i + 1; j < n*n; j++) { if (win[j] < win[minIdx]) { minIdx = j; } } const int temp = win[i]; win[i] = win[minIdx]; win[minIdx] = temp; } d_dst[idx] = win[(n*n) / 2]; } else { d_dst[idx] = d_src[idx]; } } __global__ void median_filter2(int *d_src, int *d_dst, int height, int width) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int offset = y*width + x; const int winSize = 3; const int halfSize = winSize / 2; int win[winSize*winSize]; // first step: horizontal median filter if (x >= winSize && y >= winSize && x < width - winSize && y < height - winSize) { int i = 0; int j = 0; int temp = 0; for (int x2 = x - winSize; x2 <= x + winSize; x2++) { temp = d_src[y*width + x2]; i = j - 1; while (i >= 0 && win[i] > temp) { win[i + 1] = win[i]; i--; } win[i + 1] = temp; j++; } d_dst[y*width + x] = win[winSize]; } else { d_dst[y*width + x] = d_src[y*width + x]; } __syncthreads(); // second step: vertical median filter if (x >= winSize && y >= winSize && x < width - winSize && y < height - winSize) { int i = 0; int j = 0; int temp = 0; for (int y2 = y - winSize; y2 <= y + winSize; y2++) { temp = d_dst[y2*width + x]; i = j - 1; while (i >= 0 && win[i] > temp) { win[i + 1] = win[i]; i--; } win[i + 1] = temp; j++; } d_dst[y*width + x] = win[winSize]; } else { d_dst[y*width + x] = 
d_src[y*width + x]; } } __device__ int FindRoot(int *d_labelImg, int label) { while (d_labelImg[label] != label) { label = d_labelImg[label]; } return label; } __device__ void Union(int *d_dispImg, int *d_labelImg, int address0, int address1, int *sChanged) { if (fabsf(d_dispImg[address0] - d_dispImg[address1]) <= 2) { int root0 = FindRoot(d_labelImg, address0); int root1 = FindRoot(d_labelImg, address1); if (root0 < root1) { atomicMin(d_labelImg + root1, root0); sChanged[0] = 1; } else if (root1 < root0) { atomicMin(d_labelImg + root0, root1); sChanged[0] = 1; } } } __global__ void block_label(int *d_dispImg, int *d_labelImg, int height, int width) { __shared__ int sSegs[BLOCK_SIZE*BLOCK_SIZE]; __shared__ int sLabels[BLOCK_SIZE*BLOCK_SIZE]; int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*width + x; int l_x = x % BLOCK_SIZE; int l_y = y % BLOCK_SIZE; int l_idx = l_y*BLOCK_SIZE + l_x; sSegs[l_idx] = d_dispImg[idx]; __shared__ int sChanged[1]; __syncthreads(); int label = l_idx; //int n_l_x[8], n_l_y[8]; //n_l_x[0] = l_x - 1; n_l_y[0] = l_y - 1; //n_l_x[1] = l_x; n_l_y[1] = l_y - 1; //n_l_x[2] = l_x + 1; n_l_y[2] = l_y - 1; //n_l_x[3] = l_x - 1; n_l_y[3] = l_y; //n_l_x[4] = l_x + 1; n_l_y[4] = l_y; //n_l_x[5] = l_x - 1; n_l_y[5] = l_y + 1; //n_l_x[6] = l_x; n_l_y[6] = l_y + 1; //n_l_x[7] = l_x + 1; n_l_y[7] = l_y + 1; const int neighArea = 4; int n_l_x[neighArea], n_l_y[neighArea]; n_l_x[0] = l_x - 1; n_l_y[0] = l_y; n_l_x[1] = l_x + 1; n_l_y[1] = l_y; n_l_x[2] = l_x; n_l_y[2] = l_y - 1; n_l_x[3] = l_x; n_l_y[3] = l_y + 1; while (1) { sLabels[l_idx] = label; if (threadIdx.x == 0 && threadIdx.y == 0) sChanged[0] = 0; __syncthreads(); int newLabel = label; for (int i = 0; i < neighArea; i++) { if (n_l_x[i] >= 0 && n_l_x[i] < BLOCK_SIZE && n_l_y[i] >= 0 && n_l_y[i] < BLOCK_SIZE) { int n_l_idx = n_l_y[i] * BLOCK_SIZE + n_l_x[i]; /*if (sSegs[l_idx] == 255 && sSegs[n_l_idx] == 255) { newLabel = static_cast<int>(fminf(newLabel, sLabels[n_l_idx])); }*/ if (fabsf(sSegs[l_idx] - sSegs[n_l_idx]) <= 1) { newLabel = static_cast<int>(fminf(newLabel, sLabels[n_l_idx])); } } } __syncthreads(); if (newLabel < label) { atomicMin(sLabels + label, newLabel); sChanged[0] = 1; } __syncthreads(); if (sChanged[0] == 0) break; label = FindRoot(sLabels, label); __syncthreads(); } /*if (d_dispImg[idx] == 0) { d_labelImg[idx] = 0; } else { d_labelImg[idx] = (blockIdx.y*blockDim.y + label / BLOCK_SIZE)*width + blockIdx.x*blockDim.x + label % BLOCK_SIZE; }*/ d_labelImg[idx] = (blockIdx.y*blockDim.y + label / BLOCK_SIZE)*width + blockIdx.x*blockDim.x + label % BLOCK_SIZE; //d_labelImg[idx] = label; } __global__ void block_merge(int *d_dispImg, int *d_labelImg, int height, int width) { dim3 subBlockIdx(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); dim3 subBlockDim(BLOCK_SIZE, BLOCK_SIZE); int rep = subBlockDim.x / blockDim.z; __shared__ int sChanged[1]; while (1) { if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { sChanged[0] = 0; } __syncthreads(); for (int i = 0; i < rep; i++) { int x = subBlockIdx.x*subBlockDim.x + i*blockDim.z + threadIdx.z; int y = (subBlockIdx.y + 1)*subBlockDim.y - 1; if (y + 1 < height) { int address0 = y*width + x; int address1 = (y + 1)*width + x; Union(d_dispImg, d_labelImg, address0, address1, sChanged); } } for (int i = 0; i < rep; i++) { int x = (subBlockIdx.x + 1)*subBlockDim.x - 1; int y = subBlockIdx.y*subBlockDim.y + i*blockDim.z + threadIdx.z; if (x + 1 < width) { int address0 = y*width + x; 
int address1 = y*width + x + 1; Union(d_dispImg, d_labelImg, address0, address1, sChanged); } } __syncthreads(); if (sChanged[0] == 0) break; __syncthreads(); } } __global__ void calcu_area(int *d_dispImg, int *d_labelImg, int *d_areaImg, int height, int width) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*BLOCK_SIZE*width + x; int currLabel = FindRoot(d_labelImg, idx); int nextLabel; int count = 1; for (int i = 1; i < BLOCK_SIZE; i++) { idx = (y*BLOCK_SIZE + i)*width + x; nextLabel = FindRoot(d_labelImg, idx); if (currLabel != nextLabel) { atomicAdd(d_areaImg + currLabel, count); currLabel = nextLabel; count = 1; } else { count++; } if (i == BLOCK_SIZE - 1) { atomicAdd(d_areaImg + currLabel, count); } } __syncthreads(); } __global__ void remove_small_segments(int *d_dispImg, int *d_labelImg, int *d_areaImg, int height, int width, int speckleSize, int minDisp) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int idx = y*width + x; int label = FindRoot(d_labelImg, idx); if (d_areaImg[label] < speckleSize) { d_dispImg[idx] = minDisp; } } // src0---half black / half white // src1---half white / half black int SegBW2(IplImage* src0, IplImage* bina) { if (src0 == NULL) return -1; int w, h, ws8; w = src0->width; h = src0->height; ws8 = src0->widthStep; float T = 0; for (int r = 0; r < h; r++) { for (int c = 0; c < w; c++) { unsigned char* pRow = (unsigned char*)src0->imageData + r*ws8; T = T + float(pRow[c]); } } T /= w*h; for (int r = 0; r < h; r++) { unsigned char* pRow1 = (unsigned char*)src0->imageData + r*ws8; unsigned char* pRowB = (unsigned char*)bina->imageData + r*ws8; for (int c = 0; c < w; c++) { //int diff = pRow1[c] - pRow2[c]; if (pRow1[c]>int(T)) { pRowB[c] = 255; } else { pRowB[c] = 0; } } } return 0; } // unwrapping for a planar target using a binary pattern // nFringe----the number of fringes int UnwrappingPhase(IplImage* bina, float *rtmPhi, float *absPhi, int nFringes) { int w = bina->width; int h = bina->height; int w2 = w / 4; float pi = 3.1415926f; for (int r = 0; r<h; r++) { unsigned char* binaRow = (unsigned char*)bina->imageData + r*w; float *rtmPhiRow = rtmPhi + r*w; float *absPhiRow = absPhi + r*w; for (int c = w2; c < w - w2; c++) { if (rtmPhiRow[c] <= -4) { continue; } if (binaRow[c] - binaRow[c - 1] > 0) { r = r; int k = 0; k = nFringes / 2; for (int c2 = c; c2<w; c2++) { if (rtmPhiRow[c2] <= -4) { continue; } if ((rtmPhiRow[c2] - rtmPhiRow[c2 - 1])<-pi) { k += 1; } absPhiRow[c2] = k * 2 * pi + rtmPhiRow[c2]; } k = nFringes / 2; for (int c2 = c; c2 >= 0; c2--) { if (rtmPhiRow[c2] <= -4) { continue; } if ((rtmPhiRow[c2] - rtmPhiRow[c2 + 1])>pi) { k -= 1; } absPhiRow[c2] = k * 2 * pi + rtmPhiRow[c2]; //kMap[r*w + c2] = k; } //kMap[r*w + c] = 1; break; } } } //OutData2Txt(kMap, w, h, w, "d:/kMap.txt"); //delete[] kMap; return 0; } //compute the absolute phase map for the reference image void RefImgAbsPhase(IplImage* binaImages, int nBinaImages, float *absPhi, float* retPhiRef, float nFringes) { float pi = 3.1415926f; int w = binaImages->width; int h = binaImages->height; //float* rtmPhi =new float[w*h]; float *column = new float[w*h]; //memset(retPhiRef, 0, sizeof(float)*w*h); int diffT = 18; IplImage* bina = cvCreateImage(cvSize(w, h), 8, 1); //SegBW(binaImages[0], binaImages[1], bina); //cvSaveImage("d:/bw.bmp", bina); SegBW2(binaImages, bina); cvSaveImage("bw2.bmp", bina); //WrapPhaseShift(fringeImages, nFringeImages, retPhiRef, diffT); UnwrappingPhase(bina, 
retPhiRef, absPhi, nFringes); //OutData2Txt(retPhiRef, w, h, w, "rtmPhiRef.txt"); //OutData2Txt(absPhi, w, h, w, "absPhiRef.txt"); delete[] column; cvReleaseImage(&bina); return; } // unwrapping using the reference image int Unwrapping_RefImg(int w, int h, float *rtmPhi, float *h_rightPhi, float *refPhi, float *absPhi, int *disp, int minDisp) { float *phi0 = new float[w*h]; float *rphi0 = new float[w*h]; float pi = 3.14159f; for (int r = 0; r < h; r++) { for (int c = 0; c<w; c++) { if ((r == 179) && (c == 108)) { r = r; } int idx = r*w + c; if (rtmPhi[r*w + c] <= -4) continue; int dx = int(disp[idx] - 0.5f); if (dx <= minDisp) { //absPhi[idx] = 0; continue; } int xRef = c - dx; if (xRef < 0) continue; float roughPhi = refPhi[r*w + xRef]; phi0[idx] = roughPhi; rphi0[idx] = h_rightPhi[r*w + xRef]; int k = int((roughPhi - rtmPhi[idx]) / (2 * pi) + 0.5f); absPhi[idx] = 2 * k*pi + rtmPhi[idx]; } } //OutData2Txt(phi0, w, h, w, "phi0.txt"); //OutData2Txt(rphi0, w, h, w, "rphi0.txt"); delete[] phi0; delete[] rphi0; return 0; } int main(int argc, char* argv[]) { IplImage* leftImg = cvLoadImage("tsetImage\\david\\speckle.bmp", 0); IplImage* rightImg = cvLoadImage("tsetImage\\ref\\ref.bmp", 0); IplImage* objFringeImg[3]; IplImage* refFringeImg[3]; objFringeImg[0] = cvLoadImage("tsetImage\\david\\objFringe0.bmp", 0); objFringeImg[1] = cvLoadImage("tsetImage\\david\\objFringe1.bmp", 0); objFringeImg[2] = cvLoadImage("tsetImage\\david\\objFringe2.bmp", 0); refFringeImg[0] = cvLoadImage("tsetImage\\ref\\refFringe0.bmp", 0); refFringeImg[1] = cvLoadImage("tsetImage\\ref\\refFringe1.bmp", 0); refFringeImg[2] = cvLoadImage("tsetImage\\ref\\refFringe2.bmp", 0); IplImage* binaImg = cvLoadImage("tsetImage\\cam_00.bmp", 0); const unsigned int height = 480; const unsigned int width = 640; const unsigned int imgSize = height*width; uchar* d_objFriImg, *d_refFriImg; if (cudaSuccess != cudaMalloc((void **)&d_objFriImg, 3 * imgSize * sizeof(uchar))) std::cout << "device malloc object fringe image error" << std::endl; if (cudaSuccess != cudaMalloc((void **)&d_refFriImg, 3 * imgSize * sizeof(uchar))) std::cout << "device malloc reference fringe image error" << std::endl; float* d_leftPhi, *d_rightPhi; if (cudaSuccess != cudaMalloc((void **)&d_leftPhi, imgSize * sizeof(float))) std::cout << "device malloc left phase image error" << std::endl; if (cudaSuccess != cudaMemset(d_leftPhi, 0.0f, imgSize * sizeof(float))) std::cout << "device memset left phase image error" << std::endl; if (cudaSuccess != cudaMalloc((void **)&d_rightPhi, imgSize * sizeof(float))) std::cout << "device malloc right phase image error" << std::endl; if (cudaSuccess != cudaMemset(d_rightPhi, 0.0f, imgSize * sizeof(float))) std::cout << "device memset right phase image error" << std::endl; if (cudaSuccess != cudaMemcpy(d_objFriImg, objFringeImg[0]->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy object fringe image 0 from host to devcie error" << std::endl; if (cudaSuccess != cudaMemcpy(d_objFriImg + imgSize, objFringeImg[1]->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy object fringe image 1 from host to device error" << std::endl; if (cudaSuccess != cudaMemcpy(d_objFriImg + 2 * imgSize, objFringeImg[2]->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy object fringe image 2 from host to device error" << std::endl; if (cudaSuccess != cudaMemcpy(d_refFriImg, refFringeImg[0]->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy 
reference fringe image 0 from host to devcie error" << std::endl; if (cudaSuccess != cudaMemcpy(d_refFriImg + imgSize, refFringeImg[1]->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy reference fringe image 1 from host to device error" << std::endl; if (cudaSuccess != cudaMemcpy(d_refFriImg + 2 * imgSize, refFringeImg[2]->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy reference fringe image 2 from host to devcie error" << std::endl; uchar* d_leftImg, *d_rightImg; if (cudaSuccess != cudaMalloc((void **)&d_leftImg, imgSize * sizeof(uchar))) std::cout << "device malloc left image error" << std::endl; if (cudaSuccess != cudaMalloc((void **)&d_rightImg, imgSize * sizeof(uchar))) std::cout << "device malloc right image error" << std::endl; if (cudaSuccess != cudaMemcpy(d_leftImg, leftImg->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy left image from host to device error" << std::endl; if (cudaSuccess != cudaMemcpy(d_rightImg, rightImg->imageData, imgSize * sizeof(uchar), cudaMemcpyHostToDevice)) std::cout << "copy right image from host to device error" << std::endl; uint32_t* d_leftCen, *d_rightCen; if (cudaSuccess != cudaMalloc((void **)&d_leftCen, 2 * imgSize * sizeof(uint32_t))) std::cout << "device malloc left census error" << std::endl; if (cudaSuccess != cudaMalloc((void **)&d_rightCen, 2 * imgSize * sizeof(uint32_t))) std::cout << "device malloc right census error" << std::endl; uchar* d_leftMean, *d_rightMean; if (cudaSuccess != cudaMalloc((void **)&d_leftMean, imgSize * sizeof(uchar))) std::cout << "device malloc left mean error" << std::endl; if (cudaSuccess != cudaMalloc((void **)&d_rightMean, imgSize * sizeof(uchar))) std::cout << "device malloc right mean error" << std::endl; int* d_dispImg, *d_scoreImg; if (cudaSuccess != cudaMalloc((void **)&d_dispImg, imgSize * sizeof(int))) std::cout << "device malloc disparity image error" << std::endl; if (cudaSuccess != cudaMalloc((void **)&d_scoreImg, imgSize * sizeof(int))) std::cout << "device malloc score image error" << std::endl; int *d_postImg; if (cudaSuccess != cudaMalloc((void **)&d_postImg, imgSize * sizeof(int))) std::cout << "device malloc post-processing image error" << std::endl; int *d_labelImg; if (cudaSuccess != cudaMalloc((void **)&d_labelImg, imgSize * sizeof(int))) std::cout << "device malloc label image error" << std::endl; int *d_areaImg; if (cudaSuccess != cudaMalloc((void **)&d_areaImg, imgSize * sizeof(int))) std::cout << "device malloc area image error" << std::endl; leftTex.addressMode[0] = cudaAddressModeClamp; leftTex.addressMode[1] = cudaAddressModeClamp; rightTex.addressMode[0] = cudaAddressModeClamp; rightTex.addressMode[1] = cudaAddressModeClamp; cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar>(); if (cudaSuccess != cudaBindTexture2D(NULL, &leftTex, d_leftImg, &desc, width, height, width * sizeof(uchar))) std::cout << "bind left texture error" << std::endl; if (cudaSuccess != cudaBindTexture2D(NULL, &rightTex, d_rightImg, &desc, width, height, width * sizeof(uchar))) std::cout << "bind right texture error" << std::endl; curandState *d_states; cudaMalloc((void **)&d_states, height*width * sizeof(curandState)); int *d_doneImg; cudaMalloc((void **)&d_doneImg, height*width * sizeof(int)); cudaMemset(d_doneImg, 0, imgSize * sizeof(int)); int *segList_x, *segList_y; cudaMalloc((void **)&segList_x, imgSize * sizeof(int)); cudaMalloc((void **)&segList_y, imgSize * sizeof(int)); int minDisp = -50; int maxDisp = 
165; int dispRange = maxDisp - minDisp; int randTimes = 3; dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE); dim3 gridSize((width + blockSize.x - 1) / blockSize.x, (height + blockSize.y - 1) / blockSize.y); dim3 blockSize1(BLOCK_SIZE, BLOCK_SIZE); dim3 gridSize1(width / blockSize1.x, height / blockSize1.y); dim3 blockSize2(4, 4, BLOCK_SIZE); dim3 gridSize2(width / (2 * BLOCK_SIZE), height / (2 * BLOCK_SIZE)); dim3 blockSize3(BLOCK_SIZE, 1); dim3 gridSize3(width / BLOCK_SIZE, height / BLOCK_SIZE); dim3 blockSize4(BLOCK_SIZE, BLOCK_SIZE); dim3 gridSize4(width / BLOCK_SIZE, height / BLOCK_SIZE); rand_init << <gridSize, blockSize >> > (d_states, height, width); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, NULL); int speckleSize = 100; int diffT = 18; wrap_phase_shift << <gridSize, blockSize >> > (d_objFriImg, d_leftPhi, height, width, diffT); wrap_phase_shift << <gridSize, blockSize >> > (d_refFriImg, d_rightPhi, height, width, diffT); int win1 = 3, win2 = 4; mean_filter << <gridSize, blockSize >> > (d_leftMean, d_rightMean, d_leftImg, d_rightImg, height, width, win1, win2); census_transform32 << <gridSize, blockSize >> > (d_leftCen, d_rightCen, d_leftMean, d_rightMean, height, width, win1, win2); //uint32_t *h_leftCen = new uint32_t[2 * imgSize](); //uint32_t *h_rightCen = new uint32_t[2 * imgSize](); //cudaMemcpy(h_leftCen, d_leftCen, 2 * imgSize * sizeof(uint32_t), cudaMemcpyDeviceToHost); //cudaMemcpy(h_rightCen, d_rightCen, 2 * imgSize * sizeof(uint32_t), cudaMemcpyDeviceToHost); //float *h_leftPhi = new float[imgSize](); //float *h_rightPhi = new float[imgSize](); //cudaMemcpy(h_leftPhi, d_leftPhi, imgSize * sizeof(float), cudaMemcpyDeviceToHost); //cudaMemcpy(h_rightPhi, d_rightPhi, imgSize * sizeof(float), cudaMemcpyDeviceToHost); //uchar *h_leftMean = new uchar[imgSize](); //uchar *h_rightMean = new uchar[imgSize](); //cudaMemcpy(h_leftMean, d_leftMean, imgSize * sizeof(uchar), cudaMemcpyDeviceToHost); //cudaMemcpy(h_rightMean, d_rightMean, imgSize * sizeof(uchar), cudaMemcpyDeviceToHost); disp_image_init << <gridSize, blockSize >> > (d_dispImg, d_scoreImg, d_leftPhi, d_rightPhi, d_leftCen, d_rightCen, height, width, minDisp, maxDisp, dispRange, randTimes, d_states); left_to_right << <height / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); up_to_down << <width / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); right_to_left << <height / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); down_to_up << <width / WARP_SIZE, WARP_SIZE >> > (d_dispImg, d_scoreImg, d_leftCen, d_rightCen, d_leftPhi, d_rightPhi, height, width, minDisp, maxDisp); median_filter2 << <gridSize, blockSize >> > (d_dispImg, d_postImg, height, width); block_label << <gridSize1, blockSize1 >> > (d_postImg, d_labelImg, height, width); block_merge << <gridSize2, blockSize2 >> > (d_postImg, d_labelImg, height, width); calcu_area << <gridSize3, blockSize3 >> > (d_postImg, d_labelImg, d_areaImg, height, width); remove_small_segments << <gridSize4, blockSize4 >> > (d_postImg, d_labelImg, d_areaImg, height, width, speckleSize, minDisp); cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); printf("GPU processing time : %.4f (ms)\n", msecTotal); float *h_leftPhi = new 
float[imgSize](); float *h_rightPhi = new float[imgSize](); float *h_absPhiRef = new float[imgSize](); int* h_dispImg = new int[height*width](); if (cudaMemcpy(h_leftPhi, d_leftPhi, imgSize * sizeof(float), cudaMemcpyDeviceToHost)) std::cout << "copy left phase image from device to host error" << std::endl; if (cudaMemcpy(h_rightPhi, d_rightPhi, imgSize * sizeof(float), cudaMemcpyDeviceToHost)) std::cout << "copy right phase image from device to host error" << std::endl; if (cudaMemcpy(h_dispImg, d_postImg, height*width * sizeof(int), cudaMemcpyDeviceToHost)) std::cout << "copy disparity image from device to host error" << std::endl; int nBinaImg = 1; float n_finges = 25.6f; RefImgAbsPhase(binaImg, nBinaImg, h_absPhiRef, h_rightPhi, n_finges); float *h_absPhi = new float[imgSize](); Unwrapping_RefImg(width, height, h_leftPhi, h_rightPhi, h_absPhiRef, h_absPhi, h_dispImg, minDisp); cv::Mat h_dispMat(height, width, CV_8UC1, cv::Scalar(0)); for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { h_dispMat.at<uchar>(row, col) = static_cast<uchar>(h_dispImg[row*width + col]); //std::cout << h_dispImg[row*width + col] << " "; } //std::cout << std::endl; } cv::Mat absPhiMat(height, width, CV_32FC1, cv::Scalar(0)); for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { absPhiMat.at<float>(row, col) = h_absPhi[row*width + col]; } } cv::normalize(absPhiMat, absPhiMat, 255, 0, cv::NORM_MINMAX); absPhiMat.convertTo(absPhiMat, CV_8U); IplImage absPhiImage(absPhiMat); cvShowImage("absPhi", &absPhiImage); IplImage dispShowImg(h_dispMat); //IplImage leftPhiImg(leftPhiMat); //IplImage leftMeanImg(leftMeanMat); cvShowImage("dispImg", &dispShowImg); cvSaveImage("dispImg.bmp", &dispShowImg); //cvShowImage("leftPhiImg", &leftPhiImg); //cvShowImage("leftMeanImg", &leftMeanImg); cvWaitKey(0); }
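The host setup in the file above checks every cudaMalloc/cudaMemcpy return code with a separate if/std::cout line. A compact alternative is a checking macro; the sketch below is only an illustration of that pattern and is not taken from either paired file — the macro name CUDA_CHECK and the toy buffer are assumptions.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper (illustrative, not part of the files above): wraps any
// CUDA runtime call that returns cudaError_t and reports the error string
// together with the file/line where the call failed.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

int main() {
    // Toy 640x480 single-channel buffer, the same image size used above.
    unsigned char *d_img = nullptr;
    CUDA_CHECK(cudaMalloc((void **)&d_img, 640 * 480 * sizeof(unsigned char)));
    CUDA_CHECK(cudaMemset(d_img, 0, 640 * 480 * sizeof(unsigned char)));
    CUDA_CHECK(cudaFree(d_img));
    return 0;
}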
3387e09d51c951e66f4ad69eb4b5708f74f4eca5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> using namespace std; __global__ void vecMat(int *a, int *b, int *c, int n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int sum = 0; for (int j = 0; j < n; j++) { sum += a[row * n + j] * b[j]; } c[row] = sum; } int main() { int n; cin >> n; int *a = new int[n * n]; int *b = new int[n]; int *c = new int[n]; int size = n * sizeof(int); cout<<"Matrix A: "<<endl; for (int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cin >> a[i * n + j]; } } cout<<"Matrix A is: "<<endl; for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cout << "a[" << i * n + j << "] = " << a[i * n + j] << " "; } cout << endl; } cout<<"Vector B: "<<endl; for(int i = 0; i < n; i++) { cin >> b[i]; } cout<<"Vector B is: "<<endl; for(int i = 0; i < n; i++) { cout << "b[" << i << "] = " <<b[i] << " "; } cout<<endl; int *dev_a, *dev_b, *dev_c; hipMalloc(&dev_a, n * size); hipMalloc(&dev_b, size); hipMalloc(&dev_c, size); hipMemcpy(dev_a, a, n * size, hipMemcpyHostToDevice); hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice); dim3 grid_dim(n, n, 1); hipLaunchKernelGGL(( vecMat) , dim3(grid_dim), dim3(1) , 0, 0, dev_a, dev_b, dev_c, n); hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost); cout << "Output: " << endl; for(int i = 0; i < n; i++) { cout<< "c[" << i << "] = " << c[i] <<" "; } }
3387e09d51c951e66f4ad69eb4b5708f74f4eca5.cu
#include<iostream> using namespace std; __global__ void vecMat(int *a, int *b, int *c, int n) { int row = blockIdx.y * blockDim.y + threadIdx.y; int sum = 0; for (int j = 0; j < n; j++) { sum += a[row * n + j] * b[j]; } c[row] = sum; } int main() { int n; cin >> n; int *a = new int[n * n]; int *b = new int[n]; int *c = new int[n]; int size = n * sizeof(int); cout<<"Matrix A: "<<endl; for (int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cin >> a[i * n + j]; } } cout<<"Matrix A is: "<<endl; for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { cout << "a[" << i * n + j << "] = " << a[i * n + j] << " "; } cout << endl; } cout<<"Vector B: "<<endl; for(int i = 0; i < n; i++) { cin >> b[i]; } cout<<"Vector B is: "<<endl; for(int i = 0; i < n; i++) { cout << "b[" << i << "] = " <<b[i] << " "; } cout<<endl; int *dev_a, *dev_b, *dev_c; cudaMalloc(&dev_a, n * size); cudaMalloc(&dev_b, size); cudaMalloc(&dev_c, size); cudaMemcpy(dev_a, a, n * size, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice); dim3 grid_dim(n, n, 1); vecMat <<< grid_dim, 1 >>> (dev_a, dev_b, dev_c, n); cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost); cout << "Output: " << endl; for(int i = 0; i < n; i++) { cout<< "c[" << i << "] = " << c[i] <<" "; } }
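Note on the vecMat pair above: the kernel derives its row index only from blockIdx.y (with blockDim.y equal to 1), yet the launch uses an n-by-n grid of single-thread blocks, so every row is recomputed n times and the row index is never bounds-checked. The output is still correct; purely as a point of comparison, here is a hedged sketch of an equivalent one-thread-per-row launch. The name vecMatRow and the 256-thread block size are choices of this sketch, not of the original file.

__global__ void vecMatRow(const int *a, const int *b, int *c, int n)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per matrix row
    if (row >= n) return;                              // guard the last, partially filled block
    int sum = 0;
    for (int j = 0; j < n; j++)
        sum += a[row * n + j] * b[j];
    c[row] = sum;
}

// launch: ceil(n / 256) blocks of 256 threads instead of an n-by-n grid of single threads
// vecMatRow<<<(n + 255) / 256, 256>>>(dev_a, dev_b, dev_c, n);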
29adacd90a557fa59426db43fff082c4d395475d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Author: Alex Fender [email protected] #include <algorithm> #include <graph.hpp> #include "pagerank_1D.cuh" #include "utilities/graph_utils.cuh" namespace cugraph { namespace mg { template <typename VT, typename WT> __global__ void transition_kernel(const size_t e, const VT *ind, const VT *degree, WT *val) { for (auto i = threadIdx.x + blockIdx.x * blockDim.x; i < e; i += gridDim.x * blockDim.x) val[i] = 1.0 / degree[ind[i]]; // Degree contains IN degree. So all degree[ind[i]] were // incremented by definition (no div by 0). } template <typename VT, typename ET, typename WT> Pagerank<VT, ET, WT>::Pagerank(const raft::handle_t &handle_, GraphCSCView<VT, ET, WT> const &G) : comm(handle_.get_comms()), bookmark(G.number_of_vertices), prev_pr(G.number_of_vertices), val(G.local_edges[comm.get_rank()]), handle(handle_), has_personalization(false) { v_glob = G.number_of_vertices; v_loc = G.local_vertices[comm.get_rank()]; e_loc = G.local_edges[comm.get_rank()]; part_off = G.local_offsets; local_vertices = G.local_vertices; off = G.offsets; ind = G.indices; blocks = handle_.get_device_properties().maxGridSize[0]; threads = handle_.get_device_properties().maxThreadsPerBlock; sm_count = handle_.get_device_properties().multiProcessorCount; is_setup = false; } template <typename VT, typename ET, typename WT> Pagerank<VT, ET, WT>::~Pagerank() { } template <typename VT, typename ET, typename WT> void Pagerank<VT, ET, WT>::transition_vals(const VT *degree) { if (e_loc > 0) { int threads = ::min(e_loc, this->threads); int blocks = ::min(32 * sm_count, this->blocks); hipLaunchKernelGGL(( transition_kernel<VT, WT>), dim3(blocks), dim3(threads), 0, 0, e_loc, ind, degree, val.data().get()); CHECK_CUDA(nullptr); } } template <typename VT, typename ET, typename WT> void Pagerank<VT, ET, WT>::flag_leafs(const VT *degree) { if (v_glob > 0) { int threads = ::min(v_glob, this->threads); int blocks = ::min(32 * sm_count, this->blocks); hipLaunchKernelGGL(( cugraph::detail::flag_leafs_kernel<VT, WT>) , dim3(blocks), dim3(threads), 0, 0, v_glob, degree, bookmark.data().get()); CHECK_CUDA(nullptr); } } // Artificially create the google matrix by setting val and bookmark template <typename VT, typename ET, typename WT> void Pagerank<VT, ET, WT>::setup(WT _alpha, VT *degree, VT personalization_subset_size, VT *personalization_subset, WT *personalization_values) { if (!is_setup) { alpha = _alpha; WT zero = 0.0; WT one = 1.0; // Update dangling node vector cugraph::detail::fill(v_glob, bookmark.data().get(), zero); flag_leafs(degree); cugraph::detail::update_dangling_nodes(v_glob, bookmark.data().get(), alpha); // Transition matrix transition_vals(degree); // personalize if (personalization_subset_size != 0) { CUGRAPH_EXPECTS(personalization_subset != nullptr, "Invalid API parameter: personalization_subset array should be of size " 
"personalization_subset_size"); CUGRAPH_EXPECTS(personalization_values != nullptr, "Invalid API parameter: personalization_values array should be of size " "personalization_subset_size"); CUGRAPH_EXPECTS(personalization_subset_size <= v_glob, "Personalization size should be smaller than V"); WT sum = cugraph::detail::nrm1(personalization_subset_size, personalization_values); if (sum != zero) { has_personalization = true; personalization_vector.resize(v_glob); cugraph::detail::fill(v_glob, personalization_vector.data().get(), zero); cugraph::detail::scal(v_glob, one / sum, personalization_values); cugraph::detail::scatter(personalization_subset_size, personalization_values, personalization_vector.data().get(), personalization_subset); } } is_setup = true; } else CUGRAPH_FAIL("MG PageRank : Setup can be called only once"); } // run the power iteration on the google matrix template <typename VT, typename ET, typename WT> int Pagerank<VT, ET, WT>::solve(int max_iter, float tolerance, WT *pagerank) { if (is_setup) { WT dot_res; WT one = 1.0; WT *pr = pagerank; cugraph::detail::fill(v_glob, pagerank, one / v_glob); cugraph::detail::fill(v_glob, prev_pr.data().get(), one / v_glob); // This cuda sync was added to fix #426 // This should not be requiered in theory // This is not needed on one GPU at this time hipDeviceSynchronize(); dot_res = cugraph::detail::dot(v_glob, bookmark.data().get(), pr); MGcsrmv<VT, ET, WT> spmv_solver( handle, local_vertices, part_off, off, ind, val.data().get(), pagerank); WT residual; int i; for (i = 0; i < max_iter; ++i) { spmv_solver.run(pagerank); cugraph::detail::scal(v_glob, alpha, pr); // personalization if (has_personalization) cugraph::detail::axpy(v_glob, dot_res, personalization_vector.data().get(), pr); else cugraph::detail::addv(v_glob, dot_res * (one / v_glob), pr); dot_res = cugraph::detail::dot(v_glob, bookmark.data().get(), pr); cugraph::detail::scal(v_glob, one / cugraph::detail::nrm2(v_glob, pr), pr); // convergence check cugraph::detail::axpy(v_glob, (WT)-1.0, pr, prev_pr.data().get()); residual = cugraph::detail::nrm2(v_glob, prev_pr.data().get()); if (residual < tolerance) break; else cugraph::detail::copy(v_glob, pr, prev_pr.data().get()); } cugraph::detail::scal(v_glob, one / cugraph::detail::nrm1(v_glob, pr), pr); return i; } else { CUGRAPH_FAIL("MG PageRank : Solve was called before setup"); } } template class Pagerank<int, int, double>; template class Pagerank<int, int, float>; } // namespace mg } // namespace cugraph #include "utilities/eidir_graph_utils.hpp"
29adacd90a557fa59426db43fff082c4d395475d.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Author: Alex Fender [email protected] #include <algorithm> #include <graph.hpp> #include "pagerank_1D.cuh" #include "utilities/graph_utils.cuh" namespace cugraph { namespace mg { template <typename VT, typename WT> __global__ void transition_kernel(const size_t e, const VT *ind, const VT *degree, WT *val) { for (auto i = threadIdx.x + blockIdx.x * blockDim.x; i < e; i += gridDim.x * blockDim.x) val[i] = 1.0 / degree[ind[i]]; // Degree contains IN degree. So all degree[ind[i]] were // incremented by definition (no div by 0). } template <typename VT, typename ET, typename WT> Pagerank<VT, ET, WT>::Pagerank(const raft::handle_t &handle_, GraphCSCView<VT, ET, WT> const &G) : comm(handle_.get_comms()), bookmark(G.number_of_vertices), prev_pr(G.number_of_vertices), val(G.local_edges[comm.get_rank()]), handle(handle_), has_personalization(false) { v_glob = G.number_of_vertices; v_loc = G.local_vertices[comm.get_rank()]; e_loc = G.local_edges[comm.get_rank()]; part_off = G.local_offsets; local_vertices = G.local_vertices; off = G.offsets; ind = G.indices; blocks = handle_.get_device_properties().maxGridSize[0]; threads = handle_.get_device_properties().maxThreadsPerBlock; sm_count = handle_.get_device_properties().multiProcessorCount; is_setup = false; } template <typename VT, typename ET, typename WT> Pagerank<VT, ET, WT>::~Pagerank() { } template <typename VT, typename ET, typename WT> void Pagerank<VT, ET, WT>::transition_vals(const VT *degree) { if (e_loc > 0) { int threads = std::min(e_loc, this->threads); int blocks = std::min(32 * sm_count, this->blocks); transition_kernel<VT, WT><<<blocks, threads>>>(e_loc, ind, degree, val.data().get()); CHECK_CUDA(nullptr); } } template <typename VT, typename ET, typename WT> void Pagerank<VT, ET, WT>::flag_leafs(const VT *degree) { if (v_glob > 0) { int threads = std::min(v_glob, this->threads); int blocks = std::min(32 * sm_count, this->blocks); cugraph::detail::flag_leafs_kernel<VT, WT> <<<blocks, threads>>>(v_glob, degree, bookmark.data().get()); CHECK_CUDA(nullptr); } } // Artificially create the google matrix by setting val and bookmark template <typename VT, typename ET, typename WT> void Pagerank<VT, ET, WT>::setup(WT _alpha, VT *degree, VT personalization_subset_size, VT *personalization_subset, WT *personalization_values) { if (!is_setup) { alpha = _alpha; WT zero = 0.0; WT one = 1.0; // Update dangling node vector cugraph::detail::fill(v_glob, bookmark.data().get(), zero); flag_leafs(degree); cugraph::detail::update_dangling_nodes(v_glob, bookmark.data().get(), alpha); // Transition matrix transition_vals(degree); // personalize if (personalization_subset_size != 0) { CUGRAPH_EXPECTS(personalization_subset != nullptr, "Invalid API parameter: personalization_subset array should be of size " "personalization_subset_size"); CUGRAPH_EXPECTS(personalization_values != nullptr, "Invalid API parameter: personalization_values array should be of size " 
"personalization_subset_size"); CUGRAPH_EXPECTS(personalization_subset_size <= v_glob, "Personalization size should be smaller than V"); WT sum = cugraph::detail::nrm1(personalization_subset_size, personalization_values); if (sum != zero) { has_personalization = true; personalization_vector.resize(v_glob); cugraph::detail::fill(v_glob, personalization_vector.data().get(), zero); cugraph::detail::scal(v_glob, one / sum, personalization_values); cugraph::detail::scatter(personalization_subset_size, personalization_values, personalization_vector.data().get(), personalization_subset); } } is_setup = true; } else CUGRAPH_FAIL("MG PageRank : Setup can be called only once"); } // run the power iteration on the google matrix template <typename VT, typename ET, typename WT> int Pagerank<VT, ET, WT>::solve(int max_iter, float tolerance, WT *pagerank) { if (is_setup) { WT dot_res; WT one = 1.0; WT *pr = pagerank; cugraph::detail::fill(v_glob, pagerank, one / v_glob); cugraph::detail::fill(v_glob, prev_pr.data().get(), one / v_glob); // This cuda sync was added to fix #426 // This should not be requiered in theory // This is not needed on one GPU at this time cudaDeviceSynchronize(); dot_res = cugraph::detail::dot(v_glob, bookmark.data().get(), pr); MGcsrmv<VT, ET, WT> spmv_solver( handle, local_vertices, part_off, off, ind, val.data().get(), pagerank); WT residual; int i; for (i = 0; i < max_iter; ++i) { spmv_solver.run(pagerank); cugraph::detail::scal(v_glob, alpha, pr); // personalization if (has_personalization) cugraph::detail::axpy(v_glob, dot_res, personalization_vector.data().get(), pr); else cugraph::detail::addv(v_glob, dot_res * (one / v_glob), pr); dot_res = cugraph::detail::dot(v_glob, bookmark.data().get(), pr); cugraph::detail::scal(v_glob, one / cugraph::detail::nrm2(v_glob, pr), pr); // convergence check cugraph::detail::axpy(v_glob, (WT)-1.0, pr, prev_pr.data().get()); residual = cugraph::detail::nrm2(v_glob, prev_pr.data().get()); if (residual < tolerance) break; else cugraph::detail::copy(v_glob, pr, prev_pr.data().get()); } cugraph::detail::scal(v_glob, one / cugraph::detail::nrm1(v_glob, pr), pr); return i; } else { CUGRAPH_FAIL("MG PageRank : Solve was called before setup"); } } template class Pagerank<int, int, double>; template class Pagerank<int, int, float>; } // namespace mg } // namespace cugraph #include "utilities/eidir_graph_utils.hpp"
8de809a01490fe6357c58b410b2be5bb8d4ba3b7.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * vector sum: c = a + b
 */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "cudaUtility.h"

__global__ void vecAdd(float* A, float* B, float* C, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx < N)
    {
        C[idx] = A[idx] + B[idx];
    }
}

int main(void)
{
    int numElement = 50000;
    size_t size = numElement * sizeof(float);

    float *A, *B, *C;
    CUDA(hipMallocManaged(&A, size));
    CUDA(hipMallocManaged(&B, size));
    CUDA(hipMallocManaged(&C, size));

    // allocate the three host pointers a, b and c of size numElement
    float *h_A = (float*)malloc(numElement * sizeof(float));
    float *h_B = (float*)malloc(numElement * sizeof(float));
    float *h_C = (float*)malloc(numElement * sizeof(float));

    if(h_A == NULL || h_B == NULL || h_C == NULL)
    {
        printf("failed to malloc the cpu pointers\n");
        exit(1);
    }

    // initialize the cpu pointers
    for(int i = 0; i < numElement; i++)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }

    // allocate the three GPU memory pointers
    float *d_A = NULL;
    float *d_B = NULL;
    float *d_C = NULL;
    CUDA(hipMalloc((void**)&d_A, size));
    CUDA(hipMalloc((void**)&d_B, size));
    CUDA(hipMalloc((void**)&d_C, size));

    // copy the host inputs into the corresponding device inputs
    CUDA(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));
    CUDA(hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice));
    CUDA(hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice));

    // configure the kernel
    size_t threads_per_blocks = 256;
    size_t blocks_per_grid = (numElement + threads_per_blocks -1) / threads_per_blocks;
    printf("thread = %lu, blocks = %lu, Tot = %lu\n", threads_per_blocks, blocks_per_grid, threads_per_blocks * blocks_per_grid);

    hipLaunchKernelGGL(( vecAdd), dim3(blocks_per_grid), dim3(threads_per_blocks), 0, 0, d_A, d_B, d_C, numElement);
    CUDA(hipGetLastError());

    // copy the results into the array C
    CUDA(hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost));

    // free the cuda memory
    CUDA(hipFree(d_A));
    CUDA(hipFree(d_B));
    CUDA(hipFree(d_C));

    // free the CPU memory
    free(h_A);
    free(h_B);
    free(h_C);

    printf("Done\n");
    return 0;
}
8de809a01490fe6357c58b410b2be5bb8d4ba3b7.cu
/**
 * vector sum: c = a + b
 */
#include <stdio.h>
#include <cuda_runtime.h>
#include "cudaUtility.h"

__global__ void vecAdd(float* A, float* B, float* C, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if( idx < N)
    {
        C[idx] = A[idx] + B[idx];
    }
}

int main(void)
{
    int numElement = 50000;
    size_t size = numElement * sizeof(float);

    float *A, *B, *C;
    CUDA(cudaMallocManaged(&A, size));
    CUDA(cudaMallocManaged(&B, size));
    CUDA(cudaMallocManaged(&C, size));

    // allocate the three host pointers a, b and c of size numElement
    float *h_A = (float*)malloc(numElement * sizeof(float));
    float *h_B = (float*)malloc(numElement * sizeof(float));
    float *h_C = (float*)malloc(numElement * sizeof(float));

    if(h_A == NULL || h_B == NULL || h_C == NULL)
    {
        printf("failed to malloc the cpu pointers\n");
        exit(1);
    }

    // initialize the cpu pointers
    for(int i = 0; i < numElement; i++)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }

    // allocate the three GPU memory pointers
    float *d_A = NULL;
    float *d_B = NULL;
    float *d_C = NULL;
    CUDA(cudaMalloc((void**)&d_A, size));
    CUDA(cudaMalloc((void**)&d_B, size));
    CUDA(cudaMalloc((void**)&d_C, size));

    // copy the host inputs into the corresponding device inputs
    CUDA(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    CUDA(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));
    CUDA(cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice));

    // configure the kernel
    size_t threads_per_blocks = 256;
    size_t blocks_per_grid = (numElement + threads_per_blocks -1) / threads_per_blocks;
    printf("thread = %lu, blocks = %lu, Tot = %lu\n", threads_per_blocks, blocks_per_grid, threads_per_blocks * blocks_per_grid);

    vecAdd<<<blocks_per_grid, threads_per_blocks>>>(d_A, d_B, d_C, numElement);
    CUDA(cudaGetLastError());

    // copy the results into the array C
    CUDA(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost));

    // free the cuda memory
    CUDA(cudaFree(d_A));
    CUDA(cudaFree(d_B));
    CUDA(cudaFree(d_C));

    // free the CPU memory
    free(h_A);
    free(h_B);
    free(h_C);

    printf("Done\n");
    return 0;
}
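Note on the vector-add pair above: the grid is sized with the ceiling-division idiom blocks = (N + threadsPerBlock - 1) / threadsPerBlock, so the last, partially filled block is still launched and the in-kernel bounds check discards the excess threads. With N = 50000 and 256 threads per block this gives 196 blocks, i.e. 50176 threads for 50000 elements. The CUDA(...) wrapper comes from cudaUtility.h, which is not part of this collection, so it is assumed here to be an error-check macro; CUDA_CHECK and divUp below are hedged stand-ins of this sketch, not the project's definitions.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// assumed stand-in for the CUDA(...) macro from the unshown cudaUtility.h header
#define CUDA_CHECK(call)                                                \
    do {                                                                \
        cudaError_t err__ = (call);                                     \
        if (err__ != cudaSuccess) {                                     \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,          \
                    cudaGetErrorString(err__));                         \
            exit(EXIT_FAILURE);                                         \
        }                                                               \
    } while (0)

// ceiling division: smallest block count whose threads cover n elements
static inline size_t divUp(size_t n, size_t threadsPerBlock)
{
    return (n + threadsPerBlock - 1) / threadsPerBlock;
}

int main()
{
    const size_t numElement      = 50000;
    const size_t threadsPerBlock = 256;
    const size_t blocks          = divUp(numElement, threadsPerBlock);      // 196
    printf("blocks = %zu, total threads = %zu\n", blocks, blocks * threadsPerBlock); // 196, 50176
    CUDA_CHECK(cudaSetDevice(0));   // example use of the assumed error-check macro
    return 0;
}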
3779dda9c6a96c2870ee2a0a546a4d5836d9e2f2.hip
// !!! This is a file automatically generated by hipify!!! #include "CCubicDomain.cuh" //ds domain structure #include "Timer.h" //ds time measurement #include <iostream> //ds cout #include <hip/hip_runtime.h> //ds needed for eclipse indexer only (not for compilation) #include <hip/hip_runtime.h> //ds needed for eclipse indexer only (not for compilation) #include <device_launch_parameters.h> //ds needed for eclipse indexer only (not for compilation) //ds CUDA kernels - split up acceleration and velocity verlet for better readability - no shared memory used within these blocks (no overhead due to copying of 2d arrays) //-------------------------------------------------------------------------------------------------------------------------// __global__ void computeAccelerationsLennardJones( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrMasses, float* p_arrNewAcclerations, const float p_fLowerBoundary, const float p_fUpperBoundary, const float p_fMinimumDistance, const float p_fPotentialDepth ); __global__ void updateParticlesVelocityVerlet( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrAccelerations, float* p_arrNewAcclerations, const float p_fLowerBoundary, const float p_fUpperBoundary, const float p_fTimeStepSize ); __global__ void getTotalEnergy( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrMasses, const float p_fMinimumDistance, const float p_fPotentialDepth, float* p_fTotalEnergy ); //-------------------------------------------------------------------------------------------------------------------------// //ds NOT USED CUDA kernels due to worse perfomance than CPU solution /*-------------------------------------------------------------------------------------------------------------------------// __global__ void getCenterOfMass( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrMasses, float* p_vecCenterOfMass ); __global__ void getTotalAngularMomentum( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrMasses, float* p_vecTotalAngularMomentum ); //-------------------------------------------------------------------------------------------------------------------------*/ int main( int argc, char** argv ) { //ds check simple input arguments - CAUTION: the implementation expects real numbers, the simulation will be corrupted if invalid values are entered if( 4 != argc ) { //ds inform std::cout << "usage: nbody_gpu [Number of particles] [Number of time steps] [Target energy]" << std::endl; return 0; } //ds start timing Timer tmTimer; tmTimer.start( ); //ds domain configuration const std::pair< double, double > pairBoundaries( -1.0, 1.0 ); const float fDomainWidth( fabs( pairBoundaries.first ) + fabs( pairBoundaries.second ) ); const unsigned int uNumberOfParticles( atoi( argv[1] ) ); //ds current simulation configuration const float fTimeStepSize( 0.0001 ); const unsigned int uNumberOfTimeSteps( atoi( argv[2] ) ); const float fMinimumDistance( pow( 1.0/uNumberOfParticles, 1.0/3 ) ); const float fPotentialDepth( 1.0 ); //ds target kinetic energy const float fTargetKineticEnergy( atol( argv[3] ) ); std::cout << "------- GPU SETUP -----------------------------------------------------------" << std::endl; std::cout << " Number of particles: " << uNumberOfParticles << std::endl; std::cout << " Boundary (3D): [" << pairBoundaries.first << ", " << pairBoundaries.second << "]" << 
std::endl; std::cout << " Domain Width: " << fDomainWidth << std::endl; std::cout << " Minimum distance: " << fMinimumDistance << std::endl; std::cout << " Cutoff distance: " << 2.5*fMinimumDistance << std::endl; std::cout << " Potential depth: " << fPotentialDepth << std::endl; std::cout << "Target kinetic energy: " << fTargetKineticEnergy << std::endl; std::cout << " Number of time steps: " << uNumberOfTimeSteps << std::endl; std::cout << " Time step size: " << fTimeStepSize << std::endl; std::cout << "-----------------------------------------------------------------------------" << std::endl; //ds allocate a domain to work with specifying number of particles and timing NBody::CCubicDomain cDomain( uNumberOfParticles ); //ds create particles uniformly from a normal distribution cDomain.createParticlesUniformFromNormalDistribution( fTargetKineticEnergy ); //ds host information: particles float* h_arrPositions ( cDomain.getPositions( ) ); float* h_arrVelocities ( cDomain.getVelocities( ) ); float* h_arrAccelerations( cDomain.getAccelerations( ) ); float* h_arrMasses ( cDomain.getMasses( ) ); //ds host information: integrals and initialize them float h_fTotalEnergy( 0.0 ); //float h_vecCenterOfMass[3]; h_vecCenterOfMass[0] = 0.0; h_vecCenterOfMass[1] = 0.0; h_vecCenterOfMass[2] = 0.0; //float h_vecTotalAngularMomentum[3]; h_vecTotalAngularMomentum[0] = 0.0; h_vecTotalAngularMomentum[1] = 0.0; h_vecTotalAngularMomentum[2] = 0.0; //ds device handles: particles float* d_arrPositions ( 0 ); //Nx3 float* d_arrVelocities ( 0 ); //Nx3 float* d_arrAccelerations ( 0 ); //Nx3 float* d_arrMasses ( 0 ); //Nx3 float* d_arrNewAccelerations( 0 ); //Nx3 //ds device handles: integrals float* d_fTotalEnergy ( 0 ); //1x1 //float* d_vecCenterOfMass ( 0 ); //3x1 //float* d_vecTotalAngularMomentum( 0 ); //3x1 //ds allocate memory: particles (here we see the advantage of using linear arrays) hipMalloc( (void **)&d_arrPositions , uNumberOfParticles*3*sizeof( float ) ); hipMalloc( (void **)&d_arrVelocities , uNumberOfParticles*3*sizeof( float ) ); hipMalloc( (void **)&d_arrAccelerations , uNumberOfParticles*3*sizeof( float ) ) ; hipMalloc( (void **)&d_arrMasses , uNumberOfParticles*sizeof( float ) ) ; hipMalloc( (void **)&d_arrNewAccelerations, uNumberOfParticles*3*sizeof( float ) ) ; //ds allocate memory: integrals hipMalloc( (void **)&d_fTotalEnergy , sizeof( float ) ) ; //hipMalloc( (void **)&d_vecCenterOfMass , 3*sizeof( float ) ) ; //hipMalloc( (void **)&d_vecTotalAngularMomentum, 3*sizeof( float ) ) ; //ds copy memory to gpu to initialize the situation hipMemcpy( d_arrPositions , h_arrPositions , uNumberOfParticles*3*sizeof( float ), hipMemcpyHostToDevice ); hipMemcpy( d_arrVelocities , h_arrVelocities , uNumberOfParticles*3*sizeof( float ), hipMemcpyHostToDevice ); hipMemcpy( d_arrAccelerations, h_arrAccelerations, uNumberOfParticles*3*sizeof( float ), hipMemcpyHostToDevice ); hipMemcpy( d_arrMasses , h_arrMasses , uNumberOfParticles*sizeof( float ) , hipMemcpyHostToDevice ); //ds information std::cout << " Status: 0% done - current step: 0"; //ds start simulation for( unsigned int uCurrentTimeStep = 1; uCurrentTimeStep < uNumberOfTimeSteps+1; ++uCurrentTimeStep ) { //ds calculate percentage done const float fPercentageDone( 100.0*uCurrentTimeStep/uNumberOfTimeSteps ); //ds get a formatted string -> 100% -> 3 digits char chBuffer[4]; //ds fill the buffer std::snprintf( chBuffer, 4, "%3.0f", fPercentageDone ); //ds print info std::cout << '\xd'; std::cout << " Status: " << chBuffer << "% done - current step: 
" << uCurrentTimeStep; //ds calculate the new accelerations hipLaunchKernelGGL(( computeAccelerationsLennardJones), dim3(1), dim3(uNumberOfParticles) , 0, 0, uNumberOfParticles, d_arrPositions, d_arrMasses, d_arrNewAccelerations, pairBoundaries.first, pairBoundaries.second, fMinimumDistance, fPotentialDepth ); //ds update particle properties according to velocity verlet scheme hipLaunchKernelGGL(( updateParticlesVelocityVerlet), dim3(1), dim3(uNumberOfParticles) , 0, 0, uNumberOfParticles, d_arrPositions, d_arrVelocities, d_arrAccelerations, d_arrNewAccelerations, pairBoundaries.first, pairBoundaries.second, fTimeStepSize ); //ds compute total energy hipLaunchKernelGGL(( getTotalEnergy), dim3(1), dim3(uNumberOfParticles), uNumberOfParticles*sizeof( float ) , 0, uNumberOfParticles, d_arrPositions, d_arrVelocities, d_arrMasses, fMinimumDistance, fPotentialDepth, d_fTotalEnergy ); /*ds compute center of mass getCenterOfMass<<< 1, uNumberOfParticles, uNumberOfParticles*4*sizeof( float ) >>>( uNumberOfParticles, d_arrPositions, d_arrMasses, d_vecCenterOfMass );*/ /*ds compute total angular momentum - INFO: slower than cpu version getTotalAngularMomentum<<< 1, uNumberOfParticles, uNumberOfParticles*3*sizeof( float ) >>>( uNumberOfParticles, d_arrPositions, d_arrVelocities, d_arrMasses, d_vecTotalAngularMomentum );*/ //ds get the particle information from gpu to cpu hipMemcpy( h_arrPositions , d_arrPositions , uNumberOfParticles*3*sizeof( float ), hipMemcpyDeviceToHost ); hipMemcpy( h_arrVelocities , d_arrVelocities , uNumberOfParticles*3*sizeof( float ), hipMemcpyDeviceToHost ); hipMemcpy( h_arrAccelerations, d_arrAccelerations, uNumberOfParticles*3*sizeof( float ), hipMemcpyDeviceToHost ); hipMemcpy( h_arrMasses , d_arrMasses , uNumberOfParticles*sizeof( float ) , hipMemcpyDeviceToHost ); //ds get the integrals information from gpu to cpu hipMemcpy( &h_fTotalEnergy , d_fTotalEnergy , sizeof( float ), hipMemcpyDeviceToHost ); //hipMemcpy( h_vecCenterOfMass , d_vecCenterOfMass , 3*sizeof( float ), hipMemcpyDeviceToHost ); //hipMemcpy( h_vecTotalAngularMomentum, d_vecTotalAngularMomentum, 3*sizeof( float ), hipMemcpyDeviceToHost ); //ds save particle and integral information - the correct integrals saving procedure gets called automatically depending on parameters cDomain.saveParticlesToStream( ); cDomain.saveIntegralsToStream( h_fTotalEnergy ); //<- only total energy resource is taken from CUDA computation } //ds deallocate memory hipFree( d_arrPositions ); hipFree( d_arrVelocities ); hipFree( d_arrAccelerations ); hipFree( d_arrMasses ); hipFree( d_arrNewAccelerations ); hipFree( d_fTotalEnergy ); //hipFree( d_vecCenterOfMass ); //hipFree( d_vecTotalAngularMomentum ); //ds save the streams to a file cDomain.writeParticlesToFile( "bin/simulation.txt", uNumberOfTimeSteps ); cDomain.writeIntegralsToFile( "bin/integrals.txt", uNumberOfTimeSteps, fTimeStepSize ); //ds stop timing const double dDurationSeconds( tmTimer.stop( ) ); //ds cause an output ostream std::cout << std::endl; std::cout << " Computation time: " << dDurationSeconds << std::endl; std::cout << "-----------------------------------------------------------------------------" << std::endl; return 0; } //ds CUDA kernels - split up acceleration and velocity verlet for better readability - no shared memory used within these blocks (no overhead due to copying of 2d arrays) //-------------------------------------------------------------------------------------------------------------------------// __global__ void 
computeAccelerationsLennardJones( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrMasses, float* p_arrNewAccelerations, const float p_fLowerBoundary, const float p_fUpperBoundary, const float p_fMinimumDistance, const float p_fPotentialDepth ) { //ds regular index and "real" particle index equals three times thread index, since were working with a linear 2d array const unsigned int uIndex1D( threadIdx.x ); const unsigned int uIndex3D( 3*threadIdx.x ); //ds get current mass (constant) const float fCurrentMass( p_arrMasses[uIndex1D] ); //ds force instance to calculate for the current particle float vecTotalForce[3]; //ds make sure all elements are initialized correctly vecTotalForce[0] = 0.0; vecTotalForce[1] = 0.0; vecTotalForce[2] = 0.0; //ds get the domain size const float fDomainSize( fabs( p_fLowerBoundary ) + fabs( p_fUpperBoundary ) ); //ds loop over all other particles for( unsigned int u = 0; u < p_uNumberOfParticles; ++u ) { //ds do not treat itself (else nan results because division by zero) if( u != uIndex1D ) { //ds cutoff distance const float fDistanceCutoff( 2.5*p_fMinimumDistance ); //ds we have to loop over the cubic boundary conditions for( float dX = p_fLowerBoundary; dX < p_fUpperBoundary+1.0; ++dX ) { for( float dY = p_fLowerBoundary; dY < p_fUpperBoundary+1.0; ++dY ) { for( float dZ = p_fLowerBoundary; dZ < p_fUpperBoundary+1.0; ++dZ ) { //ds get the radial vector between the particles float vecRadius[3]; //ds calculate the distance: domain + particle2 - particle1 vecRadius[0] = dX*fDomainSize + p_arrPositions[3*u+0] - p_arrPositions[uIndex3D+0]; vecRadius[1] = dY*fDomainSize + p_arrPositions[3*u+1] - p_arrPositions[uIndex3D+1]; vecRadius[2] = dZ*fDomainSize + p_arrPositions[3*u+2] - p_arrPositions[uIndex3D+2]; //ds get the absolute distance const float fDistanceAbsolute( sqrt( pow( vecRadius[0], 2 ) + pow( vecRadius[1], 2 ) + pow( vecRadius[2], 2 ) ) ); //ds if we are between the minimum distance and the cutoff range if( p_fMinimumDistance < fDistanceAbsolute && fDistanceCutoff > fDistanceAbsolute ) { //ds calculate the lennard jones force prefix const float fLJFPrefix( -24*p_fPotentialDepth*( 2*pow( p_fMinimumDistance/fDistanceAbsolute, 12 ) - pow( p_fMinimumDistance/fDistanceAbsolute, 6 ) ) *1/pow( fDistanceAbsolute, 2 ) ); //ds add the information to the force including the radial component vecTotalForce[0] += fLJFPrefix*vecRadius[0]; vecTotalForce[1] += fLJFPrefix*vecRadius[1]; vecTotalForce[2] += fLJFPrefix*vecRadius[2]; } } } } } } //ds set the new acceleration p_arrNewAccelerations[uIndex3D+0] = vecTotalForce[0]/fCurrentMass; p_arrNewAccelerations[uIndex3D+1] = vecTotalForce[1]/fCurrentMass; p_arrNewAccelerations[uIndex3D+2] = vecTotalForce[2]/fCurrentMass; } __global__ void updateParticlesVelocityVerlet( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrAccelerations, float* p_arrNewAccelerations, const float p_fLowerBoundary, const float p_fUpperBoundary, const float p_fTimeStepSize ) { //ds 3d index for the linear array const unsigned int uIndex3D( 3*threadIdx.x ); //ds calculate domain size const float fDomainSize( abs( p_fLowerBoundary ) + abs( p_fUpperBoundary ) ); //ds velocity-verlet for position p_arrPositions[uIndex3D+0] = p_arrPositions[uIndex3D+0] + p_fTimeStepSize*p_arrVelocities[uIndex3D+0] + 1.0/2*pow( p_fTimeStepSize, 2 )*p_arrAccelerations[uIndex3D+0]; p_arrPositions[uIndex3D+1] = p_arrPositions[uIndex3D+1] + p_fTimeStepSize*p_arrVelocities[uIndex3D+1] + 1.0/2*pow( 
p_fTimeStepSize, 2 )*p_arrAccelerations[uIndex3D+1]; p_arrPositions[uIndex3D+2] = p_arrPositions[uIndex3D+2] + p_fTimeStepSize*p_arrVelocities[uIndex3D+2] + 1.0/2*pow( p_fTimeStepSize, 2 )*p_arrAccelerations[uIndex3D+2]; //ds produce periodic boundary shifting - check each element: x,y,z for( unsigned int v = 0; v < 3; ++v ) { //ds check if we are below the boundary while( p_fLowerBoundary > p_arrPositions[uIndex3D+v] ) { //ds map the particle to the other boundary by shifting it up to the boundary p_arrPositions[uIndex3D+v] += fDomainSize; } //ds check if we are above the boundary while( p_fUpperBoundary < p_arrPositions[uIndex3D+v] ) { //ds map the particle to the other boundary by shifting it back to the boundary p_arrPositions[uIndex3D+v] -= fDomainSize; } } //ds velocity-verlet for velocity p_arrVelocities[uIndex3D+0] = p_arrVelocities[uIndex3D+0] + ( p_fTimeStepSize/2 )*( p_arrNewAccelerations[uIndex3D+0] + p_arrAccelerations[uIndex3D+0] ); p_arrVelocities[uIndex3D+1] = p_arrVelocities[uIndex3D+1] + ( p_fTimeStepSize/2 )*( p_arrNewAccelerations[uIndex3D+1] + p_arrAccelerations[uIndex3D+1] ); p_arrVelocities[uIndex3D+2] = p_arrVelocities[uIndex3D+2] + ( p_fTimeStepSize/2 )*( p_arrNewAccelerations[uIndex3D+2] + p_arrAccelerations[uIndex3D+2] ); //ds update the old accelerations p_arrAccelerations[uIndex3D+0] = p_arrNewAccelerations[uIndex3D+0]; p_arrAccelerations[uIndex3D+1] = p_arrNewAccelerations[uIndex3D+1]; p_arrAccelerations[uIndex3D+2] = p_arrNewAccelerations[uIndex3D+2]; } __global__ void getTotalEnergy( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrMasses, const float p_fMinimumDistance, const float p_fPotentialDepth, float* p_fTotalEnergy ) { //ds dynamic shared total energy to sum up by first thread extern __shared__ float s_arrTotalEnergy[]; //ds regular index and "real" particle index equals three times thread index, since were working with a linear 2d array const unsigned int uIndex1D( threadIdx.x ); const unsigned int uIndex3D( 3*threadIdx.x ); //ds make sure the shared memory is empty (each thread does this) s_arrTotalEnergy[uIndex1D] = 0.0; //ds wait until all threads are done __syncthreads( ); //ds add the kinetic component of the current particle s_arrTotalEnergy[uIndex1D] += p_arrMasses[uIndex1D]/2*pow( sqrt( pow( p_arrVelocities[uIndex3D+0], 2 ) + pow( p_arrVelocities[uIndex3D+1], 2 ) + pow( p_arrVelocities[uIndex3D+2], 2 ) ), 2 ); //ds cutoff const float fDistanceCutoff( 2.5*p_fMinimumDistance ); //ds calculate the total energy of the new configuration - loop over all other particles (dont do the same particles twice) for( unsigned int u = uIndex1D+1; u < p_uNumberOfParticles; ++u ) { //ds get the absolute distance const float fDistanceAbsolute( sqrt( pow( p_arrPositions[3*u+0] - p_arrPositions[uIndex3D+0], 2 ) + pow( p_arrPositions[3*u+1] - p_arrPositions[uIndex3D+1], 2 ) + pow( p_arrPositions[3*u+2] - p_arrPositions[uIndex3D+2], 2 ) ) ); //ds if we are between the minimum distance and the cutoff range if( p_fMinimumDistance < fDistanceAbsolute && fDistanceCutoff > fDistanceAbsolute ) { //ds add the potential component s_arrTotalEnergy[uIndex1D] += 4*p_fPotentialDepth*( pow( p_fMinimumDistance/fDistanceAbsolute, 12 ) - pow( p_fMinimumDistance/fDistanceAbsolute, 6 ) ); } } //ds wait until all threads are done __syncthreads( ); //ds thread 0 calculates the total energy if( 0 == uIndex1D ) { //ds total energy to sum up float fTotalEnergy( 0.0 ); for( unsigned int u = 0; u < p_uNumberOfParticles; ++u ) { 
fTotalEnergy += s_arrTotalEnergy[u]; } //ds set the return value *p_fTotalEnergy = fTotalEnergy; } } //-------------------------------------------------------------------------------------------------------------------------// //ds NOT USED CUDA kernels due to worse perfomance than CPU solution /*-------------------------------------------------------------------------------------------------------------------------// __global__ void getCenterOfMass( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrMasses, float* p_vecCenterOfMass ) { //ds dynamic shared relative center of mass to sum up by first thread + the mass (Nx4) extern __shared__ float s_arrRelativeCenterOfMassPlusMass[]; //ds Nx4 Array in this case const unsigned int uIndex1D( threadIdx.x ); const unsigned int uIndex4D( 4*threadIdx.x ); //ds save current mass const float fCurrentMass( p_arrMasses[uIndex1D] ); //ds set the relative mass s_arrRelativeCenterOfMassPlusMass[uIndex4D+0] = fCurrentMass*p_arrPositions[uIndex4D+0]; s_arrRelativeCenterOfMassPlusMass[uIndex4D+1] = fCurrentMass*p_arrPositions[uIndex4D+1]; s_arrRelativeCenterOfMassPlusMass[uIndex4D+2] = fCurrentMass*p_arrPositions[uIndex4D+2]; //ds save it to the shared array too s_arrRelativeCenterOfMassPlusMass[uIndex4D+3] = fCurrentMass; //ds wait until all threads are done __syncthreads( ); //ds the first thread now calculates the result if( 0 == uIndex1D ) { //ds initialize p_vecCenterOfMass[0] = 0.0; p_vecCenterOfMass[1] = 0.0; p_vecCenterOfMass[2] = 0.0; //ds total mass float fTotalMass( 0.0 ); //ds for each particle for( unsigned int u = 0; u < p_uNumberOfParticles; ++u ) { //ds update the center p_vecCenterOfMass[0] += s_arrRelativeCenterOfMassPlusMass[4*u+0]; p_vecCenterOfMass[1] += s_arrRelativeCenterOfMassPlusMass[4*u+1]; p_vecCenterOfMass[2] += s_arrRelativeCenterOfMassPlusMass[4*u+2]; //ds update the total mass fTotalMass += s_arrRelativeCenterOfMassPlusMass[4*u+3]; } //ds calculate the result p_vecCenterOfMass[0] /= fTotalMass; p_vecCenterOfMass[1] /= fTotalMass; p_vecCenterOfMass[2] /= fTotalMass; } } __global__ void getTotalAngularMomentum( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrMasses, float* p_vecTotalAngularMomentum ) { //ds dynamic shared memory to calculate the total angular momentum extern __shared__ float s_arrAngularMomentum[]; //ds Nx3 Array in this case const unsigned int uIndex1D( threadIdx.x ); const unsigned int uIndex3D( 3*threadIdx.x ); //ds save current mass const float fCurrentMass( p_arrMasses[uIndex1D] ); //ds set the relative mass s_arrAngularMomentum[uIndex3D+0] = fCurrentMass*( p_arrPositions[uIndex3D+1]*p_arrVelocities[uIndex3D+2] - p_arrPositions[uIndex3D+2]*p_arrVelocities[uIndex3D+1] ); s_arrAngularMomentum[uIndex3D+1] = fCurrentMass*( p_arrPositions[uIndex3D+2]*p_arrVelocities[uIndex3D+0] - p_arrPositions[uIndex3D+0]*p_arrVelocities[uIndex3D+2] ); s_arrAngularMomentum[uIndex3D+2] = fCurrentMass*( p_arrPositions[uIndex3D+0]*p_arrVelocities[uIndex3D+1] - p_arrPositions[uIndex3D+1]*p_arrVelocities[uIndex3D+0] ); //ds wait until all threads are done __syncthreads( ); //ds first thread does the accumulation if( 0 == uIndex1D ) { //ds initialization p_vecTotalAngularMomentum[0] = 0.0; p_vecTotalAngularMomentum[1] = 0.0; p_vecTotalAngularMomentum[2] = 0.0; //ds loop over all particles for( unsigned int u = 0; u < p_uNumberOfParticles; ++u ) { //ds get the values from the shared memory p_vecTotalAngularMomentum[0] += s_arrAngularMomentum[3*u+0]; 
p_vecTotalAngularMomentum[1] += s_arrAngularMomentum[3*u+1]; p_vecTotalAngularMomentum[2] += s_arrAngularMomentum[3*u+2]; } } } //-------------------------------------------------------------------------------------------------------------------------*/
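Note on getTotalEnergy in the file above: each thread writes its partial energy into dynamic shared memory, and after the barrier thread 0 adds all entries serially. A common alternative, shown here only as a hedged sketch (it is not what the file does), is a pairwise tree reduction in shared memory; this version assumes the block size is a power of two, and sumBlock is a name of this sketch.

__global__ void sumBlock(const float *partial, float *result, int n)
{
    extern __shared__ float s_val[];
    const unsigned int tid = threadIdx.x;

    // load one value per thread (0.0 past the end), then reduce pairwise
    s_val[tid] = (tid < n) ? partial[tid] : 0.0f;
    __syncthreads();

    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (tid < stride)
            s_val[tid] += s_val[tid + stride];
        __syncthreads();
    }

    if (tid == 0)
        *result = s_val[0];   // log2(blockDim.x) passes instead of a serial loop over all threads
}

// e.g. sumBlock<<<1, 256, 256 * sizeof(float)>>>(d_partial, d_total, nParticles);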
3779dda9c6a96c2870ee2a0a546a4d5836d9e2f2.cu
#include "CCubicDomain.cuh" //ds domain structure #include "Timer.h" //ds time measurement #include <iostream> //ds cout #include <cuda.h> //ds needed for eclipse indexer only (not for compilation) #include <cuda_runtime.h> //ds needed for eclipse indexer only (not for compilation) #include <device_launch_parameters.h> //ds needed for eclipse indexer only (not for compilation) //ds CUDA kernels - split up acceleration and velocity verlet for better readability - no shared memory used within these blocks (no overhead due to copying of 2d arrays) //-------------------------------------------------------------------------------------------------------------------------// __global__ void computeAccelerationsLennardJones( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrMasses, float* p_arrNewAcclerations, const float p_fLowerBoundary, const float p_fUpperBoundary, const float p_fMinimumDistance, const float p_fPotentialDepth ); __global__ void updateParticlesVelocityVerlet( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrAccelerations, float* p_arrNewAcclerations, const float p_fLowerBoundary, const float p_fUpperBoundary, const float p_fTimeStepSize ); __global__ void getTotalEnergy( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrMasses, const float p_fMinimumDistance, const float p_fPotentialDepth, float* p_fTotalEnergy ); //-------------------------------------------------------------------------------------------------------------------------// //ds NOT USED CUDA kernels due to worse perfomance than CPU solution /*-------------------------------------------------------------------------------------------------------------------------// __global__ void getCenterOfMass( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrMasses, float* p_vecCenterOfMass ); __global__ void getTotalAngularMomentum( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrVelocities, float* p_arrMasses, float* p_vecTotalAngularMomentum ); //-------------------------------------------------------------------------------------------------------------------------*/ int main( int argc, char** argv ) { //ds check simple input arguments - CAUTION: the implementation expects real numbers, the simulation will be corrupted if invalid values are entered if( 4 != argc ) { //ds inform std::cout << "usage: nbody_gpu [Number of particles] [Number of time steps] [Target energy]" << std::endl; return 0; } //ds start timing Timer tmTimer; tmTimer.start( ); //ds domain configuration const std::pair< double, double > pairBoundaries( -1.0, 1.0 ); const float fDomainWidth( fabs( pairBoundaries.first ) + fabs( pairBoundaries.second ) ); const unsigned int uNumberOfParticles( atoi( argv[1] ) ); //ds current simulation configuration const float fTimeStepSize( 0.0001 ); const unsigned int uNumberOfTimeSteps( atoi( argv[2] ) ); const float fMinimumDistance( pow( 1.0/uNumberOfParticles, 1.0/3 ) ); const float fPotentialDepth( 1.0 ); //ds target kinetic energy const float fTargetKineticEnergy( atol( argv[3] ) ); std::cout << "------- GPU SETUP -----------------------------------------------------------" << std::endl; std::cout << " Number of particles: " << uNumberOfParticles << std::endl; std::cout << " Boundary (3D): [" << pairBoundaries.first << ", " << pairBoundaries.second << "]" << std::endl; std::cout << " Domain Width: " << fDomainWidth << std::endl; 
std::cout << " Minimum distance: " << fMinimumDistance << std::endl; std::cout << " Cutoff distance: " << 2.5*fMinimumDistance << std::endl; std::cout << " Potential depth: " << fPotentialDepth << std::endl; std::cout << "Target kinetic energy: " << fTargetKineticEnergy << std::endl; std::cout << " Number of time steps: " << uNumberOfTimeSteps << std::endl; std::cout << " Time step size: " << fTimeStepSize << std::endl; std::cout << "-----------------------------------------------------------------------------" << std::endl; //ds allocate a domain to work with specifying number of particles and timing NBody::CCubicDomain cDomain( uNumberOfParticles ); //ds create particles uniformly from a normal distribution cDomain.createParticlesUniformFromNormalDistribution( fTargetKineticEnergy ); //ds host information: particles float* h_arrPositions ( cDomain.getPositions( ) ); float* h_arrVelocities ( cDomain.getVelocities( ) ); float* h_arrAccelerations( cDomain.getAccelerations( ) ); float* h_arrMasses ( cDomain.getMasses( ) ); //ds host information: integrals and initialize them float h_fTotalEnergy( 0.0 ); //float h_vecCenterOfMass[3]; h_vecCenterOfMass[0] = 0.0; h_vecCenterOfMass[1] = 0.0; h_vecCenterOfMass[2] = 0.0; //float h_vecTotalAngularMomentum[3]; h_vecTotalAngularMomentum[0] = 0.0; h_vecTotalAngularMomentum[1] = 0.0; h_vecTotalAngularMomentum[2] = 0.0; //ds device handles: particles float* d_arrPositions ( 0 ); //Nx3 float* d_arrVelocities ( 0 ); //Nx3 float* d_arrAccelerations ( 0 ); //Nx3 float* d_arrMasses ( 0 ); //Nx3 float* d_arrNewAccelerations( 0 ); //Nx3 //ds device handles: integrals float* d_fTotalEnergy ( 0 ); //1x1 //float* d_vecCenterOfMass ( 0 ); //3x1 //float* d_vecTotalAngularMomentum( 0 ); //3x1 //ds allocate memory: particles (here we see the advantage of using linear arrays) cudaMalloc( (void **)&d_arrPositions , uNumberOfParticles*3*sizeof( float ) ); cudaMalloc( (void **)&d_arrVelocities , uNumberOfParticles*3*sizeof( float ) ); cudaMalloc( (void **)&d_arrAccelerations , uNumberOfParticles*3*sizeof( float ) ) ; cudaMalloc( (void **)&d_arrMasses , uNumberOfParticles*sizeof( float ) ) ; cudaMalloc( (void **)&d_arrNewAccelerations, uNumberOfParticles*3*sizeof( float ) ) ; //ds allocate memory: integrals cudaMalloc( (void **)&d_fTotalEnergy , sizeof( float ) ) ; //cudaMalloc( (void **)&d_vecCenterOfMass , 3*sizeof( float ) ) ; //cudaMalloc( (void **)&d_vecTotalAngularMomentum, 3*sizeof( float ) ) ; //ds copy memory to gpu to initialize the situation cudaMemcpy( d_arrPositions , h_arrPositions , uNumberOfParticles*3*sizeof( float ), cudaMemcpyHostToDevice ); cudaMemcpy( d_arrVelocities , h_arrVelocities , uNumberOfParticles*3*sizeof( float ), cudaMemcpyHostToDevice ); cudaMemcpy( d_arrAccelerations, h_arrAccelerations, uNumberOfParticles*3*sizeof( float ), cudaMemcpyHostToDevice ); cudaMemcpy( d_arrMasses , h_arrMasses , uNumberOfParticles*sizeof( float ) , cudaMemcpyHostToDevice ); //ds information std::cout << " Status: 0% done - current step: 0"; //ds start simulation for( unsigned int uCurrentTimeStep = 1; uCurrentTimeStep < uNumberOfTimeSteps+1; ++uCurrentTimeStep ) { //ds calculate percentage done const float fPercentageDone( 100.0*uCurrentTimeStep/uNumberOfTimeSteps ); //ds get a formatted string -> 100% -> 3 digits char chBuffer[4]; //ds fill the buffer std::snprintf( chBuffer, 4, "%3.0f", fPercentageDone ); //ds print info std::cout << '\xd'; std::cout << " Status: " << chBuffer << "% done - current step: " << uCurrentTimeStep; //ds calculate the new 
accelerations computeAccelerationsLennardJones<<< 1, uNumberOfParticles >>>( uNumberOfParticles, d_arrPositions, d_arrMasses, d_arrNewAccelerations, pairBoundaries.first, pairBoundaries.second, fMinimumDistance, fPotentialDepth ); //ds update particle properties according to velocity verlet scheme updateParticlesVelocityVerlet<<< 1, uNumberOfParticles >>>( uNumberOfParticles, d_arrPositions, d_arrVelocities, d_arrAccelerations, d_arrNewAccelerations, pairBoundaries.first, pairBoundaries.second, fTimeStepSize ); //ds compute total energy getTotalEnergy<<< 1, uNumberOfParticles, uNumberOfParticles*sizeof( float ) >>>( uNumberOfParticles, d_arrPositions, d_arrVelocities, d_arrMasses, fMinimumDistance, fPotentialDepth, d_fTotalEnergy ); /*ds compute center of mass getCenterOfMass<<< 1, uNumberOfParticles, uNumberOfParticles*4*sizeof( float ) >>>( uNumberOfParticles, d_arrPositions, d_arrMasses, d_vecCenterOfMass );*/ /*ds compute total angular momentum - INFO: slower than cpu version getTotalAngularMomentum<<< 1, uNumberOfParticles, uNumberOfParticles*3*sizeof( float ) >>>( uNumberOfParticles, d_arrPositions, d_arrVelocities, d_arrMasses, d_vecTotalAngularMomentum );*/ //ds get the particle information from gpu to cpu cudaMemcpy( h_arrPositions , d_arrPositions , uNumberOfParticles*3*sizeof( float ), cudaMemcpyDeviceToHost ); cudaMemcpy( h_arrVelocities , d_arrVelocities , uNumberOfParticles*3*sizeof( float ), cudaMemcpyDeviceToHost ); cudaMemcpy( h_arrAccelerations, d_arrAccelerations, uNumberOfParticles*3*sizeof( float ), cudaMemcpyDeviceToHost ); cudaMemcpy( h_arrMasses , d_arrMasses , uNumberOfParticles*sizeof( float ) , cudaMemcpyDeviceToHost ); //ds get the integrals information from gpu to cpu cudaMemcpy( &h_fTotalEnergy , d_fTotalEnergy , sizeof( float ), cudaMemcpyDeviceToHost ); //cudaMemcpy( h_vecCenterOfMass , d_vecCenterOfMass , 3*sizeof( float ), cudaMemcpyDeviceToHost ); //cudaMemcpy( h_vecTotalAngularMomentum, d_vecTotalAngularMomentum, 3*sizeof( float ), cudaMemcpyDeviceToHost ); //ds save particle and integral information - the correct integrals saving procedure gets called automatically depending on parameters cDomain.saveParticlesToStream( ); cDomain.saveIntegralsToStream( h_fTotalEnergy ); //<- only total energy resource is taken from CUDA computation } //ds deallocate memory cudaFree( d_arrPositions ); cudaFree( d_arrVelocities ); cudaFree( d_arrAccelerations ); cudaFree( d_arrMasses ); cudaFree( d_arrNewAccelerations ); cudaFree( d_fTotalEnergy ); //cudaFree( d_vecCenterOfMass ); //cudaFree( d_vecTotalAngularMomentum ); //ds save the streams to a file cDomain.writeParticlesToFile( "bin/simulation.txt", uNumberOfTimeSteps ); cDomain.writeIntegralsToFile( "bin/integrals.txt", uNumberOfTimeSteps, fTimeStepSize ); //ds stop timing const double dDurationSeconds( tmTimer.stop( ) ); //ds cause an output ostream std::cout << std::endl; std::cout << " Computation time: " << dDurationSeconds << std::endl; std::cout << "-----------------------------------------------------------------------------" << std::endl; return 0; } //ds CUDA kernels - split up acceleration and velocity verlet for better readability - no shared memory used within these blocks (no overhead due to copying of 2d arrays) //-------------------------------------------------------------------------------------------------------------------------// __global__ void computeAccelerationsLennardJones( const unsigned int p_uNumberOfParticles, float* p_arrPositions, float* p_arrMasses, float* p_arrNewAccelerations, const 
float p_fLowerBoundary, const float p_fUpperBoundary, const float p_fMinimumDistance, const float p_fPotentialDepth )
{
    //ds regular index and "real" particle index equals three times thread index, since were working with a linear 2d array
    const unsigned int uIndex1D( threadIdx.x );
    const unsigned int uIndex3D( 3*threadIdx.x );

    //ds get current mass (constant)
    const float fCurrentMass( p_arrMasses[uIndex1D] );

    //ds force instance to calculate for the current particle
    float vecTotalForce[3];

    //ds make sure all elements are initialized correctly
    vecTotalForce[0] = 0.0;
    vecTotalForce[1] = 0.0;
    vecTotalForce[2] = 0.0;

    //ds get the domain size
    const float fDomainSize( fabs( p_fLowerBoundary ) + fabs( p_fUpperBoundary ) );

    //ds loop over all other particles
    for( unsigned int u = 0; u < p_uNumberOfParticles; ++u )
    {
        //ds do not treat itself (else nan results because division by zero)
        if( u != uIndex1D )
        {
            //ds cutoff distance
            const float fDistanceCutoff( 2.5*p_fMinimumDistance );

            //ds we have to loop over the cubic boundary conditions
            for( float dX = p_fLowerBoundary; dX < p_fUpperBoundary+1.0; ++dX )
            {
                for( float dY = p_fLowerBoundary; dY < p_fUpperBoundary+1.0; ++dY )
                {
                    for( float dZ = p_fLowerBoundary; dZ < p_fUpperBoundary+1.0; ++dZ )
                    {
                        //ds get the radial vector between the particles
                        float vecRadius[3];

                        //ds calculate the distance: domain + particle2 - particle1
                        vecRadius[0] = dX*fDomainSize + p_arrPositions[3*u+0] - p_arrPositions[uIndex3D+0];
                        vecRadius[1] = dY*fDomainSize + p_arrPositions[3*u+1] - p_arrPositions[uIndex3D+1];
                        vecRadius[2] = dZ*fDomainSize + p_arrPositions[3*u+2] - p_arrPositions[uIndex3D+2];

                        //ds get the absolute distance
                        const float fDistanceAbsolute( sqrt( pow( vecRadius[0], 2 ) + pow( vecRadius[1], 2 ) + pow( vecRadius[2], 2 ) ) );

                        //ds if we are between the minimum distance and the cutoff range
                        if( p_fMinimumDistance < fDistanceAbsolute && fDistanceCutoff > fDistanceAbsolute )
                        {
                            //ds calculate the lennard jones force prefix
                            const float fLJFPrefix( -24*p_fPotentialDepth*( 2*pow( p_fMinimumDistance/fDistanceAbsolute, 12 ) - pow( p_fMinimumDistance/fDistanceAbsolute, 6 ) )*1/pow( fDistanceAbsolute, 2 ) );

                            //ds add the information to the force including the radial component
                            vecTotalForce[0] += fLJFPrefix*vecRadius[0];
                            vecTotalForce[1] += fLJFPrefix*vecRadius[1];
                            vecTotalForce[2] += fLJFPrefix*vecRadius[2];
                        }
                    }
                }
            }
        }
    }

    //ds set the new acceleration
    p_arrNewAccelerations[uIndex3D+0] = vecTotalForce[0]/fCurrentMass;
    p_arrNewAccelerations[uIndex3D+1] = vecTotalForce[1]/fCurrentMass;
    p_arrNewAccelerations[uIndex3D+2] = vecTotalForce[2]/fCurrentMass;
}

__global__ void updateParticlesVelocityVerlet( const unsigned int p_uNumberOfParticles,
                                               float* p_arrPositions,
                                               float* p_arrVelocities,
                                               float* p_arrAccelerations,
                                               float* p_arrNewAccelerations,
                                               const float p_fLowerBoundary,
                                               const float p_fUpperBoundary,
                                               const float p_fTimeStepSize )
{
    //ds 3d index for the linear array
    const unsigned int uIndex3D( 3*threadIdx.x );

    //ds calculate domain size
    const float fDomainSize( abs( p_fLowerBoundary ) + abs( p_fUpperBoundary ) );

    //ds velocity-verlet for position
    p_arrPositions[uIndex3D+0] = p_arrPositions[uIndex3D+0] + p_fTimeStepSize*p_arrVelocities[uIndex3D+0] + 1.0/2*pow( p_fTimeStepSize, 2 )*p_arrAccelerations[uIndex3D+0];
    p_arrPositions[uIndex3D+1] = p_arrPositions[uIndex3D+1] + p_fTimeStepSize*p_arrVelocities[uIndex3D+1] + 1.0/2*pow( p_fTimeStepSize, 2 )*p_arrAccelerations[uIndex3D+1];
    p_arrPositions[uIndex3D+2] = p_arrPositions[uIndex3D+2] + p_fTimeStepSize*p_arrVelocities[uIndex3D+2] + 1.0/2*pow( p_fTimeStepSize, 2 )*p_arrAccelerations[uIndex3D+2];

    //ds produce periodic boundary shifting - check each element: x,y,z
    for( unsigned int v = 0; v < 3; ++v )
    {
        //ds check if we are below the boundary
        while( p_fLowerBoundary > p_arrPositions[uIndex3D+v] )
        {
            //ds map the particle to the other boundary by shifting it up to the boundary
            p_arrPositions[uIndex3D+v] += fDomainSize;
        }

        //ds check if we are above the boundary
        while( p_fUpperBoundary < p_arrPositions[uIndex3D+v] )
        {
            //ds map the particle to the other boundary by shifting it back to the boundary
            p_arrPositions[uIndex3D+v] -= fDomainSize;
        }
    }

    //ds velocity-verlet for velocity
    p_arrVelocities[uIndex3D+0] = p_arrVelocities[uIndex3D+0] + ( p_fTimeStepSize/2 )*( p_arrNewAccelerations[uIndex3D+0] + p_arrAccelerations[uIndex3D+0] );
    p_arrVelocities[uIndex3D+1] = p_arrVelocities[uIndex3D+1] + ( p_fTimeStepSize/2 )*( p_arrNewAccelerations[uIndex3D+1] + p_arrAccelerations[uIndex3D+1] );
    p_arrVelocities[uIndex3D+2] = p_arrVelocities[uIndex3D+2] + ( p_fTimeStepSize/2 )*( p_arrNewAccelerations[uIndex3D+2] + p_arrAccelerations[uIndex3D+2] );

    //ds update the old accelerations
    p_arrAccelerations[uIndex3D+0] = p_arrNewAccelerations[uIndex3D+0];
    p_arrAccelerations[uIndex3D+1] = p_arrNewAccelerations[uIndex3D+1];
    p_arrAccelerations[uIndex3D+2] = p_arrNewAccelerations[uIndex3D+2];
}

__global__ void getTotalEnergy( const unsigned int p_uNumberOfParticles,
                                float* p_arrPositions,
                                float* p_arrVelocities,
                                float* p_arrMasses,
                                const float p_fMinimumDistance,
                                const float p_fPotentialDepth,
                                float* p_fTotalEnergy )
{
    //ds dynamic shared total energy to sum up by first thread
    extern __shared__ float s_arrTotalEnergy[];

    //ds regular index and "real" particle index equals three times thread index, since were working with a linear 2d array
    const unsigned int uIndex1D( threadIdx.x );
    const unsigned int uIndex3D( 3*threadIdx.x );

    //ds make sure the shared memory is empty (each thread does this)
    s_arrTotalEnergy[uIndex1D] = 0.0;

    //ds wait until all threads are done
    __syncthreads( );

    //ds add the kinetic component of the current particle
    s_arrTotalEnergy[uIndex1D] += p_arrMasses[uIndex1D]/2*pow( sqrt( pow( p_arrVelocities[uIndex3D+0], 2 ) + pow( p_arrVelocities[uIndex3D+1], 2 ) + pow( p_arrVelocities[uIndex3D+2], 2 ) ), 2 );

    //ds cutoff
    const float fDistanceCutoff( 2.5*p_fMinimumDistance );

    //ds calculate the total energy of the new configuration - loop over all other particles (dont do the same particles twice)
    for( unsigned int u = uIndex1D+1; u < p_uNumberOfParticles; ++u )
    {
        //ds get the absolute distance
        const float fDistanceAbsolute( sqrt( pow( p_arrPositions[3*u+0] - p_arrPositions[uIndex3D+0], 2 )
                                           + pow( p_arrPositions[3*u+1] - p_arrPositions[uIndex3D+1], 2 )
                                           + pow( p_arrPositions[3*u+2] - p_arrPositions[uIndex3D+2], 2 ) ) );

        //ds if we are between the minimum distance and the cutoff range
        if( p_fMinimumDistance < fDistanceAbsolute && fDistanceCutoff > fDistanceAbsolute )
        {
            //ds add the potential component
            s_arrTotalEnergy[uIndex1D] += 4*p_fPotentialDepth*( pow( p_fMinimumDistance/fDistanceAbsolute, 12 ) - pow( p_fMinimumDistance/fDistanceAbsolute, 6 ) );
        }
    }

    //ds wait until all threads are done
    __syncthreads( );

    //ds thread 0 calculates the total energy
    if( 0 == uIndex1D )
    {
        //ds total energy to sum up
        float fTotalEnergy( 0.0 );

        for( unsigned int u = 0; u < p_uNumberOfParticles; ++u )
        {
            fTotalEnergy += s_arrTotalEnergy[u];
        }

        //ds set the return value
        *p_fTotalEnergy = fTotalEnergy;
    }
}

//-------------------------------------------------------------------------------------------------------------------------//

//ds NOT USED CUDA kernels due to worse perfomance than CPU solution
/*-------------------------------------------------------------------------------------------------------------------------//

__global__ void getCenterOfMass( const unsigned int p_uNumberOfParticles,
                                 float* p_arrPositions,
                                 float* p_arrMasses,
                                 float* p_vecCenterOfMass )
{
    //ds dynamic shared relative center of mass to sum up by first thread + the mass (Nx4)
    extern __shared__ float s_arrRelativeCenterOfMassPlusMass[]; //ds Nx4 Array in this case

    const unsigned int uIndex1D( threadIdx.x );
    const unsigned int uIndex4D( 4*threadIdx.x );

    //ds save current mass
    const float fCurrentMass( p_arrMasses[uIndex1D] );

    //ds set the relative mass
    s_arrRelativeCenterOfMassPlusMass[uIndex4D+0] = fCurrentMass*p_arrPositions[uIndex4D+0];
    s_arrRelativeCenterOfMassPlusMass[uIndex4D+1] = fCurrentMass*p_arrPositions[uIndex4D+1];
    s_arrRelativeCenterOfMassPlusMass[uIndex4D+2] = fCurrentMass*p_arrPositions[uIndex4D+2];

    //ds save it to the shared array too
    s_arrRelativeCenterOfMassPlusMass[uIndex4D+3] = fCurrentMass;

    //ds wait until all threads are done
    __syncthreads( );

    //ds the first thread now calculates the result
    if( 0 == uIndex1D )
    {
        //ds initialize
        p_vecCenterOfMass[0] = 0.0;
        p_vecCenterOfMass[1] = 0.0;
        p_vecCenterOfMass[2] = 0.0;

        //ds total mass
        float fTotalMass( 0.0 );

        //ds for each particle
        for( unsigned int u = 0; u < p_uNumberOfParticles; ++u )
        {
            //ds update the center
            p_vecCenterOfMass[0] += s_arrRelativeCenterOfMassPlusMass[4*u+0];
            p_vecCenterOfMass[1] += s_arrRelativeCenterOfMassPlusMass[4*u+1];
            p_vecCenterOfMass[2] += s_arrRelativeCenterOfMassPlusMass[4*u+2];

            //ds update the total mass
            fTotalMass += s_arrRelativeCenterOfMassPlusMass[4*u+3];
        }

        //ds calculate the result
        p_vecCenterOfMass[0] /= fTotalMass;
        p_vecCenterOfMass[1] /= fTotalMass;
        p_vecCenterOfMass[2] /= fTotalMass;
    }
}

__global__ void getTotalAngularMomentum( const unsigned int p_uNumberOfParticles,
                                         float* p_arrPositions,
                                         float* p_arrVelocities,
                                         float* p_arrMasses,
                                         float* p_vecTotalAngularMomentum )
{
    //ds dynamic shared memory to calculate the total angular momentum
    extern __shared__ float s_arrAngularMomentum[]; //ds Nx3 Array in this case

    const unsigned int uIndex1D( threadIdx.x );
    const unsigned int uIndex3D( 3*threadIdx.x );

    //ds save current mass
    const float fCurrentMass( p_arrMasses[uIndex1D] );

    //ds set the relative mass
    s_arrAngularMomentum[uIndex3D+0] = fCurrentMass*( p_arrPositions[uIndex3D+1]*p_arrVelocities[uIndex3D+2] - p_arrPositions[uIndex3D+2]*p_arrVelocities[uIndex3D+1] );
    s_arrAngularMomentum[uIndex3D+1] = fCurrentMass*( p_arrPositions[uIndex3D+2]*p_arrVelocities[uIndex3D+0] - p_arrPositions[uIndex3D+0]*p_arrVelocities[uIndex3D+2] );
    s_arrAngularMomentum[uIndex3D+2] = fCurrentMass*( p_arrPositions[uIndex3D+0]*p_arrVelocities[uIndex3D+1] - p_arrPositions[uIndex3D+1]*p_arrVelocities[uIndex3D+0] );

    //ds wait until all threads are done
    __syncthreads( );

    //ds first thread does the accumulation
    if( 0 == uIndex1D )
    {
        //ds initialization
        p_vecTotalAngularMomentum[0] = 0.0;
        p_vecTotalAngularMomentum[1] = 0.0;
        p_vecTotalAngularMomentum[2] = 0.0;

        //ds loop over all particles
        for( unsigned int u = 0; u < p_uNumberOfParticles; ++u )
        {
            //ds get the values from the shared memory
            p_vecTotalAngularMomentum[0] += s_arrAngularMomentum[3*u+0];
            p_vecTotalAngularMomentum[1] += s_arrAngularMomentum[3*u+1];
            p_vecTotalAngularMomentum[2] += s_arrAngularMomentum[3*u+2];
        }
    }
}

//-------------------------------------------------------------------------------------------------------------------------*/
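Note: the kernels above are indexed with threadIdx.x only, so they assume a single-block launch with one thread per particle, and getTotalEnergy expects one float of dynamic shared memory per particle. The host-side sketch below is not part of the original file; the buffer names, initial configuration, and parameter values are illustrative assumptions, it presumes compilation in the same translation unit as the kernels, and it shows only the launch configuration rather than the original program's full time-stepping loop.

#include <cuda_runtime.h>
#include <vector>

int main( )
{
    //ds illustrative sizes and parameters (assumed, not taken from the original source)
    const unsigned int uNumberOfParticles( 64 );
    const float fLowerBoundary( -1.0f ), fUpperBoundary( 1.0f );
    const float fMinimumDistance( 0.02f ), fPotentialDepth( 0.01f );
    const float fTimeStepSize( 0.0001f );

    //ds device buffers: 3 floats per particle for the vector quantities, 1 per particle for the masses
    float *d_arrPositions, *d_arrVelocities, *d_arrAccelerations, *d_arrNewAccelerations, *d_arrMasses, *d_fTotalEnergy;
    cudaMalloc( (void**)&d_arrPositions,        3*uNumberOfParticles*sizeof( float ) );
    cudaMalloc( (void**)&d_arrVelocities,       3*uNumberOfParticles*sizeof( float ) );
    cudaMalloc( (void**)&d_arrAccelerations,    3*uNumberOfParticles*sizeof( float ) );
    cudaMalloc( (void**)&d_arrNewAccelerations, 3*uNumberOfParticles*sizeof( float ) );
    cudaMalloc( (void**)&d_arrMasses,             uNumberOfParticles*sizeof( float ) );
    cudaMalloc( (void**)&d_fTotalEnergy, sizeof( float ) );

    //ds trivial assumed initial configuration: unit masses, zero velocities/accelerations, particles spread along x
    std::vector< float > vecPositions( 3*uNumberOfParticles, 0.0f );
    std::vector< float > vecZeros( 3*uNumberOfParticles, 0.0f );
    std::vector< float > vecMasses( uNumberOfParticles, 1.0f );
    for( unsigned int u = 0; u < uNumberOfParticles; ++u )
    {
        vecPositions[3*u+0] = fLowerBoundary + ( u + 0.5f )*( fUpperBoundary - fLowerBoundary )/uNumberOfParticles;
    }
    cudaMemcpy( d_arrPositions, vecPositions.data( ), 3*uNumberOfParticles*sizeof( float ), cudaMemcpyHostToDevice );
    cudaMemcpy( d_arrVelocities, vecZeros.data( ), 3*uNumberOfParticles*sizeof( float ), cudaMemcpyHostToDevice );
    cudaMemcpy( d_arrAccelerations, vecZeros.data( ), 3*uNumberOfParticles*sizeof( float ), cudaMemcpyHostToDevice );
    cudaMemcpy( d_arrNewAccelerations, vecZeros.data( ), 3*uNumberOfParticles*sizeof( float ), cudaMemcpyHostToDevice );
    cudaMemcpy( d_arrMasses, vecMasses.data( ), uNumberOfParticles*sizeof( float ), cudaMemcpyHostToDevice );

    //ds one block, one thread per particle - matches the threadIdx.x-only indexing of the kernels
    updateParticlesVelocityVerlet<<< 1, uNumberOfParticles >>>( uNumberOfParticles,
                                                                d_arrPositions, d_arrVelocities,
                                                                d_arrAccelerations, d_arrNewAccelerations,
                                                                fLowerBoundary, fUpperBoundary, fTimeStepSize );

    //ds getTotalEnergy sums into dynamic shared memory: one float per particle must be passed at launch
    getTotalEnergy<<< 1, uNumberOfParticles, uNumberOfParticles*sizeof( float ) >>>( uNumberOfParticles,
                                                                                     d_arrPositions, d_arrVelocities, d_arrMasses,
                                                                                     fMinimumDistance, fPotentialDepth, d_fTotalEnergy );

    //ds retrieve the scalar result
    float fTotalEnergy( 0.0f );
    cudaMemcpy( &fTotalEnergy, d_fTotalEnergy, sizeof( float ), cudaMemcpyDeviceToHost );

    cudaFree( d_arrPositions );  cudaFree( d_arrVelocities );  cudaFree( d_arrAccelerations );
    cudaFree( d_arrNewAccelerations );  cudaFree( d_arrMasses );  cudaFree( d_fTotalEnergy );
    return 0;
}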
b4096c7099885a540bdc007fc2725a19556324fb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void cuda_dot(int N, double *a, double *b, double *c)
{
    // __shared__ double localDot[threadsPerBlock];  /* Statically defined */
    extern __shared__ double localDot[];

    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int localIndex = threadIdx.x;

    double localSum = 0;
    while (ix < N)
    {
        localSum += a[ix] * b[ix];   /* Reduction is here */
        ix += blockDim.x * gridDim.x;
    }

    /* Store sum computed by this thread */
    localDot[localIndex] = localSum;

    /* Wait for all threads to get to this point */
    __syncthreads();

    /* Every block should add up sum computed on threads in the block */
    int i = blockDim.x/2;
    while (i != 0)
    {
        if (localIndex < i)
        {
            localDot[localIndex] += localDot[localIndex + i];
        }
        __syncthreads();
        i /= 2;
    }

    /* Each block stores local dot product */
    if (localIndex == 0)
        c[blockIdx.x] = localDot[0];
}
b4096c7099885a540bdc007fc2725a19556324fb.cu
#include "includes.h" __global__ void cuda_dot(int N, double *a, double *b, double *c) { // __shared__ double localDot[threadsPerBlock]; /* Statically defined */ extern __shared__ double localDot[]; int ix = threadIdx.x + blockIdx.x * blockDim.x; int localIndex = threadIdx.x; double localSum = 0; while (ix < N) { localSum += a[ix] * b[ix]; /* Reduction is here */ ix += blockDim.x * gridDim.x; } /* Store sum computed by this thread */ localDot[localIndex] = localSum; /* Wait for all threads to get to this point */ __syncthreads(); /* Every block should add up sum computed on threads in the block */ int i = blockDim.x/2; while (i != 0) { if (localIndex < i) { localDot[localIndex] += localDot[localIndex + i]; } __syncthreads(); i /= 2; } /* Each block stores local dot product */ if (localIndex == 0) c[blockIdx.x] = localDot[0]; }
f39f3e3297928a17039f085dc9c416ad92a86d5c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * calcNormalFromSmoothedColorWCL.cu
 *
 *  Created on: 16-12-2013
 *      Author: Kamil Szewc ([email protected])
 */
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"

__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
    real q = sqrt(pow2(dpos.x) + pow2(dpos.y)) * par->I_H;

    if ((q < 2.0) && ((i != j) || ((i == j) && (q > 0.001f*par->H)))) {
        real gkx = grad_of_kern(dpos.x, q, par->I_H);
        real gky = grad_of_kern(dpos.y, q, par->I_H);

        real put = p[i].cs - p[j].cs;

        return MAKE_REAL2(p[j].m*put*gkx / p[j].d, p[j].m*put*gky / p[j].d);
    }
    else {
        return MAKE_REAL2(0.0, 0.0);
    }
}

__global__ void calcNormalFromSmoothedColorWCL(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par)
{
    uint index = threadIdx.x + blockIdx.x*blockDim.x;

    if (index < par->N) {
        register real2 result = MAKE_REAL2(0.0, 0.0);

        #include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"

        p[index].n.x = -result.x;
        p[index].n.y = -result.y;
        p[index].n.z = sqrt(pow2(result.x) + pow2(result.y));
    }
}
f39f3e3297928a17039f085dc9c416ad92a86d5c.cu
/*
 * calcNormalFromSmoothedColorWCL.cu
 *
 *  Created on: 16-12-2013
 *      Author: Kamil Szewc ([email protected])
 */
#include "../../sph.h"
#include "../../hlp.h"
#include "../../methods/kernels.cuh"
#include "../../methods/interactions.cuh"

__device__ static real2 interaction(uint i, uint j, real2 dpos, real2 dvel, Particle *p, Parameters *par)
{
    real q = sqrt(pow2(dpos.x) + pow2(dpos.y)) * par->I_H;

    if ((q < 2.0) && ((i != j) || ((i == j) && (q > 0.001f*par->H)))) {
        real gkx = grad_of_kern(dpos.x, q, par->I_H);
        real gky = grad_of_kern(dpos.y, q, par->I_H);

        real put = p[i].cs - p[j].cs;

        return MAKE_REAL2(p[j].m*put*gkx / p[j].d, p[j].m*put*gky / p[j].d);
    }
    else {
        return MAKE_REAL2(0.0, 0.0);
    }
}

__global__ void calcNormalFromSmoothedColorWCL(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par)
{
    uint index = threadIdx.x + blockIdx.x*blockDim.x;

    if (index < par->N) {
        register real2 result = MAKE_REAL2(0.0, 0.0);

        #include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh"

        p[index].n.x = -result.x;
        p[index].n.y = -result.y;
        p[index].n.z = sqrt(pow2(result.x) + pow2(result.y));
    }
}